>>> from nltk.tokenize.stanford_segmenter import StanfordSegmenter
>>> segmenter = StanfordSegmenter(
...     path_to_jar='stanford-segmenter-3.8.0.jar',
...     path_to_sihan_corpora_dict='./data',
...     path_to_model='./data/pku.gz',
...     path_to_dict='./data/dict-chris6.ser.gz')
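The segmenter shells out to a Java process, so a working java binary is required; the jar and model paths above come from the Stanford Segmenter 3.8.0 distribution. If java is not on your PATH, NLTK's config_java helper can point to it explicitly (a minimal sketch; the binary path below is hypothetical):

>>> from nltk.internals import config_java
>>> config_java('/usr/bin/java')  # hypothetical location; skip this if java is already on PATH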
>>> sentence = u'这是斯坦福中文分词器测试'  # "This is a test of the Stanford Chinese word segmenter"
>>> segmenter.segment(sentence)
u'\u8fd9 \u662f \u65af\u5766\u798f \u4e2d\u6587 \u5206\u8bcd\u5668 \u6d4b\u8bd5\n'
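The interpreter echoes the result with escaped characters; printing it shows the segmented sentence directly:

>>> print(segmenter.segment(sentence))
这 是 斯坦福 中文 分词器 测试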
>>> segmenter.segment_file('test.simp.utf8')
u'\u9762\u5bf9 \u65b0 \u4e16\u7eaa \uff0c \u4e16\u754c \u5404\u56fd \u4eba\u6c11 \u7684 \u5171\u540c \u613f\u671b \u662f \uff1a \u7ee7\u7eed \u53d1\u5c55 \u4eba\u7c7b \u4ee5\u5f80 \u521b\u9020 \u7684 \u4e00\u5207 \u6587\u660e \u6210\u679c \uff0c \u514b\u670d 20 \u4e16\u7eaa \u56f0\u6270 \u7740 \u4eba\u7c7b \u7684 \u6218\u4e89 \u548c \u8d2b\u56f0 \u95ee\u9898 \uff0c \u63a8\u8fdb \u548c\u5e73 \u4e0e \u53d1\u5c55 \u7684 \u5d07\u9ad8 \u4e8b\u4e1a \uff0c \u521b\u9020 \u4e00\u4e2a \u7f8e\u597d \u7684 \u4e16\u754c \u3002\n'
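The same trick works for the file output:

>>> print(segmenter.segment_file('test.simp.utf8'))
面对 新 世纪 ， 世界 各国 人民 的 共同 愿望 是 ： 继续 发展 人类 以往 创造 的 一切 文明 成果 ， 克服 20 世纪 困扰 着 人类 的 战争 和 贫困 问题 ， 推进 和平 与 发展 的 崇高 事业 ， 创造 一个 美好 的 世界 。

(Roughly: "Facing the new century, the common aspiration of the world's peoples is to carry forward all the achievements of civilization that humanity has created, overcome the war and poverty that plagued humanity in the 20th century, advance the lofty cause of peace and development, and create a better world.")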
>>> outfile = open('outfile', 'w')
>>> result = segmenter.segment(sentence)
>>> outfile.write(result.encode('UTF-8'))  # segment() returns unicode, so encode it before writing (Python 2)
>>> outfile.close()
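The manual encode is a Python 2 detail. An alternative that writes the unicode result directly, and behaves the same on Python 2 and 3, is io.open with an explicit encoding (a minimal sketch reusing the segmenter and sentence from above):

>>> import io
>>> with io.open('outfile', 'w', encoding='utf-8') as f:
...     count = f.write(segmenter.segment(sentence))  # write() returns the number of characters written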