1. 程式人生 > >word2vec訓練維基百科中文詞向量

word2vec訓練維基百科中文詞向量

一、環境:

win10+anaconda3+TensorFlow1.10

二、資料:

三、word2vec步驟:

1. 將xml的wiki資料轉換成text資料

先將zhwiki-latest-pages-articles.xml.bz2檔案複製到process.py所在目錄下,cmd進入process.py檔案所在目錄,執行如下命令:python process.py zhwiki-latest-pages-articles.xml.bz2 wiki.zh.text 大約需執行半小時,程式碼如下: `#!E:\software\anaconda\python

# -*- coding: utf-8 -*-
"""Convert the zhwiki XML dump into plain text, one article per line.

Usage: python process.py zhwiki-latest-pages-articles.xml.bz2 wiki.zh.text
"""

import os
import logging
import sys

from gensim.corpora import WikiCorpus

if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    # Expect exactly two positional arguments: input dump, output text file.
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]
    space = ' '
    i = 0
    output = open(outp, 'w', encoding='utf-8')
    # dictionary={} skips building a gensim Dictionary that is not needed
    # for a plain text export.
    wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
    for text in wiki.get_texts():
        data = space.join(text)
        output.write(str(data) + '\n')
        i = i + 1
        if i % 10000 == 0:
            logger.info('Saved ' + str(i) + ' articles')
    output.close()
    logger.info('Finished ' + str(i) + ' articles')

#python process.py zhwiki-latest-pages-articles.xml.bz2 wiki.zh.text`

注:需先新增python為系統環境變數,如未設定,自行百度新增。

2. 將繁體中文轉化為簡體

下載並安裝opencc,在此連結https://bintray.com/package/files/byvoid/opencc/OpenCC自行下載適合版本。先將wiki.zh.text檔案複製到opencc所在目錄下,cmd下進入opencc.exe所在目錄,執行如下命令:opencc -i wiki.zh.text -o test.txt -c t2s.json -i表示輸入檔案,-o表示輸出檔案,t2s.json表示繁體轉換為簡體

3. 分詞

先去除標點、數字、英文字母、空格後,使用jieba分詞對語料資料分詞,程式碼如下:

import jieba
import jieba.analyse
import jieba.posseg as pseg
import codecs,sys
import re
def cut_words(sentence):
    """Segment *sentence* with jieba and return the UTF-8 bytes of the
    space-joined tokens."""
    segmented = jieba.cut(sentence)
    return " ".join(segmented).encode('utf-8')

punc = '[’!"#$%&\'()*+,-./:;<=>[email protected][\\]^_`{|}~]+'

f=codecs.open('test.txt','r',encoding="utf8")
target = codecs.open("zh.jian.wiki.seg.txt", 'w',encoding="utf8")
print ('open files')
line_num=1
line = f.readline()
while line:
    print('---- processing ', line_num, ' article----------------')
    # line = line.strip()
    line = re.sub(punc, '', line)
    line = re.sub(r'[A-Za-z0-9]|/d+','',line)   #用於移除英文和數字
    line = line.replace(' ','')
    line_seg = " ".join(jieba.cut(line))
    target.writelines(line_seg)
    line_num = line_num + 1
    line = f.readline()
f.close()
target.close()
exit()

4. 去停用詞

停用詞大同小異,根據需要自行下載。 附上一段很low,用起來還可以的程式碼:

#-*-coding: utf-8 -*-
#__author__ = 'Gavin'

import codecs,sys


# Load the stop-word list, one word per line.
stop_words = set()
with open('stopwords.txt', mode='r', encoding='utf-8') as f:
    for raw in f:
        # rstrip('\n') instead of the original line[:-1]: the slice
        # silently chopped the last character of the final word whenever
        # the file lacked a trailing newline.
        stop_words.add(raw.rstrip('\n'))
print('停用詞讀取完畢,共{n}個單詞'.format(n=len(stop_words)))

f = codecs.open('zh.jian.wiki.seg.txt','r',encoding="utf-8")
target = codecs.open("wiki.seg.del.stopwords.txt", 'w',encoding="utf-8")
print ('open files')
line_num=1
line = f.readline()
while line:
    print('---- processing ', line_num, ' article----------------')
    if len(line) > 0:
        for word in line:
            if word in stop_words:
                line = line.replace(word,"")
        line = line.replace("  "," ")
    target.writelines(line)
    line_num = line_num + 1
    line = f.readline()
f.close()
target.close()

5. 使用word2vec生成詞向量

cmd進入word2vec_model目錄下,執行如下命令:python word2vec_model.py wiki.seg.del.stopwords.txt wiki.zh.text.model wiki.zh.text.vector 執行如下指令碼檔案:

import logging
import os.path
import sys
import multiprocessing
from gensim.corpora import WikiCorpus
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
if __name__ == '__main__':
    # Log under the script's own name and echo the full command line so
    # each training run is traceable.
    script_name = os.path.basename(sys.argv[0])
    log = logging.getLogger(script_name)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    log.info("running %s" % ' '.join(sys.argv))

    # Usage: script <segmented-corpus> <model-out> <vectors-out>
    if len(sys.argv) < 4:
        print (globals()['__doc__'] % locals())
        sys.exit(1)
    corpus_path, model_path, vectors_path = sys.argv[1:4]

    # Train 400-dim skip-gram-style vectors on the line-per-article corpus,
    # using every available core.
    trained = Word2Vec(
        LineSentence(corpus_path),
        size=400,
        window=5,
        min_count=5,
        workers=multiprocessing.cpu_count(),
        iter=100,
    )
    trained.save(model_path)
    trained.wv.save_word2vec_format(vectors_path, binary=False)

6. 測試

from gensim.models import Word2Vec

# Load the model saved in step 5.
en_wiki_word2vec_model = Word2Vec.load('wiki.zh.text.model')

# Probe a few words for their nearest neighbours. Iterate the list
# directly instead of the original hard-coded `for i in range(6)`, so
# the word list can change length without touching the loop.
testwords = ['遊戲','數學','語言','傻子','籃球','垃圾']
for word in testwords:
    res = en_wiki_word2vec_model.most_similar(word)
    print (word)
    print (res)

只訓練了5個epoch的模型,效果如下: 遊戲 [(‘該遊戲’, 0.7471304535865784), (‘動作遊戲’, 0.6874773502349854), (‘小遊戲’, 0.6644865274429321), (‘險遊戲’, 0.6395503282546997), (‘遊戲性’, 0.6331509351730347), (‘電子遊戲’, 0.6327992081642151), (‘電腦遊戲’, 0.6284534931182861), (‘街機版’, 0.6260551810264587), (‘該作’, 0.6201362609863281), (‘玩家’, 0.6175597906112671)] 數學 [(‘微積分’, 0.7359386682510376), (‘算術’, 0.6960347890853882), (‘高數學’, 0.6587228178977966), (‘率論’, 0.650739312171936), (‘數學分析’, 0.6399469375610352), (‘邏輯學’, 0.616889238357544), (‘數論’, 0.6141794919967651), (‘數理邏輯’, 0.6043234467506409), (‘幾學’, 0.5979959964752197), (‘理論物理’, 0.5952602624893188)] 語言 [(‘語法’, 0.6683811545372009), (‘種語言’, 0.6223651170730591), (‘母語’, 0.6111584305763245), (‘民族語言’, 0.5865883231163025), (‘書面語’, 0.5793792009353638), (‘語言文字’, 0.578825056552887), (‘句法’, 0.5718889236450195), (‘程式語言’, 0.5688109993934631), (‘漢語’, 0.5685830116271973), (‘語系’, 0.5685670375823975)] 傻子 [(‘屁’, 0.6098009347915649), (‘混蛋’, 0.588520348072052), (‘吹牛’, 0.5869060754776001), (‘蠢’, 0.5845921039581299), (‘嚇死’, 0.5827444195747375), (‘瘋子’, 0.5645797252655029), (‘倒黴’, 0.5608420372009277), (‘說好’, 0.5594315528869629), (‘聰明人’, 0.5587517023086548), (‘耍’, 0.5586895942687988)] 籃球 [(‘美式足球’, 0.6449360847473145), (‘男子籃球’, 0.5934199094772339), (‘排球’, 0.5750641822814941), (‘橄欖球’, 0.5683438777923584), (‘籃球隊’, 0.5496147274971008), (‘網球’, 0.5251143574714661), (‘棒球’, 0.5236964821815491), (‘中國籃球’, 0.5215005874633789), (‘冰球’, 0.5158861875534058), (‘籃球運動’, 0.5121563673019409)] 垃圾 [(‘廢物’, 0.599250316619873), (‘焚化爐’, 0.5938857793807983), (‘掩埋場’, 0.5912256240844727), (‘填埋場’, 0.5648717880249023), (‘無害化’, 0.5433459281921387), (‘廢棄物’, 0.5368970036506653), (‘垃圾堆’, 0.5368722081184387), (‘廚餘’, 0.5288323760032654), (‘焚化’, 0.5150618553161621), (‘汙水’, 0.5059441924095154)]