
LDA Model in Practice: Topic Classification of Hillary Clinton's Emails


# coding: utf-8
import numpy as np
import pandas as pd
import re
from gensim import corpora, models, similarities
import gensim
from nltk.corpus import stopwords

df = pd.read_csv("./input/HillaryEmails.csv")
# The raw email data contains many NaN values; drop those rows outright.
df = df[['Id', 'ExtractedBodyText']].dropna()

def clean_email_text(text):
    text = text.replace('\n', " ")  # newlines are not needed
    text = re.sub(r"-", " ", text)  # split hyphenated words (e.g. july-edu ==> july edu)
    text = re.sub(r"\d+/\d+/\d+", "", text)  # dates carry no meaning for topic modelling
    text = re.sub(r"[0-2]?[0-9]:[0-6][0-9]", "", text)  # times, meaningless here
    text = re.sub(r"[\w]+@[\.\w]+", "", text)  # email addresses, meaningless here
    text = re.sub(r"/[a-zA-Z]*[:\//\]*[A-Za-z0-9\-_]+\.+[A-Za-z0-9\.\/%&=\?\-_]+/i", "", text)  # URLs, meaningless here
    pure_text = ''
    # In case other special characters (digits, etc.) remain, loop over the text and filter them out
    for letter in text:
        # keep only letters and spaces
        if letter.isalpha() or letter == ' ':
            pure_text += letter
    # Finally drop the single characters left stranded by the filtering above,
    # so that only meaningful words remain.
    text = ' '.join(word for word in pure_text.split() if len(word) > 1)
    return text
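
# A quick sanity check of the cleaner on a made-up line (hypothetical example, not from the original post):
# clean_email_text("Sent 06/15/2011 10:30 to hrod17@clintonemail.com re: the July-edu plan")
# -> 'Sent to re the July edu plan'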

docs = df['ExtractedBodyText']
docs = docs.apply(lambda s: clean_email_text(s))
doclist = docs.values
# NLTK's English stopword list (run nltk.download('stopwords') once if it is missing)
stop_words = set(stopwords.words('english'))

texts = [[word for word in doc.lower().split() if word not in stop_words] for doc in doclist]

dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
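# doc2bow turns each tokenised document into a sparse bag-of-words vector of (token_id, count) pairs,
# e.g. dictionary.doc2bow(["state", "department", "state"]) gives something like
# [(id_of_state, 2), (id_of_department, 1)]; the actual ids depend on the fitted Dictionary.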

lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)


print(lda.print_topics(num_topics=20, num_words=5))
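
With the model trained, a natural follow-up is to ask which topics a new (or held-out) email falls under. The sketch below is a minimal illustration of that step, reusing the dictionary, stop_words and lda objects built above; the sample sentence is made up, and get_document_topics is gensim's standard per-document inference call.

# Infer the topic mixture of an unseen piece of text with the trained model.
# The example sentence is hypothetical; any cleaned email body works the same way.
new_doc = clean_email_text("We need to schedule a meeting with the ambassador about Libya")
new_bow = dictionary.doc2bow([w for w in new_doc.lower().split() if w not in stop_words])
print(lda.get_document_topics(new_bow))  # list of (topic_id, probability) pairs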

