
Using a Python Crawler on Stock Comments: A Simple Analysis of Retail Investor Sentiment

1. Background

Stock investors make up a large group of internet users, and their online mood reflects, to some degree, how a given stock is doing as well as how the market as a whole is moving. As a graduate student with time to spare, I decided to write a small script in my off hours to pull investors' comments and analyse how their sentiment evolves. The code will keep changing, because the results are not accurate yet, ha!

2. Data Source

This project is not for commercial use. The data comes from the East Money forum (guba.eastmoney.com). Owing to hardware constraints I only collected part of the comments for a single stock, and I skipped the official posts; everything here comes from retail investors' comments.

3. Data Collection

Python is a great tool for this. I used Selenium together with PhantomJS to fetch the pages; of course you still have to study the page's DOM structure to pull out the data you need.

The crawler:

from selenium import webdriver
import time
import json
import re
# from HTMLParser import HTMLParser
from myNLP import *
# from lxml import html
# import requests

class Crawler:
    url = ''
    newurl = set()
    headers = {}
    cookies = {}

    def __init__(self, stocknum, page):
        self.url = 'http://guba.eastmoney.com/list,'+stocknum+',5_'+page+'.html'
        cap = webdriver.DesiredCapabilities.PHANTOMJS
        cap["phantomjs.page.settings.resourceTimeout"] = 1000
        #cap["phantomjs.page.settings.loadImages"] = False
        #cap["phantomjs.page.settings.localToRemoteUrlAccessEnabled"] = True
        self.driver = webdriver.PhantomJS(desired_capabilities=cap)

    def crawAllHtml(self, url):
        self.driver.get(url)
        time.sleep(2)
        # htmlData = requests.get(url).content.decode('utf-8')
        # domTree = html.fromstring(htmlData)
        # return domTree

    def getNewUrl(self, url):
        self.newurl.add(url)

    def filterHtmlTag(self, htmlStr):
        self.htmlStr = htmlStr
        # filter CDATA first
        re_cdata = re.compile('//<!\[CDATA\[[^>]*//\]\]>', re.I)                 # CDATA
        re_script = re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)  # <script> blocks
        re_style = re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)     # <style> blocks
        re_br = re.compile('<br\s*?/?>')         # line breaks
        re_h = re.compile('</?\w+[^>]*>')        # HTML tags
        re_comment = re.compile('<!--[^>]*-->')  # HTML comments
        s = re_cdata.sub('', htmlStr)   # remove CDATA
        s = re_script.sub('', s)        # remove scripts
        s = re_style.sub('', s)         # remove styles
        s = re_br.sub('\n', s)          # turn <br> into newlines
        blank_line = re.compile('\n+')  # collapse extra blank lines
        s = blank_line.sub('\n', s)
        s = re_h.sub('', s)             # remove HTML tags
        s = re_comment.sub('', s)       # remove HTML comments
        # collapse extra blank lines again
        blank_line = re.compile('\n+')
        s = blank_line.sub('\n', s)
        return s

    def getData(self):
        comments = []
        self.crawAllHtml(self.url)
        postlist = self.driver.find_elements_by_xpath('//*[@id="articlelistnew"]/div')
        for post in postlist:
            href = post.find_elements_by_tag_name('span')[2].find_elements_by_tag_name('a')
            if len(href):
                self.getNewUrl(href[0].get_attribute('href'))
            # if len(post.find_elements_by_xpath('./span[3]/a/@href')):
            #     self.getNewUrl('http://guba.eastmoney.com'+post.find_elements_by_xpath('./span[3]/a/@href')[0])
        for url in self.newurl:
            self.crawAllHtml(url)
            # note: the local name 'time' shadows the time module inside this method only
            time = self.driver.find_elements_by_xpath('//*[@id="zwconttb"]/div[2]')
            post = self.driver.find_elements_by_xpath('//*[@id="zwconbody"]/div')
            age = self.driver.find_elements_by_xpath('//*[@id="zwconttbn"]/span/span[2]')
            if len(post) and len(time) and len(age):
                text = self.filterHtmlTag(post[0].text)
                if len(text):
                    tmp = myNLP(text)
                    comments.append({'time': time[0].text, 'content': tmp.prob, 'age': age[0].text})
            commentlist = self.driver.find_elements_by_xpath('//*[@id="zwlist"]/div')
            if len(commentlist):
                for comment in commentlist:
                    time = comment.find_elements_by_xpath('./div[3]/div[1]/div[2]')
                    post = comment.find_elements_by_xpath('./div[3]/div[1]/div[3]')
                    age = comment.find_elements_by_xpath('./div[3]/div[1]/div[1]/span[2]/span[2]')
                    if len(post) and len(time) and len(age):
                        text = self.filterHtmlTag(post[0].text)
                        if len(text):
                            tmp = myNLP(text)
                            comments.append({'time': time[0].text, 'content': tmp.prob, 'age': age[0].text})
        return json.dumps(comments)
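A note on the browser driver: PhantomJS support has been removed from recent Selenium releases, so if you reproduce this today you will likely need headless Chrome instead. Below is a minimal sketch under that assumption (Chrome and chromedriver installed); on current Selenium the find_elements_by_xpath calls above would also need to become find_elements(By.XPATH, ...).

from selenium import webdriver

def make_headless_chrome_driver():
    # headless Chrome as a drop-in replacement for the PhantomJS driver above
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')   # no visible browser window
    options.add_argument('--disable-gpu')
    return webdriver.Chrome(options=options)

# inside Crawler.__init__ one would then set:
# self.driver = make_headless_chrome_driver()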

Storage:
This part could really be handled by a database, but since this is just a trial run I simply dump part of the data into JSON files:

import io

class File:
    # small helper that writes crawled data to <src><name>.<type> as UTF-8
    name = ''
    type = ''
    src = ''
    file = ''

    def __init__(self, name, type, src):
        self.name = name
        self.type = type
        self.src = src
        filename = self.src+self.name+'.'+self.type
        self.file = io.open(filename, 'w+', encoding='utf-8')

    def inputData(self, data):
        self.file.write(data.decode('utf-8'))
        self.file.close()

    def closeFile(self):
        self.file.close()
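As noted above, a database would be sturdier than flat JSON files. Here is a minimal sketch using the standard-library sqlite3 module; the table name and columns are my own illustration rather than part of the original project, and datalist is the JSON string returned by Crawler.getData():

import json
import sqlite3

def save_comments(db_path, stocknum, datalist):
    # store one row per comment: stock code, post time, sentiment score, stock age
    conn = sqlite3.connect(db_path)
    conn.execute('CREATE TABLE IF NOT EXISTS comments '
                 '(stock TEXT, time TEXT, sentiment REAL, age TEXT)')
    rows = [(stocknum, c['time'], c['content'], c['age'])
            for c in json.loads(datalist)]
    conn.executemany('INSERT INTO comments VALUES (?, ?, ?, ?)', rows)
    conn.commit()
    conn.close()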

A local server for testing:

This exists only so the chart can be viewed in a browser: the page needs to read the data files, and JavaScript is not allowed to touch local files directly, so a simple HTTP server has to serve them:

import SimpleHTTPServer
import SocketServer

PORT = 8000
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
httpd.serve_forever()
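The snippet above is Python 2. On Python 3 the same modules live under http.server and socketserver; a minimal equivalent:

import http.server
import socketserver

PORT = 8000
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
httpd.serve_forever()

Either way, running python -m SimpleHTTPServer 8000 (Python 2) or python3 -m http.server 8000 from the project root does the same job, and the chart page is then reachable at http://localhost:8000/.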

The NLP part: the snowNLP package is really more accurate for e-commerce product reviews.

I am not an NLP researcher, so I simply rely on someone else's library. SnowNLP can also be trained on a custom corpus; when I have time I will train one specifically on stock comments.

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from snownlp import SnowNLP

class myNLP:
    prob = 0.5
    def __init__(self, text):
        self.prob = SnowNLP(text).sentiments
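As mentioned above, SnowNLP can be retrained on your own corpus, which should fit stock-forum language better than the default e-commerce model. A minimal sketch, where neg.txt and pos.txt are hypothetical files with one labelled stock comment per line:

from snownlp import sentiment

# retrain the Bayes sentiment model on a domain-specific corpus
sentiment.train('neg.txt', 'pos.txt')   # hypothetical labelled comment files
sentiment.save('sentiment.marshal')     # persist the retrained model
# then point SnowNLP at the new file by updating data_path in
# snownlp/sentiment/__init__.py (or replacing the shipped sentiment.marshal)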

The main driver:

# -*- coding: UTF-8 -*-
'''
Created on 2017-05-17
@author: luhaiya
@id: 2016110274
@description:
'''
# http://data.eastmoney.com/stockcomment/  list of all stocks
# http://guba.eastmoney.com/list,600000,5.html  forum posts for one stock
# http://quote.eastmoney.com/sh600000.html?stype=stock  look up a single stock
from Crawler import *
from File import *
import json
import sys
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)

def main():
    stocknum = str(600000)
    total = dict()
    for i in range(1, 10):
        page = str(i)
        crawler = Crawler(stocknum, page)
        datalist = crawler.getData()
        comments = File(stocknum+'_page_'+page, 'json', './data/')
        comments.inputData(datalist)
        data = open('./data/'+stocknum+'_page_'+page+'.json', 'r').read()
        jsonData = json.loads(data)
        for detail in jsonData:
            # stock age in years; accounts younger than one year count as 1
            num = '1' if '年' not in detail['age'].encode('utf-8') else detail['age'].encode('utf-8').replace('年', '')
            num = float(num)
            date = detail['time'][4:14].encode('utf-8')
            total[date] = total[date] if date in total.keys() else {'num': 0, 'content': 0}
            total[date]['num'] = total[date]['num'] + num if total[date]['num'] else num
            total[date]['content'] = total[date]['content'] + detail['content']*num if total[date]['content'] else detail['content']*num
    total = json.dumps(total)
    totalfile = File(stocknum, 'json', './data/')
    totalfile.inputData(total)

if __name__ == "__main__":
    main()

4. Front-End Display

The chart uses Baidu's ECharts. A day's sentiment is the weighted average of the sentiment scores of all comments posted that day, with weights positively correlated with each commenter's years of trading experience.
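To make the weighting concrete, here is a small sketch of the aggregation that main() and the page script perform together, simplified so that 'age' is already a number of years (defaulting to 1) and 'time' starts with a YYYY-MM-DD date:

def daily_sentiment(comments):
    # comments: [{'time': 'YYYY-MM-DD ...', 'content': sentiment score, 'age': years}, ...]
    totals = {}
    for c in comments:
        weight = float(c.get('age') or 1)   # stock age in years, default 1
        day = c['time'][:10]
        num, content = totals.get(day, (0.0, 0.0))
        totals[day] = (num + weight, content + c['content'] * weight)
    return {day: content / num for day, (num, content) in totals.items()}

# e.g. two comments on one day with sentiments 0.8 and 0.2 and ages 3 and 1 years:
# weighted score = (0.8*3 + 0.2*1) / (3 + 1) = 0.65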

<!DOCTYPE html>  
<html>  
<head>  
<meta charset="UTF-8">  
<title>Sentiment Chart</title>  
<style>  
body{text-align:center;}  
#mainContainer{width:100%;}  
#fileContainer{width:100%; text-align:center;}  
#picContainer{width: 800px;height:600px;margin:0 auto;}  
</style>  
</head>  
<body>  
<div id = 'mainContainer'>  
<div id = 'fileContainer'>file list goes here</div>  
<div id = 'picContainer'></div>  
</div>  
<script src="http://apps.bdimg.com/libs/jquery/2.1.1/jquery.min.js"></script>   
<script src = "./echarts.js"></script>  
<script>  
main();  
function main(){  
    var stocknum = 600000;  
    getDate(stocknum);  
}  
function getDate(stocknum){  
    var src = "./data/"+stocknum+".json";  
    $.getJSON(src, function (res){  
        var date = [];  
        for(var key in res){  
            key = key.replace('-','/').replace('-','/');  
            date.push(key);  
        }  
        date.sort();  
        var data = [];  
        for (var i = 0; i < date.length; i++) {  
            var dat = date[i].replace('/','-').replace('/','-');  
            data.push(res[dat]['content']/res[dat]['num']);  
        }  
        drawPic(date,data);  
    })  
}  
function drawPic(date, data){  
    // initialize the chart and set options  
    var myChart = echarts.init(document.getElementById('picContainer'));  
    option = {  
        tooltip: {  
            trigger: 'axis',  
            position: function (pt) {  
                return [pt[0], '10%'];  
            }  
        },  
        title: {  
            left: 'center',  
            text: 'Stock sentiment over time',  
        },  
        toolbox: {  
            feature: {  
                dataZoom: {  
                    yAxisIndex: 'none'  
                },  
                restore: {},  
                saveAsImage: {}  
            }  
        },  
        xAxis: {  
            type: 'category',  
            boundaryGap: false,  
            data: date  
        },  
        yAxis: {  
            type: 'value',  
            boundaryGap: [0, '100%']  
        },  
        dataZoom: [{  
            type: 'inside',  
            start: 0,  
            end: 10  
        }, {  
            start: 0,  
            end: 10,  
            handleIcon: 'M10.7,11.9v-1.3H9.3v1.3c-4.9,0.3-8.8,4.4-8.8,9.4c0,5,3.9,9.1,8.8,9.4v1.3h1.3v-1.3c4.9-0.3,8.8-4.4,8.8-9.4C19.5,16.3,15.6,12.2,10.7,11.9z M13.3,24.4H6.7V23h6.6V24.4z M13.3,19.6H6.7v-1.4h6.6V19.6z',  
            handleSize: '80%',  
            handleStyle: {  
                color: '#fff',  
                shadowBlur: 3,  
                shadowColor: 'rgba(0, 0, 0, 0.6)',  
                shadowOffsetX: 2,  
                shadowOffsetY: 2  
            }  
        }],  
        series: [  
            {  
                name:'stocknum',  
                type:'line',  
                smooth:true,  
                symbol: 'none',  
                sampling: 'average',  
                itemStyle: {  
                    normal: {  
                        color: 'rgb(255, 70, 131)'  
                    }  
                },  
                areaStyle: {  
                    normal: {  
                        color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{  
                            offset: 0,  
                            color: 'rgb(255, 158, 68)'  
                        }, {  
                            offset: 1,  
                            color: 'rgb(255, 70, 131)'  
                        }])  
                    }  
                },  
                data: data  
            }  
        ]  
    };  
    // draw the chart  
    myChart.setOption(option);    
}  
</script>  
</body>  
</html>  

[Figure 1: user sentiment over time. Figure 2: actual stock price.]

Figure 1 is the sentiment-over-time chart built from the user comments; in theory, values below 0.5 indicate negative sentiment and values above 0.5 indicate positive sentiment. Figure 2 is the stock's actual price movement.