程式人生 > python學習筆記17:下載微信公眾號相關文章

python學習筆記17:下載微信公眾號相關文章

目的:從「從零開始學自動化測試」公眾號中下載“pytest”一系列文件

1、搜尋微訊號文章關鍵字搜尋

2、對搜尋結果前N頁進行解析,獲取文章標題和對應URL

主要使用的是 requests 和 bs4 中的 BeautifulSoup

Weixin.py

import requests
from urllib.parse import quote
from bs4 import BeautifulSoup
import re
from WeixinSpider.HTML2doc import MyHTMLParser

class WeixinSpider(object):
    """Search Sogou's Weixin article search for a public account and collect
    {title: url} entries whose titles contain a given keyword."""

    def __init__(self, gzh_name, pageno, keyword):
        self.GZH_Name = gzh_name        # public-account name, used as the search query
        self.pageno = pageno            # number of result pages to scan
        self.keyword = keyword.lower()  # keyword filter, matched case-insensitively
        self.page_url = []              # search-result page URLs built by get_page_url()
        self.article_list = []          # one {title: url} dict per matching article
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
        self.timeout = 5
        # Characters that are not allowed in Windows file names (plus CR/LF);
        # they are stripped from article titles before the titles are used
        # as document file names.
        self.pattern = r'[\\/:*?"<>|\r\n]+'

    def get_page_url(self):
        """Build the Sogou Weixin search-result URLs for pages 1..pageno."""
        for i in range(1, self.pageno + 1):
            # e.g. https://weixin.sogou.com/weixin?query=<account>&...&type=2&page=2&ie=utf8
            url = "https://weixin.sogou.com/weixin?query=%s&_sug_type_=&s_from=input&_sug_=n&type=2&page=%s&ie=utf8" \
                  % (quote(self.GZH_Name), i)
            self.page_url.append(url)

    def get_article_url(self):
        """Fetch every result page and record each article whose title
        contains the keyword.

        Bug fix: the original created a single dict once, before the loop,
        and appended that same object on every match — article_list ended up
        holding many references to one ever-growing dict, so the caller
        re-processed every article once per match.  Each match now gets its
        own single-entry {title: url} dict.
        """
        for url in self.page_url:
            response = requests.get(url, headers=self.headers, timeout=self.timeout)
            result = BeautifulSoup(response.text, 'html.parser')
            articles = result.select('ul[class="news-list"] > li > div[class="txt-box"] > h3 > a ')
            for a in articles:
                if self.keyword in a.text.lower():
                    # Strip characters that cannot appear in a file name.
                    new_text = re.sub(self.pattern, "", a.text)
                    self.article_list.append({new_text: a["href"]})



def main():
    """Script entry point: search the public account for 'pytest' articles,
    download each matching article and save it as a Word document.

    Bug fix: the original ran all of this (including network requests) at
    module import time; it is now guarded behind __main__ so importing the
    module has no side effects.
    """
    headers = {'User-Agent':
                   'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
    timeout = 5
    gzh_name = 'pytest文件'
    My_GZH = WeixinSpider(gzh_name, 5, 'pytest')
    My_GZH.get_page_url()
    My_GZH.get_article_url()
    for article in My_GZH.article_list:
        # Each entry maps article title -> article URL.
        for (key, value) in article.items():
            html_response = requests.get(value, headers=headers, timeout=timeout)
            # MyHTMLParser converts the article HTML to a Word document.
            myHTMLParser = MyHTMLParser(key)
            myHTMLParser.feed(html_response.text)
            myHTMLParser.doc.save(myHTMLParser.docfile)


if __name__ == "__main__":
    main()

HTML2doc.py

from html.parser import HTMLParser
import requests
from docx import Document
import re
from docx.shared import RGBColor
import docx


class MyHTMLParser(HTMLParser):
    """Convert a Weixin article's HTML into a Word document.

    Headings (<h1>..<h9>) become document headings, <p> text is accumulated
    and flushed as paragraphs, <code> content is written in grey, and
    self-closing <img .../> tags are downloaded and embedded.
    """

    def __init__(self, docname):
        HTMLParser.__init__(self)
        self.docname = docname
        # NOTE(review): python-docx writes docx-format content; the .doc
        # extension is kept from the original (Word still opens it).
        self.docfile = r"D:\pytest\%s.doc" % self.docname
        self.doc = Document()
        self.title = False          # True while inside an <h*> tag
        self.code = False           # True while inside a <code> tag
        self.text = ''              # accumulated text of the currently open <p>
        self.processing = None      # tag name of the open <p>, or None
        self.codeprocessing = None  # tag name of the open <code>, or None
        self.picindex = 1           # sequence number appended to image file names
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
        self.timeout = 5

    def handle_startendtag(self, tag, attrs):
        """Download a self-closing <img .../> and embed it in the document.

        Bug fix: the original referenced picname/picdata unconditionally,
        raising NameError whenever an img tag lacked the data-type or
        data-src attribute; such images are now skipped.
        """
        if tag != "img" or not attrs:
            return
        picname = None
        picdata = None
        for (variable, value) in attrs:
            if variable == "data-type":
                # data-type carries the image format (png, jpeg, ...).
                picname = r"D:\pytest\%s%s.%s" % (self.docname, self.picindex, value)
            elif variable == "data-src":
                picdata = requests.get(value, headers=self.headers, timeout=self.timeout)
        if picname is None or picdata is None:
            return  # not a downloadable article image
        self.picindex = self.picindex + 1
        with open(picname, "wb") as pic:
            pic.write(picdata.content)
        try:
            self.doc.add_picture(picname)
        except docx.image.exceptions.UnexpectedEndOfFileError as e:
            # A truncated download would otherwise abort the whole document;
            # log it and keep converting.
            print(e)

    def handle_starttag(self, tag, attrs):
        """Record which construct (heading / paragraph / code) we are inside."""
        if re.match(r"h(\d)", tag):
            self.title = True
        if tag == "p":
            self.processing = tag
        if tag == "code":
            self.code = True
            self.codeprocessing = tag

    def handle_data(self, data):
        """Route text to a heading, the paragraph buffer, or a grey code run."""
        if self.title == True:
            self.doc.add_heading(data, level=2)
        if self.processing:
            # Paragraph text may arrive in several chunks; flush on </p>.
            self.text = self.text + data
        if self.code == True:
            p = self.doc.add_paragraph()
            run = p.add_run(data)
            run.font.color.rgb = RGBColor(111, 111, 111)

    def handle_endtag(self, tag):
        # Any end tag terminates heading mode (headings hold a single chunk).
        self.title = False
        if tag == self.processing:
            # Flush the accumulated paragraph text as one paragraph.
            self.doc.add_paragraph(self.text)
            self.processing = None
            self.text = ''
        if tag == self.codeprocessing:
            self.code = False


執行結果:

缺少部分文件,如pytest文件4,是因為搜狗微信文章搜尋結果中就沒有