
A Simple Python Crawler (Part 2)

Goal: starting from the Baidu Baike "Python" page, crawl the entries it links to and write them out as an HTML file.

The program is organized into five modules (the original post showed a screenshot of the project layout): spider_main (the scheduler below), url_manager, html_downloader, html_parser, and html_output.
Main function

from test import url_manager
from test import html_downloader
from test import html_parser
from test import html_output

if __name__ == "__main__":   # the main guard -- note the double underscores on each side of __name__; don't misspell it!
    root_url = "http://baike.baidu.com/view/21087.htm"  # entry URL
    obj_spider = SpiderMain()   # create a spider
    obj_spider.craw(root_url)   # call the spider's craw method to start crawling

The snippet above instantiates a class named SpiderMain and calls its craw method (in the actual file the class definition precedes the main guard). The class is implemented as follows:
class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()  # URL manager -- each component below is its own class
        self.downloader = html_downloader.HtmlDownloader()  # HTML downloader
        self.parser = html_parser.HtmlParser()  # HTML parser
        self.output = html_output.HtmlOutPuter()  # HTML outputer

    def craw(self, root_url):  # the crawler's scheduler
        count = 1  # index of the URL currently being crawled
        # seed the URL manager with the entry URL
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():  # while there are URLs waiting to be crawled
            try:

                new_url = self.urls.get_new_url()  # fetch the next URL
                print('craw %d :%s' % (count, new_url))
                # download the page
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # extract new URLs and data
                self.urls.add_new_urls(new_urls)  # hand the new URLs to the URL manager
                self.output.collect_data(new_data)  # collect the data

                if count == 100:  # stop after 100 pages
                    break
                count += 1
            except Exception as error:
                print(error)

        # write out the collected data
        self.output.output_html()

Following the order in which craw calls the components:

URL manager

# The URL manager maintains the set of URLs waiting to be crawled and the set of URLs already crawled
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    # add a single URL
    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    # add a collection of URLs
    def add_new_urls(self, new_urls):
        if new_urls is None or len(new_urls) == 0:
            return
        for url in new_urls:
            self.add_new_url(url)

    # check whether any URLs are still waiting to be crawled
    def has_new_url(self):
        return len(self.new_urls) != 0

    # pop a URL to crawl and move it into the crawled set
    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
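
The two sets give deduplication for free: a URL that is already queued or already crawled is silently ignored. A quick check of that behavior (a hypothetical interactive session, assuming UrlManager is importable as defined above):

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_url('http://baike.baidu.com/view/21087.htm')  # duplicate, ignored
print(manager.has_new_url())  # True -- exactly one URL is queued
url = manager.get_new_url()   # pops the URL and records it in old_urls
manager.add_new_url(url)      # already crawled, ignored
print(manager.has_new_url())  # False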


Downloader

import urllib.request


# downloader -- fetches a web page
class HtmlDownloader(object):

    def download(self, url):
        if url is None:
            return None

        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read().decode('utf-8')
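
urlopen as used above sends Python's default User-Agent and has no timeout, so some sites may reject or stall the request. A slightly more defensive variant (a sketch, not part of the original course code; the header string and 10-second timeout are arbitrary choices):

import urllib.error
import urllib.request


class HtmlDownloader(object):

    def download(self, url):
        if url is None:
            return None
        # pretend to be a browser; some sites reject the default Python-urllib agent
        request = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        try:
            response = urllib.request.urlopen(request, timeout=10)
        except urllib.error.URLError as error:
            print('download failed: %s' % error)
            return None
        if response.getcode() != 200:
            return None
        return response.read().decode('utf-8')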

Parser

# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
import re
import urllib.parse
import urllib.request


class HtmlParser(object):

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # hrefs look like /view/123.htm
        links = soup.find_all('a', href=re.compile(r'/view/\d+\.htm'))  # note: .htm, not .html
        for link in links:
            new_url = link['href']
            # join new_url onto page_url to form an absolute URL
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            print(new_url) # /view/10812319.htm
            print(new_full_url) # http://baike.baidu.com/view/10812319.htm
            new_urls.add(new_full_url)
        return new_urls


    def _get_new_data(self, page_url, soup):
        res_data = {}

        # url
        res_data['url'] = page_url

        # <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title")
        # if the 'lemmaWgt-lemmaTitle-title' node is missing, return empty fields
        if title_node is None:
            res_data['title'] = ''
            res_data['summary'] = ''
            return res_data
        else:
            title_node = title_node.find("h1")
            res_data['title'] = title_node.get_text()

        # <div class="lemma-summary">
        summary_node = soup.find('div', class_="lemma-summary")
        if summary_node is None:
            res_data['summary'] = ''
        else:
            res_data['summary'] = summary_node.get_text()

        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        # html_cont is already a decoded str, so no from_encoding argument is needed
        soup = BeautifulSoup(html_cont, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

# Python 3 reorganized urllib and urllib2 into submodules:
# urllib.request, urllib.response, urllib.parse, urllib.error, etc.
# urljoin now lives at urllib.parse.urljoin
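
For reference, this is what urljoin does with the relative links found above (the values match the debug prints in _get_new_urls):

from urllib.parse import urljoin

page_url = 'http://baike.baidu.com/view/21087.htm'
print(urljoin(page_url, '/view/10812319.htm'))
# http://baike.baidu.com/view/10812319.htm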

Outputer

class HtmlOutPuter(object):

    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('outputer.html', 'w', encoding='utf-8')

        fout.write('<html>')
        fout.write("<head><meta http-equiv=\"content-type\" content=\"text/html;charset=utf-8\"></head>")
        fout.write('<body>')
        fout.write('<table>')

        # (Python 2's default encoding was ascii, hence the explicit utf-8 when opening the file)
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'])
            fout.write('<td>%s</td>' % data['summary'])
            fout.write('</tr>')

        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')

        fout.close()
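
One caveat: the scraped title and summary are written into the HTML verbatim, so any <, > or & in the text would break the markup. A variant (a sketch, not part of the original course code) that escapes the fields with html.escape and uses a with block so the file is closed even if a write fails:

import html


class HtmlOutPuter(object):

    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is not None:
            self.datas.append(data)

    def output_html(self):
        # 'with' guarantees the file is closed, even on error
        with open('outputer.html', 'w', encoding='utf-8') as fout:
            fout.write('<html>')
            fout.write('<head><meta http-equiv="content-type" content="text/html;charset=utf-8"></head>')
            fout.write('<body>')
            fout.write('<table>')
            for data in self.datas:
                fout.write('<tr>')
                # html.escape guards against <, > and & in the scraped text
                fout.write('<td>%s</td>' % html.escape(data['url']))
                fout.write('<td>%s</td>' % html.escape(data['title']))
                fout.write('<td>%s</td>' % html.escape(data['summary']))
                fout.write('</tr>')
            fout.write('</table>')
            fout.write('</body>')
            fout.write('</html>')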

Run spider_main to see the crawl results.

The code was typed along while following the instructor's course on imooc; this is just an entry-level crawler... there is still a long road ahead.....