
Crawling Biquge novels with Scrapy

This morning I had nothing better to do, so I scraped some novels from Biquge and stored them in MongoDB, figuring they might come in handy someday if I ever build a novel site. Unfortunately my connection was terrible: an hour of crawling only got me a few hundred chapters, so crawling the whole site would take until who knows when. As for the Scrapy framework itself, you don't know what you're missing until you try it. It is far simpler than hand-rolling a crawler with requests and BeautifulSoup. Enough talk, on to the code.

I won't waste words on creating a virtual environment with virtualenv and pip-installing Scrapy. Once the project is created, the project directory contains the following files.
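Assuming the project is named clawer (the name used in the imports below), the generated layout looks roughly like this:

clawer/
    scrapy.cfg            # deploy configuration
    clawer/
        __init__.py
        items.py          # item field definitions
        middlewares.py    # spider/downloader middlewares
        pipelines.py      # item pipelines (persistence)
        settings.py       # project settings
        spiders/          # our spiders live here
            __init__.py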

First open items.py and define the fields that will hold the scraped data; this makes the later steps easier. Here name is the novel's title, author is the author, and content is the chapter text (a total field is also declared because parse_item below assigns it).

import scrapy


class ClawerItem(scrapy.Item):
    # fields that hold the scraped data
    name = scrapy.Field()      # novel title
    author = scrapy.Field()    # author name
    content = scrapy.Field()   # chapter text
    total = scrapy.Field()     # extra field assigned in parse_item below

With the fields defined, we write the spider itself in the spiders folder. The rules can be understood as telling the crawler which links to follow on its own: the regular expressions match any URL under http://www.biquge.com.tw/, which means the whole Biquge site gets crawled, and callback names the parse_item method to run on matching pages. For the XPath expressions in parse_item, just open the developer tools in your browser, locate the element you need, right-click and copy its XPath. If you don't know what XPath is, the runoob tutorials are a good place to start; a quick way to test the expressions yourself is shown after the code.

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from clawer.items import ClawerItem


class NovelSpider(CrawlSpider):
    name = 'novel'
    allowed_domains = ['www.biquge.com.tw']
    start_urls = ['http://www.biquge.com.tw/']
    rules = (
        # category listing pages
        Rule(LinkExtractor(allow=r'http://www\.biquge\.com\.tw/[a-z]+/$')),
        # per-novel chapter index pages
        Rule(LinkExtractor(allow=r'http://www\.biquge\.com\.tw/\d+_\d+/$')),
        # individual chapter pages -> handled by parse_item
        Rule(LinkExtractor(allow=r'http://www\.biquge\.com\.tw/\d+_\d+/\d+\.html$'), callback='parse_item'),
    )

    def parse_item(self, response):
        item = ClawerItem()
        # XPaths copied from the browser developer tools, tied to Biquge's page layout
        item['name'] = response.xpath('//div[@class="bookname"]/div/a[3]/text()').extract_first()
        item['author'] = response.xpath('//*[@id="newscontent"]/div[1]/ul/li[1]/span[3]/text()').extract_first()
        item['total'] = response.xpath('//*[@id="wrapper"]/div[4]/div/div[1]/a[2]/text()').extract_first()
        # join the chapter's text nodes, skipping whitespace-only ones
        contents = response.xpath('//*[@id="content"]/text()').extract()
        item['content'] = '\n'.join(c.strip() for c in contents if c.strip())
        return item
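If an XPath copied from the developer tools stops matching (Biquge's markup can change, and the expressions above are tied to its current layout), scrapy shell is a convenient way to test selectors against a live page. The URL below is only a placeholder for a real chapter URL from the site:

scrapy shell "http://www.biquge.com.tw/<novel-id>/<chapter-id>.html"

# inside the interactive shell, try the selectors from parse_item:
>>> response.xpath('//div[@class="bookname"]/div/a[3]/text()').extract_first()
>>> response.xpath('//*[@id="content"]/text()').extract()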

Next, the persistence work is done in pipelines.py, which writes each item into MongoDB.

import logging

import pymongo

from scrapy.exceptions import DropItem
from scrapy.utils.project import get_project_settings

logger = logging.getLogger(__name__)
settings = get_project_settings()


class ClawerPipeline(object):

    def __init__(self):
        connection = pymongo.MongoClient(settings['MONGODB_SERVER'], settings['MONGODB_PORT'])
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        # Drop items that are missing any of the fields we care about
        for field in ('name', 'author', 'content'):
            if not item.get(field):
                raise DropItem("Missing %s in %s" % (field, item))
        # Insert the chapter into MongoDB
        new_novel = {
            "name": item['name'],
            "author": item['author'],
            "content": item['content'],
        }
        self.collection.insert_one(new_novel)
        logger.debug("Item written to MongoDB %s/%s",
                     settings['MONGODB_DB'], settings['MONGODB_COLLECTION'])
        return item

Finally, tweak the configuration in settings.py:

BOT_NAME = 'clawer'

SPIDER_MODULES = ['clawer.spiders']
NEWSPIDER_MODULE = 'clawer.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \
                Chrome/27.0.1453.94 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Cookies (enabled by default)
COOKIES_ENABLED = True

MONGODB_SERVER = '47.106.144.34'
MONGODB_PORT = 27017
MONGODB_DB = 'xuanhuan'
MONGODB_COLLECTION = 'novel'

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'clawer.middlewares.ClawerSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'clawer.middlewares.ClawerDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'clawer.pipelines.ClawerPipeline': 300,
}

LOG_LEVEL = 'DEBUG'

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

Now just run scrapy crawl novel on the command line and the spider will start crawling chapters and saving them into MongoDB.
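To double-check that the chapters really made it into MongoDB, a quick pymongo query works. This is just a sketch that assumes the same connection settings as above and pymongo 3.7+ (older versions use count() instead of count_documents()):

import pymongo

# same connection settings as in settings.py
client = pymongo.MongoClient('47.106.144.34', 27017)
collection = client['xuanhuan']['novel']

print(collection.count_documents({}))   # chapters stored so far
for doc in collection.find().limit(3):
    print(doc['name'], len(doc.get('content', '')))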