Crawling Zhihu users with Scrapy + a proxy pool

spider:

# -*- coding: utf-8 -*-
import json

from scrapy import Spider, Request

from zhihuuser.items import UserItem
# https://www.cnblogs.com/lei0213/p/7904994.html

class ZhihuSpider(Spider):
    # handle_httpstatus_list = [403]
    name = 'zhihu'

    allowed_domains = ['www.zhihu.com']
    start_urls = ['http://www.zhihu.com/']
    start_user = 'Boyka2016'

    user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}'
    user_query = 'data[*].cover,ebook_type,comment_count,voteup_count'

    followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?' \
                    'include={include}&offset={offset}&limit={limit}'
    followers_query = 'data[*].answer_count,articles_count,gender,follower_count,' \
                      'is_followed,is_following,badge[?(type=best_answerer)].topics'

    follows_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}' \
                  '&offset={offset}&limit={limit}'
    follows_query = 'data[*].answer_count,articles_count,gender,follower_count,' \
                    'is_followed,is_following,badge[?(type=best_answerer)].topics'

    def start_requests(self):
        # Seed the crawl with the start user's profile, followers and followees
        yield Request(self.user_url.format(user=self.start_user, include=self.user_query),
                      callback=self.parse_user, dont_filter=True)
        yield Request(self.followers_url.format(user=self.start_user, include=self.followers_query, offset=0, limit=20),
                      callback=self.parse_followers, dont_filter=True)
        yield Request(self.follows_url.format(user=self.start_user, include=self.follows_query, offset=0, limit=20),
                      callback=self.parse_follows, dont_filter=True)

    def parse_user(self, response):
        result = json.loads(response.text)
        item = UserItem()
        for field in item.fields:  # item.fields lists every field declared on UserItem
            if field in result.keys():
                item[field] = result.get(field)
        yield item

        # Fetch the current user's followers
        yield Request(self.followers_url.format(user=result.get('url_token'), include=self.followers_query,
                                                offset=0, limit=20), callback=self.parse_followers, dont_filter=True)
        # Fetch the users the current user follows
        yield Request(self.follows_url.format(user=result.get('url_token'), include=self.follows_query,
                                              offset=0, limit=20), callback=self.parse_follows, dont_filter=True)

    def parse_followers(self, response):
        results = json.loads(response.text)

        # Queue a profile request for every follower on this page
        if 'data' in results.keys():
            for result in results.get('data'):
                yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query),
                              callback=self.parse_user, dont_filter=True)

        # Follow pagination until the API reports the last page
        if 'paging' in results.keys() and not results.get('paging').get('is_end'):
            next_page = results.get('paging').get('next')
            yield Request(next_page, callback=self.parse_followers, dont_filter=True)

    def parse_follows(self, response):
        results = json.loads(response.text)

        # Queue a profile request for every followee on this page
        if 'data' in results.keys():
            for result in results.get('data'):
                yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query),
                              callback=self.parse_user, dont_filter=True)

        # Follow pagination until the API reports the last page
        if 'paging' in results.keys() and not results.get('paging').get('is_end'):
            next_page = results.get('paging').get('next')
            yield Request(next_page, callback=self.parse_follows, dont_filter=True)

    def parse(self, response):
        # Default callback for start_urls; unused, every request above sets its own callback
        pass
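
The spider imports UserItem from zhihuuser.items, which the original post does not include. A minimal sketch of what that file could look like, assuming a field list derived from the include queries above (the exact fields are my assumption, not the author's original items.py):

items.py:

# -*- coding: utf-8 -*-
from scrapy import Item, Field

class UserItem(Item):
    # Hypothetical field list; parse_user only copies fields whose names also
    # appear as keys in the API response, so extra fields here are harmless.
    id = Field()
    name = Field()
    url_token = Field()
    headline = Field()
    gender = Field()
    answer_count = Field()
    articles_count = Field()
    follower_count = Field()
    voteup_count = Field()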

settings.py:

# -*- coding: utf-8 -*-

# Scrapy settings for zhihuuser project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'zhihuuser'

SPIDER_MODULES = ['zhihuuser.spiders']
NEWSPIDER_MODULE = 'zhihuuser.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'zhihuuser (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'
}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'zhihuuser.middlewares.ZhihuuserSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html

DOWNLOADER_MIDDLEWARES = {
    'zhihuuser.middlewares.ProxyMiddleware': 200,
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': 543,
}


# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'zhihuuser.pipelines.MongoPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# HTTPERROR_ALLOW_ALL=True

MONGO_URI = 'localhost'
MONGO_DB = 'zhihu'
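
ITEM_PIPELINES enables zhihuuser.pipelines.MongoPipeline, which is also not shown in the original post. A minimal sketch of such a pipeline, assuming pymongo and an upsert keyed on url_token so a re-crawled user overwrites their old record (the 'users' collection name is an assumption):

pipelines.py:

# -*- coding: utf-8 -*-
import pymongo

class MongoPipeline(object):
    collection_name = 'users'  # assumed collection name

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Read MONGO_URI / MONGO_DB defined at the bottom of settings.py
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB'),
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # Upsert on url_token so the same user is stored only once
        self.db[self.collection_name].update_one(
            {'url_token': item.get('url_token')}, {'$set': dict(item)}, upsert=True)
        return item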

Note that DOWNLOADER_MIDDLEWARES must be configured as above; otherwise the proxy pool in middlewares.py is never used.
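
Before launching the crawl with scrapy crawl zhihu, it is worth confirming that the pool is actually serving proxies. A quick sanity check against the pool address used in middlewares.py below:

import requests

# Should print a proxy in "ip:port" form if the pool is healthy
print(requests.get("http://47.106.229.200:5010/get").text)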

middlewares.py:

import requests

class ProxyMiddleware(object):

    def get_proxy(self):
        # Fetch one proxy ("ip:port" as plain text) from the proxy pool service
        return requests.get("http://47.106.229.200:5010/get").text

    def delete_proxy(self, proxy):
        # Remove a dead proxy from the pool
        requests.get("http://47.106.229.200:5010/delete?proxy={}".format(proxy))

    # Standalone usage example for the pool (kept commented out; Scrapy goes
    # through process_request below instead):
    # def get_html():
    #     retry_count = 5
    #     proxy = get_proxy()
    #     while retry_count > 0:
    #         try:
    #             # Access the page through the proxy
    #             html = requests.get('https://www.example.com', proxies={"http": "http://{}".format(proxy)})
    #             return html
    #         except Exception:
    #             retry_count -= 1
    #     # Failed 5 times: remove the proxy from the pool
    #     delete_proxy(proxy)
    #     return None

    def process_request(self, request, spider):
        # Attach a fresh proxy from the pool to every outgoing request,
        # in scheme://IP:port form (e.g. http://5.39.85.100:30059)
        pro_addr = self.get_proxy()
        request.meta['proxy'] = 'http://' + pro_addr

    def process_response(self, request, response, spider):
        return response
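
One gap worth noting: delete_proxy is defined but never called, so dead proxies stay in the pool and failed requests rely solely on RetryMiddleware. A possible extra method for ProxyMiddleware that evicts the failing proxy and retries through a fresh one (the eviction policy is my assumption, not part of the original post):

    def process_exception(self, request, exception, spider):
        # Hypothetical addition: on a download error, drop the proxy that just
        # failed from the pool, then retry the request through a new proxy.
        proxy = request.meta.get('proxy', '')
        if proxy.startswith('http://'):
            self.delete_proxy(proxy[len('http://'):])
        request.meta['proxy'] = 'http://' + self.get_proxy()
        return request  # returning a Request re-schedules it with the downloader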