
Crawling Zhihu User Information with Scrapy


  Approach: start from one high-profile Zhihu user (a "big V"), fetch that user's followee list and follower list, and scrape the detailed profile of the user and of everyone on those lists. Then apply the same step recursively to each newly discovered user's followee and follower lists, so the crawl fans out layer by layer and eventually collects a large amount of user information. The JSON shape returned by the list endpoints, which drives this recursion, is sketched just below.
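Before looking at the Scrapy project itself, it helps to see what one page of a list endpoint looks like: a "data" array of user objects plus a "paging" object whose "is_end" and "next" fields drive pagination. The probe below is a minimal sketch only; it uses the requests library and a trimmed include parameter, neither of which is part of the project, and depending on Zhihu's current API policy the authorization header from settings.py may also be required:

import requests

# Illustrative probe of one followee page (the real spider below passes a
# much longer include parameter and the project's default request headers)
url = ('https://www.zhihu.com/api/v4/members/excited-vczh/followees'
       '?include=data[*].answer_count,follower_count,gender&offset=0&limit=20')
resp = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
page = resp.json()

for user in page.get('data', []):
    print(user.get('url_token'), user.get('follower_count'))
if not page['paging']['is_end']:
    print('next page:', page['paging']['next'])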

1. Create a new Scrapy project

scrapy startproject zhihuuser

  Change into the newly created directory:

cd zhihuuser

  Generate a new spider:

scrapy genspider zhihu www.zhihu.com

2. We crawl a large number of Zhihu users by starting from the well-known "big V" 輪子哥 (url_token: excited-vczh) and expanding outward from his profile.

a) Define the spider file, spiders/zhihu.py (start user, crawl URLs, parsing rules, etc.):

# -*- coding: utf-8 -*-
import json

from scrapy import Spider, Request
from zhihuuser.items import UserItem


class ZhihuSpider(Spider):
    name = 'zhihu'
    allowed_domains = ['zhihu.com']
    start_urls = ['http://zhihu.com/']

    # Crawl targets: the start user plus the API templates for a user's
    # profile, followee list and follower list
    start_user = 'excited-vczh'
    user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}'
    user_query = 'allow_message,is_followed,is_following,is_org,is_blocking,employments,answer_count,follower_count,articles_count,gender,badge[?(type=best_answerer)].topics'
    follows_url = 'https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&offset={offset}&limit={limit}'
    follows_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'
    followers_url = 'https://www.zhihu.com/api/v4/members/{user}/followers?include={include}&offset={offset}&limit={limit}'
    followers_query = 'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'

    # Issue the initial requests: the start user's profile, followees and followers
    def start_requests(self):
        yield Request(self.user_url.format(user=self.start_user, include=self.user_query),
                      callback=self.parseUser)
        yield Request(self.follows_url.format(user=self.start_user, include=self.follows_query,
                                              offset=0, limit=20),
                      callback=self.parseFollows)
        yield Request(self.followers_url.format(user=self.start_user, include=self.followers_query,
                                                offset=0, limit=20),
                      callback=self.parseFollowers)

    # Parse a user's detailed profile into a UserItem
    def parseUser(self, response):
        result = json.loads(response.text)
        item = UserItem()
        for field in item.fields:
            if field in result.keys():
                item[field] = result.get(field)
        yield item
        # Recurse: also fetch this user's followee and follower lists,
        # so the crawl expands layer by layer
        yield Request(self.follows_url.format(user=result.get('url_token'), include=self.follows_query,
                                              offset=0, limit=20),
                      callback=self.parseFollows)
        yield Request(self.followers_url.format(user=result.get('url_token'), include=self.followers_query,
                                                offset=0, limit=20),
                      callback=self.parseFollowers)

    # Parse one page of the followee list
    def parseFollows(self, response):
        results = json.loads(response.text)
        if 'data' in results.keys():
            for result in results.get('data'):
                yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query),
                              callback=self.parseUser)
        # Follow pagination until the API reports the last page
        if 'paging' in results.keys() and results.get('paging').get('is_end') == False:
            next_page = results.get('paging').get('next')
            yield Request(next_page, callback=self.parseFollows)

    # Parse one page of the follower list
    def parseFollowers(self, response):
        results = json.loads(response.text)
        if 'data' in results.keys():
            for result in results.get('data'):
                yield Request(self.user_url.format(user=result.get('url_token'), include=self.user_query),
                              callback=self.parseUser)
        if 'paging' in results.keys() and results.get('paging').get('is_end') == False:
            next_page = results.get('paging').get('next')
            yield Request(next_page, callback=self.parseFollowers)
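One detail worth noting: the layer-by-layer recursion does not loop forever on users who follow each other, because Scrapy's default duplicate filter drops any request whose URL has already been scheduled. If you still want to bound the crawl explicitly, Scrapy's standard depth and item-count limits can be set in settings.py (these are optional additions, not part of the original project):

# Optional safety limits for settings.py (standard Scrapy settings)
DEPTH_LIMIT = 3                   # stop recursing three hops from the start user
CLOSESPIDER_ITEMCOUNT = 100000    # close the spider after this many items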

b) Define the items.py file (declare the fields of the scraped data to keep it structured):

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

from scrapy import Field, Item


class UserItem(Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    allow_message = Field()
    answer_count = Field()
    articles_count = Field()
    avatar_url = Field()
    avatar_url_template = Field()
    badge = Field()
    employments = Field()
    follower_count = Field()
    gender = Field()
    headline = Field()
    id = Field()
    name = Field()
    type = Field()
    url = Field()
    url_token = Field()
    user_type = Field()
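To see how parseUser fills this item, here is a small standalone sketch with an invented response fragment (the field values are made up for illustration). Keys the item does not declare, such as extra_field below, are simply skipped:

from zhihuuser.items import UserItem

# Invented fragment of a /members/{user} JSON response
result = {'id': 'abc123', 'name': 'example', 'url_token': 'example-user',
          'follower_count': 42, 'extra_field': 'ignored'}

item = UserItem()
for field in item.fields:      # same loop as parseUser uses
    if field in result:
        item[field] = result[field]

print(dict(item))
# {'id': 'abc123', 'name': 'example', 'url_token': 'example-user', 'follower_count': 42}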

c) Define the pipelines.py file (store the data in MongoDB):

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo

# Store scraped items in MongoDB
class MongoPipeline(object):

    collection_name = 'users'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # Upsert keyed on url_token so re-crawled users are deduplicated
        self.db[self.collection_name].replace_one(
            {'url_token': item['url_token']}, dict(item), upsert=True)
        return item
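Once a crawl has run for a while, the stored users can be inspected directly with pymongo. A minimal check, assuming the MONGO_URI and MONGO_DATABASE values from settings.py below (count_documents requires pymongo 3.7+):

import pymongo

client = pymongo.MongoClient('localhost')
db = client['zhihu']

print(db['users'].count_documents({}))   # how many users have been stored so far
print(db['users'].find_one({}, {'_id': 0, 'name': 1, 'url_token': 1, 'follower_count': 1}))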

d) Define the settings.py file (enable the MongoDB pipeline, set default request headers, and ignore robots.txt):

# -*- coding: utf-8 -*-
BOT_NAME = 'zhihuuser'

SPIDER_MODULES = ['zhihuuser.spiders']

# Do not obey robots.txt rules, which would otherwise restrict what can be crawled
ROBOTSTXT_OBEY = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',
  'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'zhihuuser.pipelines.MongoPipeline': 300,
}


MONGO_URI = 'localhost'
MONGO_DATABASE = 'zhihu'
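The settings above leave Scrapy's request rate at its defaults. If the crawl gets rate-limited, standard throttling settings can be added to the same file (these are optional and not part of the original configuration):

# Optional politeness settings (standard Scrapy, not in the original post)
DOWNLOAD_DELAY = 0.5                  # seconds between requests to the same domain
AUTOTHROTTLE_ENABLED = True           # adapt the delay to server response times
CONCURRENT_REQUESTS_PER_DOMAIN = 8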

3. Start the crawl:

scrapy crawl zhihu
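In addition to the MongoDB pipeline, Scrapy's built-in feed export can also dump the scraped items straight to a file from the command line:

scrapy crawl zhihu -o users.json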

