
Scraping JD.com product data with Scrapy

A JD search result page lists 60 products, but only the first 30 are present in the initial HTML; the remaining 30 are fetched lazily through an AJAX call to s_new.php, which checks the Referer header and takes the ids of the first 30 products as a parameter. The spider below scrapes both halves of every result page, then follows each product link to collect its detailed specs.

# -*- coding: utf-8 -*-
import scrapy
from ..items import JdphoneItem
import sys

# Python 2 only: force UTF-8 as the default encoding.
# On Python 3 these two lines are unnecessary and should be removed.
reload(sys)
sys.setdefaultencoding("utf-8")


class JdSpider(scrapy.Spider):
    name = 'jd'
    # Note: using 'www.jd.com' here can prevent search.jd.com from being crawled.
    allowed_domains = ['jd.com']
    keyword = "手機"  # search keyword: "mobile phone"
    page = 1
    # First 30 products of a result page, rendered directly in the HTML.
    url = 'https://search.jd.com/Search?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%s&cid2=653&cid3=655&page=%d&click=0'
    # Last 30 products, loaded lazily via AJAX; requires the ids of the first 30.
    next_url = 'https://search.jd.com/s_new.php?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%s&cid2=653&cid3=655&page=%d&scrolling=y&show_items=%s'

    def start_requests(self):
        yield scrapy.Request(self.url % (self.keyword, self.keyword, self.page),
                             callback=self.parse)

    def parse(self, response):
        """Scrape the first 30 products of a page; their data sits in the page HTML."""
        ids = []
        for li in response.xpath('//*[@id="J_goodsList"]/ul/li'):
            item = JdphoneItem()
            title = li.xpath('div/div/a/em/text()').extract()            # title
            price = li.xpath('div/div/strong/i/text()').extract()        # price
            comment_num = li.xpath('div/div/strong/a/text()').extract()  # number of reviews
            pid = li.xpath('@data-pid').extract()                        # product id
            ids.append(''.join(pid))
            # Product detail link to follow.
            url = li.xpath('div/div[@class="p-name p-name-type-2"]/a/@href').extract()
            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            item['comment_num'] = ''.join(comment_num)
            item['url'] = ''.join(url)
            if item['url'].startswith('//'):  # protocol-relative link
                item['url'] = 'https:' + item['url']
            elif not item['url'].startswith('https:'):  # unusable link, skip the follow-up
                item['info'] = None
                yield item
                continue
            yield scrapy.Request(item['url'], callback=self.info_parse, meta={"item": item})

        # The AJAX endpoint for the last 30 products checks the Referer header,
        # which must be the actual URL of the current page; with a wrong Referer
        # the request is redirected to https://www.jd.com/?se=deny
        headers = {'referer': response.url}
        self.page += 1
        yield scrapy.Request(self.next_url % (self.keyword, self.keyword, self.page, ','.join(ids)),
                             callback=self.next_parse, headers=headers)

    def next_parse(self, response):
        """Scrape the last 30 products of a page.

        They are served from a separate URL that takes the ids of the
        first 30 products as its show_items parameter.
        """
        for li in response.xpath('//li[@class="gl-item"]'):
            item = JdphoneItem()
            title = li.xpath('div/div/a/em/text()').extract()            # title
            price = li.xpath('div/div/strong/i/text()').extract()        # price
            comment_num = li.xpath('div/div/strong/a/text()').extract()  # number of reviews
            # Product detail link to follow.
            url = li.xpath('div/div[@class="p-name p-name-type-2"]/a/@href').extract()
            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            item['comment_num'] = ''.join(comment_num)
            item['url'] = ''.join(url)
            if item['url'].startswith('//'):
                item['url'] = 'https:' + item['url']
            elif not item['url'].startswith('https:'):
                item['info'] = None
                yield item
                continue
            yield scrapy.Request(item['url'], callback=self.info_parse, meta={"item": item})

        if self.page < 200:
            self.page += 1
            yield scrapy.Request(self.url % (self.keyword, self.keyword, self.page),
                                 callback=self.parse)

    def info_parse(self, response):
        """Follow a product link; store all details in the item's nested 'info' field."""
        item = response.meta['item']
        item['info'] = {}
        type_ = response.xpath('//div[@class="inner border"]/div[@class="head"]/a/text()').extract()
        name = response.xpath('//div[@class="item ellipsis"]/text()').extract()
        item['info']['type'] = ''.join(type_)
        item['info']['name'] = ''.join(name)

        # Each Ptable-item block is one spec group: an h3 heading plus dt/dd pairs.
        for div in response.xpath('//div[@class="Ptable"]/div[@class="Ptable-item"]'):
            h3 = ''.join(div.xpath('h3/text()').extract())
            if h3 == '':
                h3 = "未知"  # "unknown"
            dt = div.xpath('dl/dt/text()').extract()
            dd = div.xpath('dl/dd[not(@class)]/text()').extract()
            item['info'][h3] = {}
            for t, d in zip(dt, dd):
                item['info'][h3][t] = d
        yield item
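The spider imports JdphoneItem from the project's items.py, which is not shown above. A minimal sketch that would match the fields the spider uses; the field set is inferred from the spider code, so treat it as an assumption rather than the original file:

# items.py -- minimal sketch; fields inferred from the spider above
import scrapy


class JdphoneItem(scrapy.Item):
    title = scrapy.Field()        # product title
    price = scrapy.Field()        # listed price
    comment_num = scrapy.Field()  # number of reviews
    url = scrapy.Field()          # product detail URL
    info = scrapy.Field()         # nested dict of detailed specs, or None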
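To run it, drop the spider into an ordinary Scrapy project. The settings below are a sketch, not from the original post; every value is an assumption meant to keep JD from rejecting rapid-fire requests:

# settings.py -- sketch; values are assumptions, tune as needed
ROBOTSTXT_OBEY = False  # the search endpoints may be disallowed by robots.txt
DOWNLOAD_DELAY = 1      # slow down to avoid being blocked
USER_AGENT = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
              'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0 Safari/537.36')

Then start the crawl and export the scraped items with Scrapy's built-in feed exporter:

scrapy crawl jd -o items.json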