
Scraping JD Bra Data with Python (Part 3)

In the previous post we only scraped one brand of bras; this time let's scrape a few more brands.

## 1. Scraping the URLs of the different brands

Get the product ids first. We could scrape each result's href directly, but some of those links come with https and some don't, so it's simpler to just take the id and build the URL from it ourselves (illustrated below, followed by the full script).
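Purely as an illustration (the real value of the id attribute on the p-icons div is whatever JD renders; the [6:] slice in the script below assumes a 6-character prefix, and the 'J_pro_' used here is only a hypothetical example of one), building a single comment URL from an id looks like this:

raw_id = 'J_pro_100012345678'  # hypothetical id attribute value; only the numeric tail matters
product_id = raw_id[6:]        # drop the assumed 6-character prefix
comment_url = ('https://sclub.jd.com/comment/productPageComments.action'
               f'?callback=fetchJSON_comment98vv12370&productId={product_id}')
print(comment_url)

The full script: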

import requests
import json
import threading
import time
import re
from lxml import etree

class cup:
    def __init__(self):
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
        self.url = 'https://item.jd.com/'


    def vari_cpu(self):  # collect the comment-JSON URLs for the different bra listings
        url_list = []
        url = 'https://search.jd.com/Search?keyword=%E6%96%87%E8%83%B8&enc=utf-8&spm=2.1.1'
        html = requests.get(url,headers = self.headers).text
        html = etree.HTML(html)
        cpu_link = html.xpath('//div[@class="p-icons"]/@id')
        for i in cpu_link:  # id attribute of each search result
            i = i[6:]  # keep only the numeric part (the product id)
            Fin_url = f'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv12370&productId={i}'
            # f-strings: the newer way to format
            url_list.append(Fin_url)  # list of comment URLs
        return url_list


    def get_json(self,url):
        res = requests.get(url, headers=self.headers).text
        s = re.compile(r'fetchJSON_comment.*?\(')  # the JSONP callback prefix varies, so match it with a regex
        useless = str(s.findall(res))  # turn the match into a string of characters for lstrip below
        jd = json.loads(res.lstrip(useless).rstrip(');'))  # strip the JSONP padding to get pure JSON
        com_list = jd['comments']
        for i in com_list:
            print(i['productColor'],i['productSize'])


if __name__ == '__main__':
    pa = cup()
    url_list = pa.vari_cpu()
    for j in url_list:
        for i in range(3):
            js_url = j+'&score=0&sortType=5&page=%d&pageSize=10&isShadowSku=0&rid=0&fold=1'%i
            time.sleep(1)
            t = threading.Thread(target=pa.get_json, args=(js_url,))
            t.start()

I just swapped the productId in the comment-JSON URL for each product's own id, and it turned out to actually work, so I'll go with that and save myself the trouble.
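As an aside, lstrip/rstrip treat their arguments as character sets rather than as a literal prefix and suffix, so the trick above only works because the JSON body happens to start and end with characters that are not in those sets. A slightly more robust sketch, assuming the response really is wrapped as fetchJSON_comment...( ... ); :

import json
import re

def jsonp_to_dict(text):
    # grab whatever sits between the outermost parentheses of fetchJSON_comment...( ... );
    match = re.search(r'^\s*fetchJSON_comment\w*\((.*)\)\s*;?\s*$', text, re.S)
    if match is None:
        raise ValueError('unexpected response format')
    return json.loads(match.group(1))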

There's still a rough spot here, namely the threading. I'll go brush up on threads and processes later and improve it another round. I don't dare scrape much with this, so I'm only pulling a little to see how it goes; and since the output is honestly a huge pile, I won't post it.

Also, I found that the search page actually hides an AJAX request: when you don't scroll down there are only 30 items, and once you scroll it becomes 60…
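A rough sketch of fetching that lazy-loaded second half is below. The s_new.php endpoint and its parameters are only what the browser's network panel suggested at the time, so treat them as assumptions that may well have changed:

import requests
from lxml import etree

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}

def lazy_half_ids(keyword='文胸'):
    # hypothetical: endpoint and params observed in the network panel, not guaranteed stable
    url = 'https://search.jd.com/s_new.php'
    params = {'keyword': keyword, 'enc': 'utf-8', 'page': 2, 'scrolling': 'y'}
    html = etree.HTML(requests.get(url, params=params, headers=headers).text)
    return [i[6:] for i in html.xpath('//div[@class="p-icons"]/@id')]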


The improvements: it now covers multiple brands and uses a thread pool.

import requests
import json
from concurrent.futures import ThreadPoolExecutor
import time
import re
from lxml import etree

class cpu:
    def __init__(self):
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
        self.url = 'https://item.jd.com/'


    def vari_cpu(self):  # collect the comment-JSON URLs for the different bra listings
        url_list = []
        url = 'https://search.jd.com/Search?keyword=%E6%96%87%E8%83%B8&enc=utf-8&spm=2.1.1'
        html = requests.get(url,headers = self.headers).text
        html = etree.HTML(html)
        cpu_link = html.xpath('//div[@class="p-icons"]/@id')
        for i in cpu_link:  # id attribute of each search result
            i = i[6:]  # keep only the numeric part (the product id)
            Fin_url = f'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv12370&productId={i}'
            # f-strings: the newer way to format
            url_list.append(Fin_url)  # list of comment URLs
        return url_list


    def get_json(self,url):
        res = requests.get(url, headers=self.headers).text
        s = re.compile(r'fetchJSON_comment.*?\(')  # the JSONP callback prefix varies, so match it with a regex
        useless = str(s.findall(res))  # turn the match into a string of characters for lstrip below
        jd = json.loads(res.lstrip(useless).rstrip(');'))  # strip the JSONP padding to get pure JSON
        com_list = jd['comments']
        return [i['productSize'] for i in com_list]  # return the sizes so the main block can print them

    def get_list(self):
        Fin_url= []
        url_list = self.vari_cpu()
        for j in url_list:
            for i in range(5):
                js_url = j + '&score=0&sortType=5&page=%d&pageSize=10&isShadowSku=0&rid=0&fold=1' % i  # the comment-JSON URL follows this pattern
                Fin_url.append(js_url)
        return Fin_url

if __name__ == '__main__':
    pa = cpu()
    Fin_url = pa.get_list()
    with ThreadPoolExecutor(max_workers=8) as pool:
        results = pool.map(pa.get_json,Fin_url)
    for sizes in results:  # each item is the list of sizes one get_json call returned
        print(sizes)
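If you want to keep the request rate down (as noted above, I didn't dare scrape too much), one simple option is to sleep inside the worker. A minimal sketch, reusing the pa and Fin_url names from the script above; polite_get_json is just a hypothetical wrapper, not part of the original code:

import time
from concurrent.futures import ThreadPoolExecutor

def polite_get_json(url, delay=1.0):
    time.sleep(delay)        # crude throttle so eight workers don't hammer the site at once
    return pa.get_json(url)  # reuse the scraper instance defined above

with ThreadPoolExecutor(max_workers=8) as pool:
    for sizes in pool.map(polite_get_json, Fin_url):
        print(sizes)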