
Multithreaded web scraping with Python


#-*- encoding:utf8 -*-
'''
Created on 2018-12-25

@author: Administrator
'''
from multiprocessing.dummy import Pool as pl
import csv
import requests
from lxml import etree


def spider(url):
    # fetch a 36kr API endpoint and return the decoded JSON body
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"}
    r = requests.get(url=url, headers=header)
    return r.json()

def spider_detail(url):
    # fetch one article, strip the HTML tags from its body, and save it to disk
    resp = spider(url)
    title = resp.get('data').get('title')
    print(title)
    content = resp.get('data').get('content')
    try:
        title_clear = title.replace('|', '').replace('?', '')
        content_clear = content.replace('</p><p>', '\n\n').replace('<p>', '')
        sel = etree.HTML(content_clear)
        content_clear = sel.xpath('string(//*)')
        artical_write(title_clear, content_clear)
        print(title_clear)
    except Exception:
        # skip articles whose title or content is missing or malformed
        pass
    
def get_all_urls(page_number):
    # walk the listing pages and yield the detail-API url of every article
    for i in range(1, page_number + 1):
        url = 'https://36kr.com/api/search-column/mainsite?per_page=20&page=' + str(i)
        resp = spider(url)
        artical_data = resp.get('data').get('items')
        for url_data in artical_data:
            number = url_data.get('id')
            artical_url = 'https://36kr.com/api/post/' + str(number) + '/next'
            yield artical_url
    
def artical_write(title, content):
    # save one article as a .txt file; the output directory must already exist
    with open('d:/spider_data/11.11/' + title + '.txt', 'wt', encoding='utf-8') as f:
        f.write(content)

if __name__ == '__main__':
    # number of threads; defaults to the number of CPU cores if omitted
    pool = pl(4)

    # collect the article URLs into a list
    all_url = []
    for url in get_all_urls(100):
        all_url.append(url)

    # crawl the detail pages with the thread pool
    pool.map(spider_detail, all_url)
    pool.close()
    pool.join()
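
The same fan-out can also be written with the standard-library concurrent.futures module instead of multiprocessing.dummy. The snippet below is a minimal sketch, assuming the spider_detail and get_all_urls functions defined above; max_workers plays the same role as the pool size passed to pl(4).

# A minimal alternative sketch using concurrent.futures.ThreadPoolExecutor
# instead of multiprocessing.dummy.Pool; spider_detail and get_all_urls
# are assumed to be the functions defined above.
from concurrent.futures import ThreadPoolExecutor

if __name__ == '__main__':
    urls = list(get_all_urls(100))  # collect all detail-API urls first
    with ThreadPoolExecutor(max_workers=4) as executor:
        # map schedules every url on the pool; iterating the result
        # waits for completion and surfaces any worker exception
        list(executor.map(spider_detail, urls))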
