
Scraping the Maoyan movie Top 100 chart and exporting it to a CSV file

The script below requests the ten pages of Maoyan's Top 100 board (http://maoyan.com/board/4), parses each entry with a regular expression, and writes the collected records to a CSV file with pandas.

#!/usr/bin/env python 
# -*- coding:utf-8 -*-
import json
import pandas as pd
import requests
from gevent.pool import Pool
from requests.exceptions import RequestException
import re
import csv


def get_one_page(url):
    try:
        hd = {'user-agent':'Chrome/10'}
        response = requests.get(url, headers=hd)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def parse_one_page(html):
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'id': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],   # drop the leading "主演:" label
            'time': item[4].strip()[5:],    # drop the leading "上映时间:" label
            'score': item[5] + item[6]      # integer part + fraction part
        }


def write_to_file(content):
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def main():
    con_list = []
    for i in range(10):
        url = 'http://maoyan.com/board/4?offset=' + str(i * 10)
        html = get_one_page(url)
        # print(html)
        for item in parse_one_page(html):
            con_list.append(item)
            # print(item)
            # write_to_file(item)  # write each record to the txt file
    print(con_list)  # check that the result is a list of dicts: [{...}, {...}, ...]
    df = pd.DataFrame(con_list, columns=['id', 'image', 'title', 'actor', 'time', 'score'])
    # print(df)  # show the pandas result set
    df.to_csv('./myfir.csv', index=False, encoding='utf_8_sig')


if __name__ == '__main__':
    main()
    # for i in range(10):
    # pool = Pool()
    # pool.map(main, [i * 10 for i in range(10)])
    # Take each element of the list as an argument to the function, creating one task per element in the pool;
    # the first argument is the function, the second an iterable whose numbers are passed to the function one by one.
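
To see what parse_one_page extracts, it can be run against a single <dd> block shaped like Maoyan's board markup. The fragment below is a hand-written approximation of that markup for illustration only, not a real page fetched from the site.

# A hand-written <dd> fragment mimicking the board markup, used only to
# show what parse_one_page yields for one entry.
sample_html = '''
<dd>
  <i class="board-index board-index-1">1</i>
  <img data-src="https://p0.meituan.net/movie/example.jpg" alt=""/>
  <p class="name"><a href="/films/1203">霸王别姬</a></p>
  <p class="star">主演:张国荣,张丰毅,巩俐</p>
  <p class="releasetime">上映时间:1993-01-01</p>
  <p class="score"><i class="integer">9.</i><i class="fraction">5</i></p>
</dd>
'''

for row in parse_one_page(sample_html):
    print(row)
# Expected shape of the output:
# {'id': '1', 'image': 'https://p0.meituan.net/movie/example.jpg',
#  'title': '霸王别姬', 'actor': '张国荣,张丰毅,巩俐',
#  'time': '1993-01-01', 'score': '9.5'}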
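
The commented-out pool.map(...) lines at the end hint at a concurrent version using gevent. Below is a minimal sketch of that idea, assuming gevent is installed and reusing get_one_page and parse_one_page from the script above; the helper name crawl_page, the pool size of 5, and the output filename myfir_concurrent.csv are illustrative choices, not from the original. Since main() takes no parameter, the sketch maps over the page offsets with a small helper instead of mapping main directly.

# Minimal concurrent sketch with gevent (assumes gevent is installed).
from gevent import monkey
monkey.patch_all()  # patch blocking I/O so requests cooperates with greenlets
                    # (ideally this runs before requests is first imported)

import pandas as pd
from gevent.pool import Pool


def crawl_page(offset):
    # Fetch one page of the board and return its parsed rows as a list.
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    return list(parse_one_page(html)) if html else []


def main_concurrent():
    pool = Pool(5)  # at most 5 pages in flight at a time
    # pool.map preserves input order, so the rows stay sorted by ranking
    pages = pool.map(crawl_page, [i * 10 for i in range(10)])
    rows = [item for page in pages for item in page]
    df = pd.DataFrame(rows, columns=['id', 'image', 'title', 'actor', 'time', 'score'])
    df.to_csv('./myfir_concurrent.csv', index=False, encoding='utf_8_sig')


if __name__ == '__main__':
    main_concurrent()

With only ten pages the sequential version is already fast; the pool mainly helps if the crawl grows, and a site like Maoyan may rate-limit aggressive concurrent requests, so keep the pool size small.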