python爬蟲3——爬取騰訊招聘全部招聘資訊
阿新 • 發佈:2018-12-15
python爬蟲2中,已經有了初步的程式碼,之後做了優化
增加了工作職責、工作要求:
獲取的資料有:
程式碼如下:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Crawl all job postings from the Tencent HR site (hr.tencent.com).

For every listing page, collect per position: title, category, headcount,
location, publish date, detail-page link, and — from the detail page —
the job responsibilities and job requirements.  Each listing page's
results are written out as one JSON file.

NOTE: this script targets Python 2 (it uses urllib2).
"""
from bs4 import BeautifulSoup
import urllib2
import urllib  # unused here, but kept in case other code relies on it
import json

# Base URL of the Tencent HR site; listing/detail paths are relative to it.
BASE_URL = 'https://hr.tencent.com/'


def _fetch(path):
    """Return the raw HTML body of BASE_URL + path, closing the connection."""
    response = urllib2.urlopen(urllib2.Request(BASE_URL + path))
    try:
        return response.read()
    finally:
        response.close()


def tengxun(detail, num):
    """Scrape one listing page and write its positions to tencent<num>.json.

    :param detail: relative URL of the listing page,
                   e.g. 'position.php?&start=0#a'
    :param num:    page index, used only to name the output file
    """
    soup = BeautifulSoup(_fetch(detail), 'html.parser', from_encoding='utf-8')
    # Table rows alternate between the 'even' and 'odd' CSS classes.
    rows = soup.select('.even') + soup.select('.odd')

    items = []
    for node in rows:
        cells = node.select('td')
        detail_link = node.select('td a')[0].attrs['href']

        # Fetch the detail page for responsibilities / requirements.
        job_soup = BeautifulSoup(_fetch(detail_link), 'html.parser',
                                 from_encoding='utf-8')
        sections = job_soup.select('ul.squareli')
        # First <ul class="squareli"> holds the responsibilities,
        # the second holds the requirements.
        job_res = '\n'.join(li.get_text()
                            for li in sections[0].select('li')) + '\n'
        job_req = '\n'.join(li.get_text()
                            for li in sections[1].select('li')) + '\n'

        items.append({
            'zname': cells[0].get_text(),   # position title
            'detailLink': detail_link,      # detail-page URL (relative)
            'ztype': cells[1].get_text(),   # position category
            'znum': cells[2].get_text(),    # headcount
            'zlocal': cells[3].get_text(),  # location
            'ztime': cells[4].get_text(),   # publish date
            'jobRes': job_res,              # job responsibilities
            'jobReq': job_req,              # job requirements
        })

    print(len(items))

    # BUGFIX: name the file tencent<num>.json (was 'tencent.json' + num,
    # which produced files like "tencent.json0" with the extension buried
    # in the middle).  ensure_ascii=False keeps the Chinese text readable.
    with open('tencent' + str(num) + '.json', 'w') as output:
        output.write(json.dumps(items, ensure_ascii=False).encode('utf-8'))


if __name__ == '__main__':
    # BUGFIX: guard the crawl loop so importing this module does not
    # immediately fire 303 network requests.
    for i in range(303):
        print("進行到第" + str(i) + "頁")
        tengxun('position.php?&start=' + str(i * 10) + '#a', i)
取出來的json資料: