
[selenium] Driving Chrome with Selenium to crawl pages / headless Chrome / using a proxy

Installing Selenium and ChromeDriver

  • Install Chrome (there are version requirements, and they differ between Linux and Windows; check the details for your platform, but preferably use Chrome 61 or newer).

  • Install the selenium library first, then download chromedriver and put it on your PATH so selenium can find it. A quick smoke test is sketched below.
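To confirm the installation works, here is a minimal sketch, assuming chromedriver is on your PATH; it uses the same selenium 3 style API as the main script below, and example.com is just a placeholder page:

#!/usr/bin/env python3
# coding=utf-8
# Minimal installation check: open a page in headless Chrome and print its title.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome(chrome_options=options)  # chromedriver is found via PATH
driver.get('https://www.example.com')
print(driver.title)  # expected output: "Example Domain"
driver.quit()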

Driving Chrome with Selenium

The code below can be run as-is; you only need to fill in your own proxy parameters. It implements:
1. Crawling a single JD product page without a proxy: selenium + headless chrome
2. Crawling it through an ordinary proxy: selenium + headless chrome + proxy
3. Crawling it through a proxy that requires authentication: selenium + headless chrome + proxy(auth) (this mode cannot currently be used with headless Chrome)

#!/usr/bin/env python3
# coding=utf-8
import logging
import os
import time
import zipfile

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.chrome.options import Options

# Download the plugin from https://github.com/revotu/selenium-chrome-auth-proxy
# and put it into the folder below.
CHROME_PROXY_HELPER_DIR = 'proxy_helper/'
CUSTOM_CHROME_PROXY_EXTENSIONS_DIR = 'proxy_helper/'


def get_chrome_proxy_extension():
    # To use a proxy that requires authentication, call this function to build
    # the extension; fill in your own account and password.
    username = 'xxxxxxxxxx'
    password = 'xxxxxxxxxx'
    ip = 'xxxxxxxxx'
    port = 'xxxx'
    # Build a customized Chrome proxy extension (a zip file)
    if not os.path.exists(CUSTOM_CHROME_PROXY_EXTENSIONS_DIR):
        os.mkdir(CUSTOM_CHROME_PROXY_EXTENSIONS_DIR)
    # name the zip after the proxy account and endpoint
    extension_file_path = os.path.join(
        CUSTOM_CHROME_PROXY_EXTENSIONS_DIR,
        '{}@{}_{}.zip'.format(username, ip, port))
    if not os.path.exists(extension_file_path):
        # The extension file does not exist yet, so create it
        zf = zipfile.ZipFile(extension_file_path, mode='w')
        zf.write(os.path.join(CHROME_PROXY_HELPER_DIR, 'manifest.json'), 'manifest.json')
        # Substitute the proxy parameters into the template
        background_content = open(os.path.join(CHROME_PROXY_HELPER_DIR, 'background.js')).read()
        background_content = background_content.replace('%proxy_host', ip)
        background_content = background_content.replace('%proxy_port', port)
        background_content = background_content.replace('%username', username)
        background_content = background_content.replace('%password', password)
        zf.writestr('background.js', background_content)
        zf.close()
    return extension_file_path


class Crawler(object):

    def __init__(self, proxy=None):
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        prefs = {"profile.managed_default_content_settings.images": 2}  # do not load images
        chrome_options.add_experimental_option("prefs", prefs)
        if proxy:
            proxy_address = proxy['https']
            # proxy without authentication
            chrome_options.add_argument('--proxy-server=%s' % proxy_address)
            # proxy with authentication (does not work in headless mode):
            # chrome_options.add_extension(get_chrome_proxy_extension())
            logging.info('Chrome using proxy: %s', proxy['https'])
        self.chrome = webdriver.Chrome(chrome_options=chrome_options)
        # implicit wait for element lookups
        self.chrome.implicitly_wait(5)
        # page-load timeout, similar to the timeout of requests.get();
        # jd sometimes takes a long time loading Google-hosted resources
        self.chrome.set_page_load_timeout(60)
        # timeout for scripts
        self.chrome.set_script_timeout(60)

    def get_jd_item(self, item_id):
        item_info = []
        url = 'https://item.jd.com/' + item_id + '.html'
        try:
            self.chrome.get(url)
            name = self.chrome.find_element_by_xpath("//*[@class='sku-name']").text
            price = self.chrome.find_element_by_xpath("//*[@class='p-price']").text
            subtitle = self.chrome.find_element_by_xpath("//*[@id='p-ad']").text
            plus_price = self.chrome.find_element_by_xpath("//*[@class='p-price-plus']").text
            # use a list rather than a tuple so the fields stay mutable
            item_info = [name, price[1:], subtitle, plus_price[1:]]
            logging.debug('item_info: {}, {}, {}, {}'.format(name, price, subtitle, plus_price))
            logging.info('Crawl SUCCESS: {}'.format(item_info))
        except NoSuchElementException as e:
            logging.warning('Crawl failure: {}'.format(e))
        except TimeoutException as e:
            logging.warning('Crawl failure: {}'.format(e))
        self.chrome.quit()
        return item_info


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    while True:
        start = time.time()
        # c = Crawler()  # no proxy
        c = Crawler({'http': 'http-pro.abuyun.com:9010',
                     'https': 'http-pro.abuyun.com:9010'})
        logging.debug(c.get_jd_item('3133927'))
        end = time.time()
        print(end - start)
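For a single run without the endless loop, usage looks like the sketch below (it assumes the Crawler class above is defined; the item id 3133927 is the one from the script, and a fresh Crawler is needed per request because get_jd_item quits the browser at the end):

# Fetch one item without a proxy; get_jd_item returns
# [name, price, subtitle, plus_price] on success, or [] on failure.
crawler = Crawler()
info = crawler.get_jd_item('3133927')
if info:
    name, price, subtitle, plus_price = info
    print(name, price)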

If you have any questions, please leave a comment! Please give credit when reposting!