程式人生 > Python + selenium 爬取百度文庫Word文字

Python + selenium 爬取百度文庫Word文字

 1 # -*- coding:utf-8 -*-
 2  
 3 import time
 4 from selenium import webdriver
 5 from selenium.webdriver.chrome.options import Options
 6 from selenium.common.exceptions import NoSuchElementException
 7  
 8 chrome_options = Options()
 9 chrome_options.add_argument('--headless')
10 chrome_options.add_argument('
--disable-gpu') 11 chrome_options.add_argument("--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36") 12 13 driver = webdriver.Chrome(chrome_options=chrome_options) 14 driver.maximize_window() 15 16 url = input("輸入文件連結,搞快點:") 17 driver.get(url)
18 19 error_str = "" 20 21 try : 22 page_num = driver.find_element_by_xpath("//span[@class='page-count']").text 23 24 find_button = driver.find_element_by_xpath("//div[@class='doc-banner-text']") 25 driver.execute_script("arguments[0].scrollIntoView();", find_button) 26 button = driver.find_element_by_xpath("
//span[@class='moreBtn goBtn']") 27 button.click() 28 29 for i in range(1,int(page_num.strip('/')) + 1) : 30 page = driver.find_element_by_xpath("//div[@data-page-no='{}']".format(i)) 31 driver.execute_script("arguments[0].scrollIntoView();", page) 32 time.sleep(0.3) 33 print(driver.find_elements_by_xpath("//div[@data-page-no='{}']//div[@class='reader-txt-layer']".format(i))[-1].text) 34 35 except NoSuchElementException : 36 if driver.find_element_by_xpath("//div[@class='doc-bottom-text']").text == "試讀已結束,如需繼續閱讀或下載" : 37 error_str = "\n------------------------------------------------------------------\n\n" \ 38 "----------百度文庫提示試讀已結束啦,無法爬取全文,等會再試試吧----------\n\n" \ 39 "------------------------------------------------------------------" 40 41 finally : 42 print(error_str)