
Crawler Day 03: the requests module and Handler processors

Contents

1. The requests module

    1. Common methods
        1. get(): send a request and get the response object
        2. response attributes
            1. response.text: string
                default character encoding: ISO-8859-1
                response.encoding = "utf-8"
            2. response.content: bytes
            3. response.status_code: the HTTP response code
        3. get(): query parameters via params (a dict)
            1. no query parameters
                res = requests.get(url,headers=headers)
            2. with query parameters
                params = {"wd":"達內"}
                res = requests.get(url,params=params,headers=headers)

import requests

url = "http://www.baidu.com/s?"
headers = {"User-Agent":"Mozilla5.0/"}
s = input("Enter the search keyword: ")
# the params argument of get() must be a dict; requests URL-encodes it automatically
wd = {"wd":s}
res = requests.get(url,params=wd,headers=headers)
res.encoding = "utf-8"
print(res.text)
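
For the other response members listed above (content, status_code), a minimal sketch along the same lines, reusing the Baidu URL from the example:

import requests

url = "http://www.baidu.com/"
headers = {"User-Agent":"Mozilla5.0/"}

res = requests.get(url,headers=headers)
print(res.status_code)      # HTTP response code, e.g. 200
res.encoding = "utf-8"      # set the encoding before reading res.text
print(type(res.content))    # res.content is the raw bytes of the body
print(res.text[:100])       # res.text is the decoded string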

        4. post(): parameter name data (a minimal sketch follows below)
            1. data = {}  # the data argument is a dict; no need to convert it to bytes
               res = requests.post(url,data=data,headers=headers)
        5. Proxies: proxies
            1. step two in the fight between crawlers and anti-crawler measures
               sites that publish proxy IPs:
                1. Xici proxy (西刺代理)
                2. Kuaidaili (快代理)
                3. Quanwang proxy (全網代理)
            2. regular proxy: proxies = {"protocol":"IP:port"} (use a lowercase scheme key)
               proxies = {"http":"171.221.239.11:808"}
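
The post() item above has no example later in these notes; here is a minimal sketch, assuming httpbin.org/post as a throwaway test endpoint and made-up form fields:

import requests

url = "http://httpbin.org/post"            # assumed test endpoint
headers = {"User-Agent":"Mozilla5.0/"}
data = {"user":"tom","pwd":"123456"}       # dict; requests encodes it, no bytes needed

res = requests.post(url,data=data,headers=headers)
res.encoding = "utf-8"
print(res.text)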

import requests

url = "http://www.taobao.com/"
proxies = {"HTTP":"222.221.11.119:3128"}
headers = {"User-Agent":"Mozilla5.0/"}

res = requests.get(url,proxies=proxies,headers=headers)
res.encoding = "utf-8"
print(res.text)

            3. Private proxy:
               proxies = {"http":"http://309435365:[email protected]:16819"}

import requests

url = "http://www.taobao.com/"
headers = {"User-Agent":"Mozilla5.0/"}
proxies={"HTTP":"http://309435365:[email protected]:16819"}

#114.67.228.126:16819
res = requests.get(url,proxies=proxies,headers=headers)
res.encoding = "utf-8"
print(res.status_code)

            4. Case study: scrape Lianjia second-hand housing listings
               Goal: scrape the estate name and the total price
               Steps:
                   1. find the URL
                      https://gz.fang.lianjia.com/loupan/
                   2. match with a regular expression
                   3. write to a local file
               (the full spider is listed at the end of these notes)
        6. Web client verification: auth
            1. auth = ("username","password")
            2. scrape http://code.tarena.com.cn
               regex: p = re.compile('<a href="\w+/">(.*?)</a>',re.S)
               (see the sketch below)
        7. SSL certificate verification: verify
            1. verify=True: the default, verify the SSL certificate
            2. verify=False: skip certificate verification
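
A minimal sketch of the Web client verification in item 6; the username and password are placeholders, not real credentials:

import requests
import re

url = "http://code.tarena.com.cn/"
headers = {"User-Agent":"Mozilla5.0/"}
auth = ("username","password")    # placeholder credentials

res = requests.get(url,auth=auth,headers=headers)
res.encoding = "utf-8"
p = re.compile(r'<a href="\w+/">(.*?)</a>',re.S)
print(p.findall(res.text))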

import requests

url = "https://www.12306.cn/mormhweb/"
headers = {"User-Agent":"Mozilla5.0/"}

res = requests.get(url,verify=False,headers=headers)
res.encoding = "utf-8"
print(res.text)

2. Handler processors (urllib.request)

    1. Definition
        A way to build a custom urlopen(); urlopen() itself is just a special, ready-made opener
    2. Common methods
        1. build_opener(Handler object)
        2. opener.open(url), the counterpart of urlopen()
    3. Usage flow
        1. create the relevant Handler object
            http_handler = urllib.request.HTTPHandler()
        2. create a custom opener object
            opener = urllib.request.build_opener(http_handler)
        3. send the request with the opener object's open() method

import urllib.request

url = "http://www.baidu.com/"
# 1. create the HTTPHandler object
http_handler = urllib.request.HTTPHandler()
# 2. create the custom opener object
opener = urllib.request.build_opener(http_handler)
# 3. send the request with the opener's open() method
req = urllib.request.Request(url)
res = opener.open(req)
print(res.read().decode("utf-8"))

    4. Handler types
        1. HTTPHandler()
        2. ProxyHandler(proxy IP dict)
        3. ProxyBasicAuthHandler(password manager object): private proxy (see the sketch after the ProxyHandler example below)

import urllib.request

url = "http://www.baidu.com/"
proxy = {"HTTP":"183.62.196.10:3128"}
# 1. create the ProxyHandler object
proxy_handler = urllib.request.ProxyHandler(proxy)
# 2. create the custom opener
opener = urllib.request.build_opener(proxy_handler)
# 3. send the request with the opener's open() method
req = urllib.request.Request(url)
res = opener.open(req)
print(res.read().decode("utf-8"))
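
The third handler, ProxyBasicAuthHandler, takes a password manager object; a minimal sketch using the private proxy address quoted earlier in these notes, with placeholder credentials:

import urllib.request

url = "http://www.baidu.com/"
proxy_addr = "114.67.228.126:16819"          # private proxy from the notes above
# 1. password manager object holding the proxy credentials (placeholders)
pwd_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
pwd_mgr.add_password(None,proxy_addr,"username","password")
# 2. ProxyHandler routes traffic through the proxy, ProxyBasicAuthHandler answers the 407 challenge
proxy_handler = urllib.request.ProxyHandler({"http":"http://" + proxy_addr})
auth_handler = urllib.request.ProxyBasicAuthHandler(pwd_mgr)
# 3. custom opener and request
opener = urllib.request.build_opener(proxy_handler,auth_handler)
req = urllib.request.Request(url)
res = opener.open(req)
print(res.read().decode("utf-8"))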

Scraping Lianjia second-hand housing data

# Save to a local file
import requests
import re

class LianJiaSpider:
    def __init__(self):
        self.baseurl = "https://bj.lianjia.com/ershoufang/pg"
        self.headers = {"User-Agent":"Mozilla5.0/"}
        self.proxies = {"HTTP":"http://309435365:[email protected]:16819"}
        self.page = 1
    
    # fetch a page
    def getPage(self,url):
        res = requests.get(url,proxies=self.proxies,headers=self.headers)
        res.encoding = "utf-8"
        html = res.text
        self.parsePage(html)
    
    # parse the page with a regex
    def parsePage(self,html):
        p = re.compile('<div class="houseInfo">.*?data-el="region">(.*?)</a>.*?<div class="totalPrice">.*?<span>(.*?)</span>',re.S)
        r_list = p.findall(html)
        # [("首科花園","595"),(),()]
        self.writePage(r_list)
        
    # write the results to a local file
    def writePage(self,r_list):
        for r_tuple in r_list:  # r_tuple is e.g. ("首科花園","595")
            for r_str in r_tuple:
                with open("鏈家二手房.txt","a") as f:
                    f.write(r_str.strip() + "  ")
            
            with open("鏈家二手房.txt","a") as f:
                f.write("\n")
    # main loop
    def workOn(self):
        while True:
            print("Crawling page %d..." % self.page)
            # build the URL for this page
            url = self.baseurl + str(self.page) + "/"
            self.getPage(url)
            print("Page %d crawled successfully" % self.page)
            
            c = input("Continue crawling? (y/n): ")
            if c.strip().lower() == "y":
                self.page += 1
            else:
                print("Done crawling, thank you!")
                break
            
if __name__ == "__main__":
    spider = LianJiaSpider()
    spider.workOn()
 
  


# Save to MySQL

import requests
import re
import pymysql
import warnings

class LianJiaSpider:
    def __init__(self):
        self.baseurl = "https://bj.lianjia.com/ershoufang/pg"
        self.headers = {"User-Agent":"Mozilla5.0/"}
        self.proxies = {"http":"http://309435365:[email protected]:16819"}
        self.page = 1
        # create the database connection object
        self.db = pymysql.connect(host="localhost",user="root",
                          password="123456",charset="utf8")
        # create the cursor object
        self.cursor = self.db.cursor()
    
    # fetch a page
    def getPage(self,url):
        res = requests.get(url,proxies=self.proxies,headers=self.headers)
        res.encoding = "utf-8"
        html = res.text
        print("頁面已獲取,正在解析頁面...")
        self.parsePage(html)
    
    # parse the page with a regex
    def parsePage(self,html):
        p = re.compile('<div class="houseInfo">.*?data-el="region">(.*?)</a>.*?<div class="totalPrice">.*?<span>(.*?)</span>',re.S)
        r_list = p.findall(html)
        # [("首科花園","595"),(),()]
        print("正在存入mysql資料庫...")
        self.writeToMysql(r_list)
        
    # save the results to the MySQL database
    def writeToMysql(self,r_list):
        c_db = "create database if not exists spider;"
        u_db = "use spider;"
        c_tab = "create table if not exists lianjia(\
                 id int primary key auto_increment,\
                 name varchar(30),\
                 price decimal(20,2))charset=utf8;"
        # turn MySQL warnings into errors so "already exists" warnings can be caught below
        warnings.filterwarnings("error")
        try:
            self.cursor.execute(c_db)
        except Warning:
            pass

        self.cursor.execute(u_db)

        try:
            self.cursor.execute(c_tab)
        except Warning:
            pass
        
        # r_list : [("首科花園","595"),(),()]
        for r_tuple in r_list:
            s_insert = "insert into lianjia(name,price) \
                        values('%s','%s');" % \
                        (r_tuple[0].strip(),
                         float(r_tuple[1].strip())*10000)
            self.cursor.execute(s_insert)
            self.db.commit()
        print("第%d頁存入資料庫成功" % self.page)

    # main loop
    def workOn(self):
        while True:
            print("Crawling page %d..." % self.page)
            # build the URL for this page
            url = self.baseurl + str(self.page) + "/"
            self.getPage(url)
            print("Page %d crawled successfully" % self.page)
            
            c = input("Continue crawling? (y/n): ")
            if c.strip().lower() == "y":
                self.page += 1
            else:
                print("Done crawling, thank you!")
                break
            
if __name__ == "__main__":
    spider = LianJiaSpider()
    spider.workOn()
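
After a run, the inserted rows can be spot-checked with a short query; a minimal sketch using the same connection settings as the spider:

import pymysql

db = pymysql.connect(host="localhost",user="root",
                     password="123456",database="spider",charset="utf8")
cursor = db.cursor()
cursor.execute("select name,price from lianjia limit 5;")
for name,price in cursor.fetchall():
    print(name,price)
cursor.close()
db.close()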