
Python: Using the urllib Library

The urllib Library

"""
python 內建的HTTP請求庫
urllib.request 請求模組
urllib.error 異常處理模組
urllib.parse url解析模組
urllib.robotparser  robots.txt 解析模組
"""

# python2
import urllib2
response = urllib2.urlopen('http://www.baidu.com')
# python3
import urllib.request
resp = urllib.request.urlopen('http://www.baidu.com')
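Of the four modules listed above, urllib.robotparser is the only one not used in the examples below; a minimal sketch of checking robots.txt (the target URL is just an example):

# check whether a crawler may fetch a URL, per the site's robots.txt
import urllib.robotparser

rp = urllib.robotparser.RobotFileParser()
rp.set_url('http://www.baidu.com/robots.txt')  # example site
rp.read()  # download and parse robots.txt
print(rp.can_fetch('*', 'http://www.baidu.com/s?wd=python'))  # True/False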

Making Requests with urllib

# GET request
import urllib.request
resp = urllib.request.urlopen('http://www.baidu.com')
print(resp.read().decode("utf-8"))

# POST request
import urllib.parse
import urllib.request
# http://httpbin.org/post echoes the request back, handy for testing
data = bytes(urllib.parse.urlencode({"world": "hello"}), encoding="utf8")
response = urllib.request.urlopen("http://httpbin.org/post", data=data)
print(response.read())

# set a timeout
import urllib.request
resp = urllib.request.urlopen('http://httpbin.org/get', timeout=1)
print(resp.read().decode("utf-8"))

# exception handling
import socket
import urllib.request
import urllib.error
try:
    resp = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
    if isinstance(e.reason, socket.timeout):
        print("TIME OUT")

urllib Responses

# status code and response headers
import urllib.request
resp = urllib.request.urlopen('http://httpbin.org/get',timeout=1)
print(resp.status)
print(resp.getheaders())
# a list of 2-tuples, e.g. [("server", "nginx"), ...]
print(resp.getheader("server")) # nginx

# the response body
print(resp.read().decode("utf8"))
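Note that read() consumes the body: a second call returns empty bytes, so store the result if it is needed more than once. A small sketch:

# the body can only be read once from the socket; keep it in a variable
import urllib.request
resp = urllib.request.urlopen('http://httpbin.org/get', timeout=1)
body = resp.read()           # bytes
print(body.decode('utf-8'))  # decode once, reuse as needed
print(resp.read())           # b'' -- the stream is already exhausted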

More Complex Requests

# build a Request object and pass it to urlopen
import urllib.request

request = urllib.request.Request('https://python.org')
response = urllib.request.urlopen(request)
print(response.read().decode('utf-8'))


# add request headers and data
from urllib import request, parse

url = 'http://httpbin.org/post'
headers = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
    'Host': 'httpbin.org'
}
params = {
    'name': 'Germey'
}
data = bytes(parse.urlencode(params), encoding='utf8')
req = request.Request(url=url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))

# another way to add request headers: add_header()
from urllib import request, parse

url = 'http://httpbin.org/post'
params = {
    'name': 'Germey'
}
data = bytes(parse.urlencode(params), encoding='utf8')
req = request.Request(url=url, data=data, method='POST')
req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')
response = request.urlopen(req)
print(response.read().decode('utf-8'))

Setting a Proxy

import urllib.request

proxy_handler = urllib.request.ProxyHandler({
    'http': 'http://127.0.0.1:9743',
    'https': 'https://127.0.0.1:9743'
})
opener = urllib.request.build_opener(proxy_handler)
response = opener.open('http://httpbin.org/get')
print(response.read())
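If every request should go through the proxy, the opener can also be installed globally with install_opener, after which plain urlopen uses it:

# install the opener globally; urlopen() now routes through the proxy
urllib.request.install_opener(opener)
response = urllib.request.urlopen('http://httpbin.org/get')
print(response.read())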

Cookies

# get cookies
import http.cookiejar, urllib.request

cookie = http.cookiejar.CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
for item in cookie:
    print(item.name + "=" + item.value)

# save cookies to a local txt file in MozillaCookieJar format
import http.cookiejar, urllib.request
filename = "cookie.txt"
cookie = http.cookiejar.MozillaCookieJar(filename)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
cookie.save(ignore_discard=True, ignore_expires=True)

# save cookies in LWPCookieJar format
import http.cookiejar, urllib.request
filename = 'cookie.txt'
cookie = http.cookiejar.LWPCookieJar(filename)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
cookie.save(ignore_discard=True, ignore_expires=True)

# load the cookies we just saved
import http.cookiejar, urllib.request
cookie = http.cookiejar.LWPCookieJar()
cookie.load('cookie.txt', ignore_discard=True, ignore_expires=True)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('http://www.baidu.com')
print(response.read().decode('utf-8'))
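build_opener accepts any number of handlers, so cookie handling and a proxy can share one opener; a minimal sketch (the proxy address is a placeholder):

# combine several handlers in one opener, e.g. cookies + proxy
import http.cookiejar, urllib.request

cookie_handler = urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar())
proxy_handler = urllib.request.ProxyHandler({'http': 'http://127.0.0.1:9743'})  # placeholder
opener = urllib.request.build_opener(cookie_handler, proxy_handler)
response = opener.open('http://httpbin.org/get')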

Exception Handling

# print the exception reason
from urllib import request, error
try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.URLError as e:
    print(e.reason)
    
# catch detailed exception information
from urllib import request, error

try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.HTTPError as e:  # HTTPError has three attributes: reason, code, headers
    print(e.reason, e.code, e.headers, sep='\n')
except error.URLError as e:  # URLError has one attribute: reason
    print(e.reason)
else:
    print('Request Successfully')
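HTTPError is itself a file-like response, so besides reason/code/headers the error page body can be read; a sketch against an endpoint that returns 404:

# HTTPError doubles as a response object: the error body is readable
from urllib import request, error

try:
    response = request.urlopen('http://httpbin.org/status/404')
except error.HTTPError as e:
    print(e.code)                    # 404
    print(e.read().decode('utf-8'))  # body of the error page (may be empty)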

# inspect the exception's reason
import socket
import urllib.request
import urllib.error

try:
    response = urllib.request.urlopen('https://www.baidu.com', timeout=0.01)
except urllib.error.URLError as e:
    print(type(e.reason))
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')

urlparse

# pass in a URL string and split it into components
urllib.parse.urlparse(urlstring, scheme='', allow_fragments=True)
# the URL is split into a standard structure; we can pick out the pieces we need
from urllib.parse import urlparse

result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
print(type(result), result)
"""
<class 'urllib.parse.ParseResult'> 
ParseResult(scheme='http', netloc='www.baidu.com', path='/index.html', params='user', query='id=5', fragment='comment')

協議型別  域名 地址  
params='user'這個不常用到, 
query='id=5'

fragment='comment'
#fragment: 片段ID
該部分與上面的?後面的表單資訊本質的區別就是這部分內容不會被傳遞到伺服器端。一般用於頁面的錨。就是我們常見的網站右下腳一般有一個回到頂部的按鈕,一般就是使用其實現的。
"""
# specify a default scheme
from urllib.parse import urlparse

result = urlparse('www.baidu.com/index.html;user?id=5#comment', scheme='https')
print(result)

# if the URL already carries a scheme, scheme='https' has no effect
from urllib.parse import urlparse
result = urlparse('http://www.baidu.com/index.html;user?id=5#comment', scheme='https')
print(result)
# with allow_fragments=False, '#comment' is folded back into the query (or path)
from urllib.parse import urlparse

result = urlparse('http://www.baidu.com/index.html;user?id=5#comment', allow_fragments=False)
print(result)

from urllib.parse import urlparse

result = urlparse('http://www.baidu.com/index.html#comment', allow_fragments=False)
print(result)
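A close relative is urlsplit, which returns five parts and leaves params inside path; since params is rarely used, it is often the better choice:

# urlsplit: like urlparse but without the separate params field
from urllib.parse import urlsplit

result = urlsplit('http://www.baidu.com/index.html;user?id=5#comment')
print(result)
# SplitResult(scheme='http', netloc='www.baidu.com', path='/index.html;user', query='id=5', fragment='comment')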

urlunparse

"""
與 urlparse是反函式
urlparse是拆分,urlunparse則是拼接的
"""

from urllib.parse import urlunparse
data = ['http', 'www.baidu.com', 'index.html', 'user', 'a=6', 'comment']
print(urlunparse(data))
# http://www.baidu.com/index.html;user?a=6#comment
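Because urlparse returns a 6-tuple, its result feeds straight back into urlunparse for a round trip:

# round trip: urlparse output goes directly into urlunparse
from urllib.parse import urlparse, urlunparse

parts = urlparse('http://www.baidu.com/index.html;user?a=6#comment')
print(urlunparse(parts))  # the original URL comes back unchanged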

urljoin

# joining / combining URLs
from urllib.parse import urljoin

# join a base URL with a relative path
print(urljoin('http://www.baidu.com', 'FAQ.html'))
# the second URL takes precedence, overriding the base
print(urljoin('http://www.baidu.com', 'https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://cuiqingcai.com/FAQ.html?question=2'))
print(urljoin('http://www.baidu.com?wd=abc', 'https://cuiqingcai.com/index.php'))
print(urljoin('http://www.baidu.com', '?category=2#comment'))
print(urljoin('www.baidu.com', '?category=2#comment'))
print(urljoin('www.baidu.com#comment', '?category=2'))

"""
http://www.baidu.com/FAQ.html
https://cuiqingcai.com/FAQ.html
https://cuiqingcai.com/FAQ.html
https://cuiqingcai.com/FAQ.html?question=2
https://cuiqingcai.com/index.php
http://www.baidu.com?category=2#comment
www.baidu.com?category=2#comment
www.baidu.com?category=2
"""

urlencode

# convert a dict into URL query parameters
from urllib.parse import urlencode

params = {
    'name': 'germey',
    'age': 22
}
base_url = 'http://www.baidu.com?'
url = base_url + urlencode(params)
print(url)

# http://www.baidu.com?name=germey&age=22
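The reverse direction is handled by parse_qs and parse_qsl, which turn a query string back into Python data:

# the reverse of urlencode: parse a query string back into Python data
from urllib.parse import parse_qs, parse_qsl

query = 'name=germey&age=22'
print(parse_qs(query))   # {'name': ['germey'], 'age': ['22']}
print(parse_qsl(query))  # [('name', 'germey'), ('age', '22')]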