
Scanning proxies with Python and building a list of usable proxy IPs


The following code works on macOS or Linux:

# coding=utf-8

import requests
import re
from bs4 import BeautifulSoup as bs
import Queue
import threading
import random

# Pools of Referer and User-Agent strings used to randomize the request headers
headers_useragents = []
headers_referers = []
headers_referers.append('http://www.google.com/?q=')
headers_referers.append('http://www.usatoday.com/search/results?q=')
headers_referers.append('http://engadget.search.aol.com/search?q=')
headers_useragents.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3')
headers_useragents.append(
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
headers_useragents.append(
    'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1')
headers_useragents.append(
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1')
headers_useragents.append(
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')
headers_useragents.append(
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)')
headers_useragents.append(
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)')
headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)')
headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51')


class proxyPick(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        while not self._queue.empty():
            url = self._queue.get()

            proxy_spider(url)


def proxy_spider(url):
    headers = {
        #  .......
    }
    headers['User-Agent'] = random.choice(headers_useragents)
    headers['Cache-Control'] = 'no-cache'
    headers['Accept-Charset'] = 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'
    headers['Referer'] = random.choice(headers_referers) + str(random.randint(5, 10))
    headers['Keep-Alive'] = str(random.randint(110, 120))
    headers['Connection'] = 'keep-alive'
    r = requests.get(url=url, headers=headers)
    soup = bs(r.content, "html.parser")
    # Grab every row of the proxy table, whether or not its class is "odd"
    data = soup.find_all(name='tr', attrs={'class': re.compile('|[^odd]')})

    for i in data:
        soup = bs(str(i), 'html.parser')
        data2 = soup.find_all(name='td')
        ip = str(data2[1].string)
        port = str(data2[2].string)
        types = str(data2[5].string).lower()

        proxy = {}
        proxy[types] = '%s:%s' % (ip, port)
        print proxy, " check proxy"
        try:
            proxy_check(proxy, ip)
        except Exception, e:
            print e
            pass


def proxy_check(proxy, ip):
    # url = 'http://1212.ip138.com/ic.asp'
    # url = 'https://www.ipip.net/ip.html'
    # url = 'http://www.baid.com'
    # url = 'http://ip138.com/'
    url = 'http://2018.ip138.com/ic.asp'
    r = requests.get(url=url, proxies=proxy, timeout=6)
    # r.encoding = 'gb2312' for url = 'http://ip138.com/'
    reip = r'\[(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\]'
    # print r.text
    f = open('ip_proxy.txt', 'a+')
    found = re.search(reip, r.text, re.M | re.I)
    if found:
        ip2 = found.group(1)
        print "ip==> : ", ip2
        if ip2 == ip:
            print "*" * 30
            print "ip is wanted:", ip
            f.write('%s' % proxy + '\n')
            print "*" * 30
    # import sys
    # sys.exit(0)
    f.close()


# proxy_spider()

def main():
    queue = Queue.Queue()
    for i in range(1, 2288):
        queue.put('http://www.xicidaili.com/nn/' + str(i))

    threads = []
    thread_count = 10

    for i in range(thread_count):
        spider = proxyPick(queue)
        threads.append(spider)

    for i in threads:
        i.start()

    for i in threads:
        i.join()

    print "It's done, sir!"


if __name__ == '__main__':
    main()

An example of scanning proxies with Python and collecting usable proxy IPs

From: https://www.jb51.net/article/120480.htm. The following walkthrough shows how to scan proxies with Python and collect the usable proxy IPs; it is reproduced here as a reference.

Today we'll write a pretty practical tool: one that scans for and collects usable proxies.

First, I searched Baidu and picked a site to use as an example: http://www.xicidaili.com

This site publishes the IPs and ports of many usable proxies, both domestic and foreign.

As usual, let's start by analyzing the site, and scan all of the domestic proxies first.

Opening the domestic section and inspecting it, we find that the domestic proxy listing lives at URLs of the form:

http://www.xicidaili.com/nn/x

This x goes up to roughly two thousand-plus pages, so it looks like we'll need threads again...

As usual, we first try whether a plain requests.get() can fetch the content directly.

It returns 503, so we add a simple set of headers.

Now it returns 200. Done.
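
As a minimal sketch of that step (the page URL and the specific User-Agent are just placeholders picked from the pools above), adding a browser-like User-Agent is what turns the 503 into a 200:

# coding=utf-8
import requests

# A minimal header set; any User-Agent string from the pools above will do
url = 'http://www.xicidaili.com/nn/1'
headers = {'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3'}

r = requests.get(url=url, headers=headers)
print r.status_code  # 503 without the header, 200 with it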

Good. Now let's analyze the page and extract the content we want.

We find that the IP information sits inside <tr> tags, so BeautifulSoup makes it easy to grab the tag contents.

But we then notice that the IP, port, and protocol sit in the 2nd, 3rd, and 6th <td> tags of each extracted <tr>.

r = requests.get(url=url, headers=headers)
soup = bs(r.content, "html.parser")
data = soup.find_all(name='tr', attrs={'class': re.compile('|[^odd]')})

for i in data:
    soup = bs(str(i), 'html.parser')
    data2 = soup.find_all(name='td')
    ip = str(data2[1].string)
    port = str(data2[2].string)
    types = str(data2[5].string).lower()

    proxy = {}
    proxy[types] = '%s:%s' % (ip, port)

This way, each loop iteration produces a corresponding proxy dict, ready for the IP-availability check that comes next.

One thing to note about the dict: we lowercase types because the protocol names used as keys in the proxies argument of requests.get() must be lowercase, while the page lists them in uppercase, hence the case conversion.
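
For reference, this is the shape requests expects for its proxies argument; the address below is just a made-up placeholder, not a real proxy:

# The key is the lowercase protocol name, the value is 'ip:port'
proxy = {'http': '123.123.123.123:8080'}  # placeholder address
r = requests.get('http://1212.ip138.com/ic.asp', proxies=proxy, timeout=6)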

So what's the approach for verifying that an IP works?

It's simple: we issue a GET request through our proxy to this site:

http://1212.ip138.com/ic.asp

This is a handy site that simply returns whatever your public IP currently is.

url = 'http://1212.ip138.com/ic.asp'
r = requests.get(url=url, proxies=proxy, timeout=6)

Here we need the timeout to drop proxies that take too long to respond; I set it to 6 seconds.
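
Continuing from the url and proxy above, here is a small sketch of how a dead or slow proxy gets filtered out: the timeout (or any connection error) raises an exception, which requests groups under requests.exceptions.RequestException, and the spider's try/except around proxy_check simply skips that proxy:

try:
    r = requests.get(url=url, proxies=proxy, timeout=6)
except requests.exceptions.RequestException, e:
    # Timeouts, refused connections and unusable proxies all end up here
    print 'proxy failed:', e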

Let's try it with one IP and look at the page that comes back.

The returned content looks like this:

<html>

<head>

<meta xxxxxxxxxxxxxxxxxx>

<title> 您的IP地址 </title>

</head>

<body style="margin:0px"><center>您的IP是:[xxx.xxx.xxx.xxx] 來自:xxxxxxxx</center></body></html>

So all we need to do is extract the content inside the [] on that page.

If our proxy works, the page returns the proxy's IP.

(Sometimes the returned address is still our own machine's public IP. I'm not entirely sure why, but I exclude those cases; most likely the proxy just isn't usable.)

So we can make a simple check: if the returned IP matches the IP in the proxy dict, we treat that IP as a usable proxy and write it to a file.
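
A small sketch of that check; extract_echoed_ip is just an illustrative helper name, not part of the original scripts, and the bracket-matching regex mirrors the one used in the code:

import re

def extract_echoed_ip(page_text):
    # Pull out the IP that ip138 echoes back inside the square brackets
    found = re.search(r'\[(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\]', page_text, re.M | re.I)
    return found.group(1) if found else None

# If extract_echoed_ip(r.text) == ip, the proxy is considered usable and written to the file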

That's the whole idea; all that's left is to wire it up with a Queue and threads.

Here's the code:

#coding=utf-8

import requests
import re
from bs4 import BeautifulSoup as bs
import Queue
import threading 

class proxyPick(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        while not self._queue.empty():
            url = self._queue.get()

            proxy_spider(url)

def proxy_spider(url):
    headers = {
        #  .......
    }

    r = requests.get(url=url, headers=headers)
    soup = bs(r.content, "html.parser")
    data = soup.find_all(name='tr', attrs={'class': re.compile('|[^odd]')})

    for i in data:
        soup = bs(str(i), 'html.parser')
        data2 = soup.find_all(name='td')
        ip = str(data2[1].string)
        port = str(data2[2].string)
        types = str(data2[5].string).lower()

        proxy = {}
        proxy[types] = '%s:%s' % (ip, port)
        try:
            proxy_check(proxy, ip)
        except Exception, e:
            print e
            pass

def proxy_check(proxy, ip):
    url = 'http://1212.ip138.com/ic.asp'
    r = requests.get(url=url, proxies=proxy, timeout=6)

    f = open('E:/url/ip_proxy.txt', 'a+')

    soup = bs(r.text, 'html.parser')
    data = soup.find_all(name='center')
    for i in data:
        a = re.findall(r'\[(.*?)\]', i.string)
        if a and a[0] == ip:
            # print proxy
            f.write('%s' % proxy + '\n')
            print 'write down'

    f.close()

# proxy_spider()

def main():
    queue = Queue.Queue()
    for i in range(1, 2288):
        queue.put('http://www.xicidaili.com/nn/' + str(i))

    threads = []
    thread_count = 10

    for i in range(thread_count):
        spider = proxyPick(queue)
        threads.append(spider)

    for i in threads:
        i.start()

    for i in threads:
        i.join()

    print "It's done, sir!"

if __name__ == '__main__':
    main()

With that, all the usable proxy IPs the site provides get written to the file ip_proxy.txt.
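
Since each line in ip_proxy.txt is just the printed form of a proxy dict, a sketch like the following (assuming the file format written by the scripts above) can load the results back for reuse:

# coding=utf-8
import ast
import requests

proxies_list = []
with open('ip_proxy.txt') as f:
    for line in f:
        line = line.strip()
        if line:
            # Each line looks like {'http': '1.2.3.4:8080'}, which literal_eval can parse
            proxies_list.append(ast.literal_eval(line))

if proxies_list:
    r = requests.get('http://2018.ip138.com/ic.asp', proxies=proxies_list[0], timeout=6)
    print r.status_code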

That's the whole example of scanning proxies with Python and collecting usable proxy IPs; hopefully it serves as a useful reference.
