分散式scrapy+redis 爬取房天下租房資訊
阿新 • • 發佈:2018-12-05
利用scrapy框架結合redis分散式爬蟲
#建立專案
scrapy startproject homepro
#根據提示進入指定目錄建立爬蟲
scrapy genspider home example.com #spider爬蟲
scrapy genspider -t crawl home example.com #crawlspider爬蟲
其他不多說,直接上程式碼
items.py程式碼段
import scrapy


class HomeproItem(scrapy.Item):
    """One rental listing scraped from zu.fang.com."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    city = scrapy.Field()       # city name
    title = scrapy.Field()      # listing title
    rentway = scrapy.Field()    # rental type (whole flat / shared room)
    price = scrapy.Field()      # price text as shown on the page
    housetype = scrapy.Field()  # layout, e.g. rooms/halls
    area = scrapy.Field()       # floor area
    address = scrapy.Field()    # address (district - neighbourhood - estate)
    traffic = scrapy.Field()    # transit / surroundings description
#pipelines.py程式碼段
import sqlite3

# NOTE: to read SQLITE_FILE / SQLITE_TABLE from settings.py instead of the
# hard-coded values below, use:
#   from scrapy.utils.project import get_project_settings
#   settings = get_project_settings()
#   self.db = sqlite3.connect(settings['SQLITE_FILE'])


class HomeproPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        return item


class Sqlite3Pipeline(object):
    """Persist scraped items into a local SQLite database.

    The table (`dameo`) must exist with the eight columns used below; run
    the crawler once to create the db file, then create the table manually
    (or pre-create both before the first run).
    """

    def open_spider(self, spider):
        # BUGFIX: the original called sqlite3.connect(home.db), where `home`
        # is an undefined name (NameError). The argument must be the
        # database filename string.
        self.db = sqlite3.connect('home.db')
        self.cur = self.db.cursor()

    def close_spider(self, spider):
        self.db.close()

    def process_item(self, item, spider):
        self.save_to_sqlite(item)
        return item

    def save_to_sqlite(self, item):
        # Parameterized query: avoids the quoting/SQL-injection problems the
        # original %-formatted string had (e.g. a title containing a quote
        # would break the statement).
        sql = ('insert into dameo(city,title,rentway,price,housetype,'
               'area,address,traffic) values(?,?,?,?,?,?,?,?)')
        params = (item['city'], item['title'], item['rentway'], item['price'],
                  item['housetype'], item['area'], item['address'], item['traffic'])
        try:
            self.cur.execute(sql, params)
            self.db.commit()
        except Exception as e:
            # Best-effort persistence: log the error and roll back, but do
            # not kill the spider over one bad row.
            print(e)
            self.db.rollback()
        return item
#也可以寫入mysql
import pymysql


class mysqlPipeline(object):
    """Persist scraped items into a MySQL table via pymysql.

    Replace the placeholder host/user/password/database values with real
    connection parameters before use.
    """

    def open_spider(self, spider):
        # BUGFIX: pymysql.connect takes `password=`, not `pwd=`; the original
        # keyword would raise a TypeError.
        self.connect = pymysql.connect(host='主機', port=3306, user='使用者名稱t',
                                       password='密碼', database='資料庫',
                                       charset='utf8')

    def close_spider(self, spider):
        self.connect.close()

    def process_item(self, item, spider):
        self.save_mysql(item)
        return item

    def save_mysql(self, item):
        cursor = self.connect.cursor()
        # BUGFIX: the original statement was the empty placeholder
        # 'insert into zufang()' and could never execute. Insert the same
        # eight fields as the SQLite pipeline, using parameter binding
        # (pymysql uses %s placeholders with a params tuple).
        sql = ('insert into zufang(city,title,rentway,price,housetype,'
               'area,address,traffic) values(%s,%s,%s,%s,%s,%s,%s,%s)')
        params = (item['city'], item['title'], item['rentway'], item['price'],
                  item['housetype'], item['area'], item['address'], item['traffic'])
        try:
            cursor.execute(sql, params)
            self.connect.commit()
        except Exception as e:
            print(e)
            self.connect.rollback()
#寫入mongodb
import pymongo


class mongodbPipeline(object):
    """Write each scraped item into MongoDB (database anjuke, collection zufang)."""

    def open_spider(self, spider):
        # One client for the whole spider run.
        self.client = pymongo.MongoClient(host='主機', port=27017)

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # database: anjuke, collection: zufang
        collection = self.client.anjuke.zufang
        collection.insert(dict(item))
        return item
# -*- coding: utf-8 -*-
# Scrapy settings for homepro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Use the scrapy-redis scheduler: pending requests live in Redis, so every
# worker connected to the same Redis server shares one crawl queue.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Use the scrapy-redis duplicate filter (request fingerprints kept in Redis,
# shared across all workers).
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
# Queue class that orders pending requests.
# Default: by priority (Scrapy's default), a sorted-set-based queue that is
# neither strict FIFO nor LIFO.
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
BOT_NAME = 'homepro'
SPIDER_MODULES = ['homepro.spiders']
NEWSPIDER_MODULE = 'homepro.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'homepro.middlewares.HomeproSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'homepro.middlewares.HomeproDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'homepro.pipelines.HomeproPipeline': 300,
    # 'scrapy_redis.pipelines.RedisPipeline': 300,
    'homepro.pipelines.Sqlite3Pipeline': 301,
}
# NOTE(review): Sqlite3Pipeline currently hard-codes its own database
# filename; these two settings are only read if the commented-out
# get_project_settings branch in pipelines.py is enabled.
SQLITE_FILE = 'sqlite.db'
SQLITE_TABLE = 'dameo'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Redis server shared by all workers: on follower machines, uncomment and
# point at the master's IP (defaults to localhost when unset).
# REDIS_HOST = '10.8.153.73'
# REDIS_PORT = 6379
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Connection': 'keep-alive',
    'Accept-Encoding': 'gzip, deflate, sdch'
}
#爬蟲程式碼
# -*- coding: utf-8 -*-
import scrapy
from homepro.items import HomeproItem
from scrapy_redis.spiders import RedisCrawlSpider
# scrapy.Spider
class HomeSpider(RedisCrawlSpider):
    """Distributed spider for zu.fang.com rental listings.

    Start URLs are fed through Redis rather than `start_urls`: push the
    city-index page onto the `homespider:start_urls` key (lpush) and every
    worker connected to the same Redis server pulls requests from the
    shared queue.
    """
    name = 'home'
    allowed_domains = ['zu.fang.com']
    # start_urls = ['http://zu.fang.com/cities.aspx']
    redis_key = 'homespider:start_urls'

    def parse(self, response):
        """Parse the city-index page and queue one request per city."""
        hrefs = response.xpath('//div[@class="onCont"]/ul/li/a/@href').extract()
        for href in hrefs:
            # City links are protocol-relative ("//xx.zu.fang.com/...").
            href = 'http:' + href
            yield scrapy.Request(url=href, callback=self.parse_city, dont_filter=True)

    def parse_city(self, response):
        """Read a city's total page count and queue every listing page."""
        # Pager text looks like "共N頁"; strip the surrounding characters to
        # keep the number.
        page_num = response.xpath('//div[@id="rentid_D10_01"]/span[@class="txt"]/text()').extract()[0].strip('共頁')
        # BUGFIX: the original range(1, int(page_num)) stopped one short and
        # never crawled the last page; use int(page_num) + 1 so all pages
        # are visited.
        for page in range(1, int(page_num) + 1):
            if page == 1:
                url = response.url
            else:
                # Pagination URLs follow the pattern .../house/i3X with
                # X = 30 + page number — TODO confirm against the site.
                url = response.url + 'house/i%d' % (page + 30)
            print('*' * 100)
            print(url)
            yield scrapy.Request(url=url, callback=self.parse_houseinfo, dont_filter=True)

    def parse_houseinfo(self, response):
        """Extract one HomeproItem per listing block on a listing page."""
        divs = response.xpath('//dd[@class="info rel"]')
        for info in divs:
            # City name comes from the page-level breadcrumb, hence the
            # absolute XPath (same value for every listing on the page).
            city = info.xpath('//div[@class="guide rel"]/a[2]/text()').extract()[0].rstrip("租房")
            title = info.xpath('.//p[@class="title"]/a/text()').extract()[0]
            # The bold info paragraph holds rent type / layout / area in order.
            rentway = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[0].extract().replace(" ", '').lstrip('\r\n')
            housetype = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[1].extract().replace(" ", '')
            area = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[2].extract().replace(" ", '')
            addresses = info.xpath('.//p[@class ="gray6 mt12"]//span/text()').extract()
            address = '-'.join(addresses)
            try:
                des = info.xpath('.//p[@class ="mt12"]//span/text()').extract()
                traffic = '-'.join(des)
            except Exception:
                traffic = "暫無詳細資訊"
            # Price is split across a <span> (amount) and trailing text (unit).
            p_name = info.xpath('.//div[@class ="moreInfo"]/p/text()').extract()[0]
            p_price = info.xpath('.//div[@class ="moreInfo"]/p/span/text()').extract()[0]
            price = p_price + p_name
            item = HomeproItem()
            item['city'] = city
            item['title'] = title
            item['rentway'] = rentway
            item['price'] = price
            item['housetype'] = housetype
            item['area'] = area
            item['address'] = address
            item['traffic'] = traffic
            yield item
然後把程式碼分發到其他附屬機器並分別啟動爬蟲;附屬機器的redis客戶端連線主伺服器的redis
redis-cli -h 主伺服器ip
#主伺服器先啟動redis-server
#再啟動redis-cli
lpush homespider:start_urls 起始的url