
Python crawler: scrape the detailed information of all second-hand houses on Lianjia Shenzhen


1. Problem Description:

Scrape the detailed information of all second-hand houses (ershoufang) listed on Lianjia Shenzhen and store the scraped data in a CSV file.

2. Approach:

(1) Target URL: https://sz.lianjia.com/ershoufang/

(2) Code structure:

class LianjiaSpider(object):

    def __init__(self):            # initialize headers and the data list

    def getMaxPage(self, url):     # get the maximum page number (maxPage)

    def parsePage(self, url):      # parse each page and collect the Link of every house

    def parseDetail(self, url):    # follow each Link and extract the house's detailed information

(3) The __init__(self) initializer

· headers uses the fake_useragent library to generate a random User-Agent request header.
· datas is an empty list used to store the scraped data.

def __init__(self):
    self.headers = {"User-Agent": UserAgent().random}
    self.datas = list()
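
The fake_useragent package has to be installed separately (pip install fake-useragent); every access to UserAgent().random returns a randomly chosen real browser User-Agent string, which makes the spider's requests look less uniform. A quick check:

from fake_useragent import UserAgent

ua = UserAgent()
# Each access returns a randomly chosen browser User-Agent string
print(ua.random)
print(ua.random)  # usually different from the previous one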

(4) The getMaxPage() function

It is mainly used to get the maximum number of pages of second-hand house listings.

def getMaxPage(self, url):
    response = requests.get(url, headers = self.headers)
    if response.status_code == 200:
        source = response.text
        soup = BeautifulSoup(source, "html.parser")
        pageData = soup.find("div", class_ = "page-box house-lst-page-box")["page-data"]
        # pageData = '{"totalPage":100,"curPage":1}'; eval() turns this JSON-like string into a dict
        maxPage = eval(pageData)["totalPage"]
        return maxPage
    else:
        print("Fail status: {}".format(response.status_code))
        return None
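
The page-data attribute holds a small JSON string, so json.loads() from the standard library is a safer way to turn it into a dict than eval(), which will execute whatever expression it is given. A minimal sketch of that alternative:

import json

pageData = '{"totalPage":100,"curPage":1}'   # example value of the page-data attribute
maxPage = json.loads(pageData)["totalPage"]  # parse the JSON string instead of using eval()
print(maxPage)  # 100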

(5) The parsePage() function

It handles pagination and collects the links of every second-hand house on each page. Pagination is done by rebuilding the URL inside a for loop, and the loop's upper bound is the maximum page number obtained from getMaxPage() above.

def parsePage(self, url):
    maxPage = self.getMaxPage(url)
    # parse each page and collect the link of every second-hand house
    for pageNum in range(1, maxPage+1 ):
        url = "https://sz.lianjia.com/ershoufang/pg{}/".format(pageNum)
        print("當前正在爬取: {}".format(url))
        response = requests.get(url, headers = self.headers)
        soup = BeautifulSoup(response.text, "html.parser")
        links = soup.find_all("div", class_ = "info clear")
        for i in links:
            link = i.find("a")["href"]    # each "info clear" div contains several <a> tags; we only need the first, so find() is used
            detail = self.parseDetail(link)
            self.datas.append(detail)
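
Fetching up to 100 listing pages plus a detail page for every house in a tight loop can easily trigger Lianjia's anti-scraping measures. A short random pause between requests is a common precaution; the sketch below shows where it could go (the 1-3 second range is an arbitrary choice, not part of the original code):

import time
import random

maxPage = 3  # example value; in the spider this comes from getMaxPage()
for pageNum in range(1, maxPage + 1):
    url = "https://sz.lianjia.com/ershoufang/pg{}/".format(pageNum)
    print("Currently scraping: {}".format(url))
    # ... request and parse the page as in parsePage() above ...
    time.sleep(random.uniform(1, 3))  # wait 1-3 seconds before the next request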

(6) The parseDetail() function

For each house link collected by parsePage(), it sends a request to that link and extracts the detailed information from the page.

def parseDetail(self, url):
    response = requests.get(url, headers = self.headers)
    detail = {}
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, "html.parser")
        detail["價格"] = soup.find("span", class_ = "total").text
        detail["單價"] = soup.find("span", class_ = "unitPriceValue").text
        detail["小區"] = soup.find("div", class_ = "communityName").find("a", class_ = "info").text
        detail["位置"] = soup.find("div", class_="areaName").find("span", class_="info").text
        detail["地鐵"] = soup.find("div", class_="areaName").find("a", class_="supplement").text
        base = soup.find("div", class_ = "base").find_all("li") # basic information
        detail["戶型"] = base[0].text[4:]
        detail["面積"] = base[2].text[4:]
        detail["朝向"] = base[6].text[4:]
        detail["電梯"] = base[10].text[4:]
        return detail
    else:
        return None
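
Note that soup.find() returns None when an element is absent (for example, a listing without subway information), and calling .text on None raises AttributeError, which would abort the crawl. One defensive option is a small helper that falls back to an empty string; this is only a sketch, not part of the original code:

from bs4 import BeautifulSoup

def safe_text(node):
    # Return the stripped text of a tag, or an empty string when the tag is missing
    return node.get_text(strip=True) if node is not None else ""

# Example: a detail page whose areaName block has no "supplement" link
soup = BeautifulSoup('<div class="areaName"><span class="info">南山</span></div>', "html.parser")
area = soup.find("div", class_="areaName")
print(safe_text(area.find("a", class_="supplement")))  # prints "" instead of raising an error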

(7) Save the data to a CSV file

This uses the DataFrame() constructor from the pandas library. When a DataFrame is built from a list of dicts, older pandas versions sort the column names alphabetically by default, so the columns argument is passed to set a custom column order.

    # save all the scraped second-hand house data to a CSV file
    data = pd.DataFrame(self.datas)
    # columns: custom column order (the default column order may not be the one we want)
    columns = ["小區", "戶型", "面積", "價格", "單價", "朝向", "電梯", "位置", "地鐵"]
    data.to_csv("./Lianjia_II.csv", encoding='utf_8_sig', index=False, columns=columns)
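
The encoding='utf_8_sig' option writes a UTF-8 byte-order mark so that the Chinese column headers display correctly when the CSV is opened in Excel. A tiny standalone example of how the columns argument controls the output order (the record below is made up purely for illustration):

import pandas as pd

rows = [{"價格": "500萬", "小區": "示例小區", "面積": "89平米"}]  # hypothetical record
df = pd.DataFrame(rows)
# Only the listed columns are written, in exactly this order
df.to_csv("example.csv", encoding="utf_8_sig", index=False, columns=["小區", "面積", "價格"])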

3. Results


4. Full code:

# -*- coding: utf-8 -*-
# author: wangshx6
# date: 2018-11-07
# description: scrape the detailed information of all second-hand houses on Lianjia Shenzhen and save the data to a CSV file

import requests
from bs4 import BeautifulSoup
import pandas as pd
from fake_useragent import UserAgent

class LianjiaSpider(object):

    def __init__(self):
        self.headers = {"User-Agent": UserAgent().random}
        self.datas = list()

    def getMaxPage(self, url):
        response = requests.get(url, headers = self.headers)
        if response.status_code == 200:
            source = response.text
            soup = BeautifulSoup(source, "html.parser")
            pageData = soup.find("div", class_ = "page-box house-lst-page-box")["page-data"]
            # pageData = '{"totalPage":100,"curPage":1}'; eval() turns this JSON-like string into a dict
            maxPage = eval(pageData)["totalPage"]
            return maxPage
        else:
            print("Fail status: {}".format(response.status_code))
            return None


    def parsePage(self, url):
        maxPage = self.getMaxPage(url)
        # parse each page and collect the link of every second-hand house
        for pageNum in range(1, maxPage+1 ):
            url = "https://sz.lianjia.com/ershoufang/pg{}/".format(pageNum)
            print("當前正在爬取: {}".format(url))
            response = requests.get(url, headers = self.headers)
            soup = BeautifulSoup(response.text, "html.parser")
            links = soup.find_all("div", class_ = "info clear")
            for i in links:
                link = i.find("a")["href"]    # each "info clear" div contains several <a> tags; we only need the first, so find() is used
                detail = self.parseDetail(link)
                self.datas.append(detail)

        # save all the scraped second-hand house data to a CSV file
        data = pd.DataFrame(self.datas)
        # columns: custom column order (the default column order may not be the one we want)
        columns = ["小區", "戶型", "面積", "價格", "單價", "朝向", "電梯", "位置", "地鐵"]
        data.to_csv("./Lianjia_II.csv", encoding='utf_8_sig', index=False, columns=columns)


    def parseDetail(self, url):
        response = requests.get(url, headers = self.headers)
        detail = {}
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, "html.parser")
            detail["價格"] = soup.find("span", class_ = "total").text
            detail["單價"] = soup.find("span", class_ = "unitPriceValue").text
            detail["小區"] = soup.find("div", class_ = "communityName").find("a", class_ = "info").text
            detail["位置"] = soup.find("div", class_="areaName").find("span", class_="info").text
            detail["地鐵"] = soup.find("div", class_="areaName").find("a", class_="supplement").text
            base = soup.find("div", class_ = "base").find_all("li") # basic information
            detail["戶型"] = base[0].text[4:]
            detail["面積"] = base[2].text[4:]
            detail["朝向"] = base[6].text[4:]
            detail["電梯"] = base[10].text[4:]
            return detail
        else:
            return None

if __name__ == "__main__":
    Lianjia = LianjiaSpider()
    Lianjia.parsePage("https://sz.lianjia.com/ershoufang/")
