
Scraping Static Pages (Learning a Simple Crawler)

Merry Christmas (づ ̄ 3 ̄)づ~~~

On this half-holiday kind of day I've gotten a bit lazy too, so I worked through iMooc's course on simple crawling of static pages (link: http://www.imooc.com/learn/563). Packed with good material~~

So I crawled the Baidu Baike entry for The Legend of Mi Yue (羋月傳), mwah~~~

# coding=utf-8
import re
import urllib2
import urlparse

from bs4 import BeautifulSoup


class UrlManager(object):
    """Tracks URLs waiting to be crawled and URLs already crawled."""

    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url


class HtmlDownloader(object):
    """Downloads a page and returns its raw HTML, or None on failure."""

    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()


class HtmlParser(object):
    """Extracts follow-up entry links and the title/summary from a page."""

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Entry links look like /view/123.htm
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data


class HtmlOutputer(object):
    """Collects the scraped records and writes them out as an HTML table."""

    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write("<html>")
        fout.write("<meta charset='UTF-8'>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            # Python 2 writes ASCII by default, so encode the Chinese text explicitly
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            print data['title'].encode('utf-8')
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()


class SpiderMain(object):
    """Wires the manager, downloader, parser and outputer into a crawl loop."""

    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print 'craw %d : %s' % (count, new_url)
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 50:  # stop after 50 pages
                    break
                count = count + 1
            except:
                print 'craw failed'
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "http://baike.baidu.com/subview/9975572/10764629.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
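
Before running the whole crawl, it can help to check the parser's two find rules in isolation. Below is a minimal sanity check on a hand-written snippet (the fixture HTML is my own test data, not a real Baike page):

# coding=utf-8
# Minimal sanity check of the find rules on a hand-written fixture
# (my own test snippet, not a real Baike page).
import re
from bs4 import BeautifulSoup

html = '''
<dd class="lemmaWgt-lemmaTitle-title"><h1>羋月傳</h1></dd>
<div class="lemma-summary">一部古裝劇。</div>
<a href="/view/123.htm">some entry</a>
'''
soup = BeautifulSoup(html, 'html.parser')
# Should print the title, the summary, and the matched relative link
print soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1').get_text().encode('utf-8')
print soup.find('div', class_="lemma-summary").get_text().encode('utf-8')
print soup.find('a', href=re.compile(r"/view/\d+\.htm"))['href']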

I got a few small details wrong, so debugging took quite a bit of effort. Too careless of me T^T

One thing worth pointing out: following the instructor's code, the Chinese text written to output.html came out garbled, while writing the same data to output.txt was fine. A bit of searching showed that an HTML page containing Chinese also needs an explicit encoding declaration, so I added the following line to the original code:

fout.write("<meta charset='UTF-8'>")
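An alternative that avoids the per-field .encode('utf-8') calls (my own variant, not from the course) is to open the output file as UTF-8 text with io.open, so unicode strings can be written directly:

# coding=utf-8
# Alternative sketch (my own, not the course code): open the file as
# UTF-8 text so unicode strings can be written without manual encoding.
import io

title = u'羋月傳'  # get_text() returns unicode strings like this
fout = io.open('output.html', 'w', encoding='utf-8')
fout.write(u"<html><meta charset='UTF-8'><body>")
fout.write(u"<td>%s</td>" % title)
fout.write(u"</body></html>")
fout.close()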

Now off to happily watch The Legend of Mi Yue~~~~