1. 程式人生 > >GuozhongCrawler看準網爬蟲動態切換IP漫爬蟲

GuozhongCrawler看準網爬蟲動態切換IP漫爬蟲

input arraylist [] java resource popu pre puts oschina

關於URL去重的部分代碼沒有提供,需要自己實現;這裏主要提供思路。

項目地址:http://git.oschina.net/woshidaniu/GuozhongCrawler/tree/master/example/changeProxyIp/

首先爬蟲入口類:


public class PervadeSpider {

public static void main(String[] args) {
CrawTaskBuilder builder = CrawlManager.getInstance()
.prepareCrawlTask("看準網漫爬蟲", DefaultPageDownloader.class)
.useThread(200)// 使用多個線程下載
.useDynamicEntrance(DynamicEntranceImpl.class)
.useProxyIpPool(KanzhunProxyIpPool.class, 800, 1000 * 60 * 20, 30)
.useQueueSimpleBlockingRequest()//採用廣度優先策略,當然redis隊列也是fifo。

假設想做分布式爬蟲的話能夠設置redis隊列
.usePageEncoding(PageEncoding.UTF8);
CrawlTask spider = builder.build();
CrawlManager.getInstance().start(spider);
}

public static final class DynamicEntranceImpl extends DynamicEntrance{

@Override
public List<StartContext> loadStartContext() {
StartContext context = new StartContext();
context.injectSeed(context.createPageRequest("http://www.kanzhun.com/companyl/search/?

ka=banner-com", ExtractUrlPageProcessor.class));//公司
context.injectSeed(context.createPageRequest("http://www.kanzhun.com/salaryl/search/?stype=&ka=banner-salary", ExtractUrlPageProcessor.class));//工資
context.injectSeed(context.createPageRequest("http://www.kanzhun.com/jobl/p/?

ka=banner-recruit", ExtractUrlPageProcessor.class));//招聘
context.injectSeed(context.createPageRequest("http://www.kanzhun.com/interviewl/search/?stype=&ka=banner-interview", ExtractUrlPageProcessor.class));//面試
context.injectSeed(context.createPageRequest("http://www.kanzhun.com/topic/100.html?ka=com-topic-1", ExtractUrlPageProcessor.class));//公司之最
return Arrays.asList(context);
}

}
}


動態代理IP提供類:

/**
 * Proxy-IP pool implementation: fetches a batch of proxies from a remote
 * text resource and extracts every "ip:port" pair found in the response.
 */
public class KanzhunProxyIpPool extends ProxyIpPool {

    // NOTE(review): the service behind this URL is expected to return exactly
    // as many entries as the {@code size} argument of initProxyIp(int).
    public static final String IP_RESOURCE = "地址忽略";

    /** Matches "a.b.c.d:port"; group 1 = IP address, group 2 = port. */
    private static final Pattern EXTRACT_IP =
            Pattern.compile("([\\d]{1,3}\\.[\\d]{1,3}\\.[\\d]{1,3}\\.[\\d]{1,3}):(\\d+)");

    public KanzhunProxyIpPool(int initSize, long pastTime, int max_use_count) {
        super(initSize, pastTime, max_use_count);
    }

    /**
     * Downloads the proxy list and parses it.
     *
     * @param size number of proxies requested (the remote resource is expected
     *             to honour this count)
     * @return the proxies parsed from the response; possibly fewer than
     *         {@code size} — and possibly empty — if the download fails
     */
    @Override
    protected List<ProxyIp> initProxyIp(int size) throws Exception {
        List<ProxyIp> ips = new ArrayList<ProxyIp>();
        StringBuilder buf = new StringBuilder();
        try {
            URL url = new URL(IP_RESOURCE);
            // try-with-resources closes the reader (and underlying stream)
            // even when reading throws, fixing a potential stream leak.
            try (BufferedReader br = new BufferedReader(
                    new InputStreamReader(url.openStream(), "utf-8"))) {
                String line;
                while ((line = br.readLine()) != null) {
                    buf.append(line).append("\n");
                }
            }
            Matcher matcher = EXTRACT_IP.matcher(buf);
            while (matcher.find()) {
                ips.add(new ProxyIp(matcher.group(1), Integer.parseInt(matcher.group(2))));
            }
        } catch (Exception e) {
            // Best-effort: log the failure plus the raw payload for diagnosis,
            // and return whatever was parsed so far.
            e.printStackTrace();
            System.out.println(buf);
        }
        return ips;
    }
}


漫爬頁面處理類:

public class ExtractUrlPageProcessor implements PageProcessor {



private String domain = "http://www.kanzhun.com";

private List<URLFilter> urlFilters = new ArrayList<URLFilter>();

private List<Pattern> urlRegulars = null;

public ExtractUrlPageProcessor(){
System.out.println("載入漫爬抽取URL正則");
urlRegulars = ConfigReader.getExtractRegular();
System.out.println("載入漫爬規則完成");
addURLFilter(new URLFilter() {

@Override
public boolean filter(String url) {
return !url.contains("javascript");//去除jquery標簽
}
});
addURLFilter(new URLFilter() {

@Override
public boolean filter(String url) {
return url.contains("http://www.kanzhun.com");//保證域名內URL
}
});
addURLFilter(new URLFilter() {

@Override
public boolean filter(String url) {
for (Pattern pattern : urlRegulars) {
boolean result = pattern.matcher(url).find();
if(result)
return true;
}
return false;//保證url符合urlRegulars裏隨意一個正則
}
});

}

/**
* kanzhunId抽取正則
*/
final static Pattern[] idExtracts = new Pattern[]{
Pattern.compile("gso(\\d+)[^\\d]+"),//簡單介紹抽取公司id
Pattern.compile("gsr(\\d+)[^\\d]+"),//點評抽取公司id
Pattern.compile("gsm(\\d+)[^\\d]+"),//面試抽取公司id
Pattern.compile("gsx(\\d+)[^\\d]+"),//工資抽取公司id
Pattern.compile("g(\\d+)[^\\d]+"),//招聘抽取公司id
Pattern.compile("gsp(\\d+)[^\\d]+"),//照片抽取公司id
Pattern.compile("gsl(\\d+)[^\\d]+")//員工抽取公司id
};

@Override
public PageScript getJavaScript() {
// TODO Auto-generated method stub
return null;
}


private Pattern normalContain = Pattern.compile("看準網");

@Override
public Pattern getNormalContain() {
return normalContain;
}

@Override
public void process(OkPage page,StartContext context,List<BasicRequest> queue,List<Proccessable> objectContainer) throws Exception {
// TODO Auto-generated method stub
/**
* 每一個頁面抽取的符合urlRegulars規則的url
*/
Set<String> extractUrls = new HashSet<String>();

/**
* 每一個頁面全部的kanzhunId
* 這裏解釋下比方阿裏巴巴的首頁是http://www.kanzhun.com/gso9012.html?ka=com1-title
* 那個阿裏巴巴的kanzhunId就是9012
* 我們能夠依據這個推導出阿裏巴巴的
* 點評頁:http://www.kanzhun.com/gsr9012.html?ka=com-blocker1-review
* 面試頁:http://www.kanzhun.com/gsm9012.html?ka=com-blocker1-interview
* 工資頁:http://www.kanzhun.com/gsx9012.html?ka=com-blocker1-salary
* 招聘頁:http://www.kanzhun.com/job/g9012.html?ka=com-blocker1-job
* 照片頁:http://www.kanzhun.com/gsp9012.html?ka=com-blocker1-photo
* 員工頁:http://www.kanzhun.com/gsl9012.html?ka=com-blocker1-employee
*
*/
Set<String> kanzhunIds = new HashSet<String>();

Document doc = Jsoup.parse(page.getContent());
Elements allLinks = doc.select("a");
String href = null;
for (Element link : allLinks) {
href = link.attr("href");
if(href.startsWith("/")){
href = domain+href;
}
if(pass(href)){
extractUrls.add(href);
}
//抽取頁面全部包括的kanzhunID
for (Pattern pattern : idExtracts) {
Matcher matcher = pattern.matcher(href);
if(matcher.find()){
kanzhunIds.add(matcher.group(1));
}
}
}

//step1
System.out.println(page.getRequest().getUrl()+"抽取了URL"+extractUrls.size()+"個:");
//對url去重(這個須要你們自己實現這裏用偽代碼表示)
System.out.println("去出反復url...");

//step2
System.out.println(page.getRequest().getUrl()+"抽取了kanzhunId"+kanzhunIds.size()+"個:");
//對抓過的kanzhunId進行去重(這個須要你們自己實現這裏用偽代碼表示)
System.out.println("kanzhunId進行去重...");


//將抽取的URL增加到隊列
for (String extractUrl:extractUrls) {
PageRequest pageRequest = context.createPageRequest(extractUrl, ExtractUrlPageProcessor.class);
queue.add(pageRequest);//加到隊列
}

//將抽取的kanzhunId封裝成每一個企業的主頁URL。

抓取企業信息
for (String kanzhunId:kanzhunIds) {
PageRequest pageRequest = context.createPageRequest("http://www.kanzhun.com/gso"+kanzhunId+".html?

ka=com1-title", CompanyPageProcessor.class);
queue.add(pageRequest);//加到隊列
}

}

@Override
public void processErrorPage(Page arg0, StartContext arg1) throws Exception {
// TODO Auto-generated method stub

}

/**
* 對每一個URL進行filter
* @param url
* @return
*/
private boolean pass(String url){
for (URLFilter filter : urlFilters) {
if(!filter.filter(url)){
return false;
}
}
return true;
}



public void addURLFilter(URLFilter urlFilter){
urlFilters.add(urlFilter);
}
}

最後貼上URL抽取規則的配置文件。


<ExtractRegular>
<!-- 行業URL 或者 公司標簽 或者 城市-->
<Regular>/pl[act][a-z0-9]+\.html</Regular>


<!-- 行業URL 或者 城市-->
<Regular>/xs[ac][a-z0-9]+\.html</Regular>


<!-- 招聘分類URL-->
<Regular>/jobli_.+</Regular>

<!-- 面試分類URL-->
<Regular>/ms[ac].+</Regular>

<!-- 公司之最-->
<Regular>/topic/[a-z0-9]+</Regular>


<!-- 熱門職位-->
<Regular>/salary/(\d+)/</Regular>
<Regular>/interview/(\d+)/</Regular>
</ExtractRegular>



搞定。

GuozhongCrawler看準網爬蟲動態切換IP漫爬蟲