1. 程式人生 > >簡易JAVA爬蟲練習,為新手總結的三種爬蟲方法

簡易JAVA爬蟲練習,為新手總結的三種爬蟲方法

這是想學習java爬蟲的新手必經之路,也是最簡單的幾種JAVA爬蟲爬取網頁資訊的方法,當然,這幾種方法爬取的網頁有限,對於需要登入的網頁則還需進行更復雜的操作,這裡就不做多餘的解釋,畢竟是寫給新手的,希望對剛學習JAVA爬蟲的人能有點幫助。

一、 通過urlconnection抓取資訊:


步驟:
1.獲取url
2.獲取http請求
3.獲取狀態碼
4.根據狀態碼返回資訊。

程式碼:
package com.soft.crawler;


import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import sun.net.www.protocol.http.HttpURLConnection;


public class Crawler {
    /**
     * Fetches the HTML source of a fixed page over HTTP and prints it line by line.
     * Prints an error message (with the status code or exception) when the fetch fails.
     */
    public static void main(String[] args) {
        String line;
        // 1. Build the URL object describing the page to visit.
        try {
            URL url = new URL("http://www.sina.com.cn");
            // 2. Open the HTTP connection. Fully qualified as java.net.HttpURLConnection:
            //    the file's import of the internal sun.net.www.protocol.http class should
            //    not be relied upon (sun.* APIs are unsupported and may disappear).
            java.net.HttpURLConnection urlConnection =
                    (java.net.HttpURLConnection) url.openConnection();
            // 3. Read the HTTP response status code.
            int responseCode = urlConnection.getResponseCode();
            // 4. On 200 OK, stream the page source to stdout.
            //    try-with-resources closes the reader (the original leaked it).
            if (responseCode == java.net.HttpURLConnection.HTTP_OK) {
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(urlConnection.getInputStream(), "utf-8"))) {
                    while ((line = reader.readLine()) != null) {
                        System.out.println(line);
                    }
                }
            } else {
                System.out.println("獲取不到原始碼 ,伺服器響應程式碼為:"+responseCode);
            }
        } catch (Exception e) {
            System.out.println("獲取不到網頁原始碼:"+e);
        }
    }
}




二、通過httpclient抓取資訊:



步驟:
//建立一個客戶端,類似開啟一個瀏覽器


HttpClient httpClient = new HttpClient();


//建立一個get方法,類似在瀏覽器中輸入一個地址,path則為URL的值


GetMethod getMethod = new GetMethod(path);


//獲得響應的狀態碼


int statusCode = httpClient.executeMethod(getMethod);


//得到返回的內容


String result = getMethod.getResponseBodyAsString();


//釋放資源


getMethod.releaseConnection();





程式碼:

import java.io.FileWriter;
import java.io.IOException;
import java.util.Scanner;

import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpException;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.methods.GetMethod;


public class Crawler {
    private static HttpClient httpClient = new HttpClient();
    static GetMethod getmethod;

    /**
     * Downloads the page at {@code path}, prints its body, and saves it to hello.txt.
     *
     * @param path URL of the page to fetch
     * @return {@code true} when the server answered 200 OK and the file was written,
     *         {@code false} for any other status code
     * @throws HttpException on HTTP protocol errors
     * @throws IOException   on connection or file I/O errors
     */
    public static boolean downloadPage(String path) throws HttpException,
            IOException {
        getmethod = new GetMethod(path);
        try {
            // Execute the GET request and obtain the response status code.
            int statusCode = httpClient.executeMethod(getmethod);
            if (statusCode == HttpStatus.SC_OK) {
                // Read the body exactly once and reuse it; the original called
                // getResponseBodyAsString() twice, reading the response twice.
                String pageString = getmethod.getResponseBodyAsString();
                System.out.println("response=" + pageString);
                // try-with-resources closes the file even if write() throws
                // (the original leaked the FileWriter on that path).
                try (FileWriter fwrite = new FileWriter("hello.txt")) {
                    fwrite.write(pageString, 0, pageString.length());
                    fwrite.flush();
                }
                return true;
            }
            return false;
        } finally {
            // Always return the connection to the client, even on exceptions;
            // the original skipped this when executeMethod() threw.
            getmethod.releaseConnection();
        }
    }

    /**
     * Test driver: reads a URL from stdin, downloads it, and reports start/end.
     */
    public static void main(String[] args) {
        try {
            Scanner in = new Scanner(System.in);
            System.out.println("Input the URL of the page you want to get:");
            String path = in.next();
            System.out.println("program start!");
            Crawler.downloadPage(path);
            System.out.println("Program end!");
        } catch (HttpException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}




三,通過jsoup獲取網頁資訊:



package com.soft.test;


import java.io.File;
import java.io.IOException;


import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;


public class spider {
    /**
     * Fetches baidu.com with jsoup and prints the first anchor element found.
     *
     * @throws IOException if the connection fails or times out
     */
    public static void main(String[] args) throws IOException {
        String url = "http://www.baidu.com";
        // Connect with a 3-second timeout and parse the response into a Document.
        Document document = Jsoup.connect(url).timeout(3000).get();

        // Select every <a> element in the parsed document.
        Elements elements = document.select("a");
        // Bug fix: the original commented out this declaration but still
        // referenced `element` below, which does not compile. Take the first
        // matched node from the selection.
        Element element = elements.get(0);
        System.out.println(element);
    }
}