package web.play.rss.util;
import java.io.File;
import java.io.BufferedReader;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.Hashtable;
import web.play.rss.util.dao.SpiderDAO;
public class GetWeb {
private int webDepth = 2;// crawl depth
private int intThreadNum = 10;// number of worker threads
private String strHomePage = "";// home page URL
private String myDomain;// domain name
private String fPath = "web";// directory for saved page files
private ArrayList<String> arrUrls = new ArrayList<String>();// URLs waiting to be processed
private ArrayList<String> arrUrl = new ArrayList<String>();// every URL seen, kept for building the index
private Hashtable<String, Integer> allUrls = new Hashtable<String, Integer>();// page number of every URL
private Hashtable<String, Integer> deepUrls = new Hashtable<String, Integer>();// crawl depth of every URL
private int intWebIndex = 0;// file index of each page, starting from 0
private String charset = "GBK";
private String report = "";
private long startTime;
private int webSuccessed = 0;
private int webFailed = 0;
private String searchKey = "";
private List<String> searchedUrl = new ArrayList<String>();// URLs fetched successfully
private List<String> searchedKey = new ArrayList<String>();// keyword matches collected so far
private boolean searchCurrent = false;
private boolean saveUrl = true;
private boolean saveSearchKey = true;
private boolean debug = false;
public void setDebug(boolean _d){
this.debug = _d ;
}
public void setSaveSearchKey(boolean saveSearchKey) {
this.saveSearchKey = saveSearchKey;
}
public void setSaveUrl(boolean saveUrl) {
this.saveUrl = saveUrl;
}
public void setSearchCurrent(boolean searchCurrent) {
this.searchCurrent = searchCurrent;
}
public String getSearchKey(){
return searchKey;
}
public List<String> getSearchedKey() {
return searchedKey;
}
public List<String> getSearchedUrl() {
return searchedUrl;
}
public GetWeb(String s,String key) {
this.strHomePage = s;
this.searchKey = key;
}
public String getHomePage() {
return this.strHomePage;
}
public GetWeb(String s, int i) {
this.strHomePage = s;
this.webDepth = i;
}
public synchronized void addWebSuccessed() {
webSuccessed++;
}
public synchronized void addWebFailed() {
webFailed++;
}
public synchronized void addReport(String s) {
try {
report += s;
PrintWriter pwReport = new PrintWriter(new FileOutputStream(
"report.txt"));
pwReport.println(report);
pwReport.close();
} catch (Exception e) {
System.out.println("Failed to save the report file!");
}
}
public synchronized String getAUrl() {
if (arrUrls.isEmpty()) {
return null;// callers must handle an empty queue
}
String tmpAUrl = arrUrls.get(0);
arrUrls.remove(0);
return tmpAUrl;
}
public synchronized String getUrl() {
String tmpUrl = arrUrl.get(0);
arrUrl.remove(0);
return tmpUrl;
}
public synchronized Integer getIntWebIndex() {
intWebIndex++;
return intWebIndex;
}
public void getWebByHomePage() {
System.out.println("start ... ...");
if (searchCurrent) {
searchKey(getContent(this.getHomePage()), this.getSearchKey());
}
startTime = System.currentTimeMillis();
this.myDomain = getDomain();
if (myDomain == null) {
System.out.println("Wrong input!");
// System.exit(1);
return;
}
// System.out.println("Homepage = " + strHomePage);addReport("Homepage = " + strHomePage + "!\n");System.out.println("Domain = " + myDomain);addReport("Domain = " + myDomain + "!\n");
arrUrls.add(strHomePage);
arrUrl.add(strHomePage);
allUrls.put(strHomePage, 0);
deepUrls.put(strHomePage, 1);
// File fDir = new File(fPath);
// if (!fDir.exists()) {
// fDir.mkdir();
// }
// System.out.println("Start!");this.addReport("Start!\n");
String tmp = getAUrl();
this.getWebByUrl(tmp, charset, allUrls.get(tmp) + "");
for (int i = 0; i < intThreadNum; i++) {
new Thread(new Processer(this)).start();
}
while (true) {
if (arrUrls.isEmpty() && Thread.activeCount() == 1) {
long finishTime = System.currentTimeMillis();
long costTime = finishTime - startTime;
System.out.println("\n\n\n\n\nFinished!");
System.out.println("Start time = " + startTime + " "
+ "Finish time = " + finishTime + " "
+ "Cost time = " + costTime + "ms");
// addReport("Start time = " + startTime + " "
// + "Finish time = " + finishTime + " "
// + "Cost time = " + costTime + "ms" + "\n");
System.out.println("Total url number = "
+ (webSuccessed + webFailed) + " Successed: "
+ webSuccessed + " Failed: " + webFailed);
// addReport("Total url number = " + (webSuccessed + webFailed)
// + " Successed: " + webSuccessed + " Failed: "
// + webFailed + "\n");
String strIndex = "";
String tmpUrl = "";
while (!arrUrl.isEmpty()) {
tmpUrl = getUrl();
strIndex += "Web depth:" + deepUrls.get(tmpUrl)
+ " Filepath: " + fPath + "/web"
+ allUrls.get(tmpUrl) + ".htm" + " url:" + tmpUrl
+ "\n\n";
}
// System.out.println(strIndex);
try {
PrintWriter pwIndex = new PrintWriter(new FileOutputStream(
"fileindex.txt"));
pwIndex.println(strIndex);
pwIndex.close();
} catch (Exception e) {
System.out.println("Failed to save the index file!");
}
break;
}
try {
Thread.sleep(500);// avoid a busy-wait while the worker threads finish
} catch (InterruptedException e) {
break;
}
}
}
private String getContent(String strUrl) {
try {
URL pageUrl = new URL(strUrl);
// Open connection to URL for reading.
BufferedReader reader = new BufferedReader(new InputStreamReader(
pageUrl.openStream(), charset));
// Read page into buffer.
String line;
StringBuffer pageBuffer = new StringBuffer();
while ((line = reader.readLine()) != null) {
pageBuffer.append(line);
if (debug) {
System.out.println(line);
}
}
reader.close();
return pageBuffer.toString();
} catch (Exception e) {
if (debug) {
System.out.println("getContent failed for " + strUrl + ": " + e);
}
}
return null;
}
public void getWebByUrl(String strUrl, String charset, String fileIndex) {
try {
// if(charset==null||"".equals(charset))charset="utf-8";
// System.out.println("Getting web by url: " + strUrl);
// addReport("Getting web by url: " + strUrl + "\n");
URL url = new URL(strUrl);
URLConnection conn = url.openConnection();
InputStream is = conn.getInputStream();// the page is only read, never written to
// String filePath = fPath + "/web" + fileIndex + ".htm";
// PrintWriter pw = null;
// FileOutputStream fos = new FileOutputStream(filePath);
// OutputStreamWriter writer = new OutputStreamWriter(fos);
// pw = new PrintWriter(writer);
BufferedReader bReader = new BufferedReader(new InputStreamReader(
is, charset));// decode with the configured charset rather than the platform default
StringBuffer sb = new StringBuffer();
String rLine = null;
String tmp_rLine = null;
while ((rLine = bReader.readLine()) != null) {
tmp_rLine = rLine;
int str_len = tmp_rLine.length();
if (str_len > 0) {
sb.append("\n" + tmp_rLine);
// pw.println(tmp_rLine);
// pw.flush();
if (deepUrls.get(strUrl) < webDepth)
getUrlByString(tmp_rLine, strUrl);
}
tmp_rLine = null;
}
bReader.close();
// pw.close();
if (!searchCurrent) {
// search the page that was just read instead of downloading it a second time
searchKey(sb.toString(), this.getSearchKey());
}
if(saveUrl){
searchedUrl.add(strUrl);
}
// System.out.println("Get web successfully! " + strUrl);
// addReport("Get web successfully! " + strUrl + "\n");
addWebSuccessed();
} catch (Exception e) {
System.out.println("Get web failed! " + strUrl);
// addReport("Get web failed! " + strUrl + "\n");
addWebFailed();
}
}
public String getDomain() {
String reg = "(?<=http://[a-zA-Z0-9]{0,100}[.]{0,1})[^.\\s]*?\\.(com|cn|net|org|biz|info|cc|tv)";
Pattern p = Pattern.compile(reg, Pattern.CASE_INSENSITIVE);
Matcher m = p.matcher(strHomePage);
if (m.find()) {
return m.group(0);
}
return null;
}
public List<String> searchKey(String content, String key) {
List<String> list = new ArrayList<String>();
if (content == null || key == null || key.equals("")) {
return list;// nothing to search
}
try {
Pattern p = Pattern.compile(key, Pattern.CASE_INSENSITIVE);
Matcher m = p.matcher(content);
while (m.find()) {
String searched = m.group(0);
if (searched != null && !searched.trim().equals("") && !searched.trim().equals("null")) {
list.add(searched);// matches are now actually returned to the caller
if (saveSearchKey) {
searchedKey.add(searched);
}
}
}
} catch (Exception e) {
// an invalid pattern simply yields an empty result
}
return list;
}
public void getUrlByString(String inputArgs, String strUrl) {
// Note: the scheme must be written literally; the old "[http://]" was a
// character class matching any single one of the characters h, t, p, ':' or '/'.
String regUrl = "(?<=(href=)[\"]?[']?)http://[^\\s\"'\\?]*(" + myDomain + ")[^\\s\"'>]*";
Pattern p = Pattern.compile(regUrl, Pattern.CASE_INSENSITIVE);
Matcher m = p.matcher(inputArgs);
while (m.find()) {
String found = m.group(0);
if (!allUrls.containsKey(found)) {
// System.out.println("Find a new url,depth:" + (deepUrls.get(strUrl) + 1) + " " + found);
// addReport("Find a new url,depth:" + (deepUrls.get(strUrl) + 1) + " " + found + "\n");
arrUrls.add(found);
arrUrl.add(found);
allUrls.put(found, getIntWebIndex());
deepUrls.put(found, (deepUrls.get(strUrl) + 1));
}
}
}
public static void main(String[] args) {
String url = "http://hi.baidu.com/guoyan227/blog/item/b759012375f0a846925807df.html";//http://www.google.com.hk/search?q=tom&hl=zh-CN&safe=strict&start=10&sa=N
// url = "http://www.google.com.hk/search?hl=zh-CN&q=javagoogle";
// url = "http://www.baidu.com/s?bs=%C0%B5%CE%B0%C3%B7&f=8&wd=%CB%F9%D3%D0%BA%BA%D7%D6";
String key = "[0256]00[0-9]{3}";// six-digit stock codes starting with 0, 2, 5 or 6 (a '|' inside [] would match a literal pipe)
// url = "http://hi.baidu.com/festsoft/blog/item/171feb8860eee492a5c27256.html";
// key = "[\u4e00-\u9fa5]{0,1}";// [\u4e00-\u9fa5]{0,} matches runs of CJK characters/words/phrases
GetWeb gw = new GetWeb(url,key);
// gw.setSaveSearchKey(false);
gw.setSearchCurrent(true);
gw.getWebByHomePage();
List<String> list = gw.getSearchedKey();
SpiderDAO dao = new SpiderDAO();
// dao.getStock();
for(int i=0;i<list.size();i++){
// dao.addStockNum((String)list.get(i), "", "");
System.out.println(list.get(i));
}
System.out.println("gsize:"+list.size());
if(list.size() == 0){
list = gw.getSearchedUrl();
System.out.println(list.size());
for(int i=0;i<list.size();i++){
System.out.println(list.get(i));
}
}
}
class Processer implements Runnable {
GetWeb gw;
public Processer(GetWeb g) {
this.gw = g;
}
public void run() {
// Thread.sleep(5000);
while (!arrUrls.isEmpty()) {
String tmp = getAUrl();
if (tmp == null) {
break;// another worker drained the queue between the check and the take
}
getWebByUrl(tmp, charset, allUrls.get(tmp) + "");
}
}
}
}
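For reference, a minimal standalone usage sketch of the depth-based constructor (the main() above only exercises the keyword-search constructor). This assumes the class lives in the same package as GetWeb; the URL is a hypothetical placeholder, and the crawl writes report.txt and fileindex.txt into the working directory:

public class SpiderDemo {
    public static void main(String[] args) {
        // crawl to a link depth of 2, staying inside the start page's domain
        GetWeb spider = new GetWeb("http://www.example.com/index.html", 2);
        spider.getWebByHomePage();// returns once the URL queue is drained
        System.out.println("Pages fetched: " + spider.getSearchedUrl().size());
    }
}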
package web.play.rss.util.dao;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import com.provideo.ibox.connection.DBConnSourceByStruts;
public class SpiderDAO {
Connection con = null;
PreparedStatement ps = null;
public boolean addStockNum(String num,String name,String type){
boolean isadd = false ;
String sql = "insert ignore into Stock (StockNum, StockName, StockType, StockAddr, StockActive) values (?, ?, ?, '', 2)";
try {
con = new DBConnSourceByStruts().getConnection();
// if(con == null){
// Connectioner conner = new Connectioner();
//
// con = conner.getConnection();
// }
} catch (Exception e1) {
e1.printStackTrace();
}
try{
ps = con.prepareStatement(sql);
ps.setString(1, num);
ps.setString(2, name);
ps.setString(3, type);
ps.execute();
isadd = true;
}catch (Exception e) {
e.printStackTrace();
}finally{
try {
if (ps != null) ps.close();
if (con != null) con.close();// con may be null if getConnection() failed
} catch (Exception e) {
e.printStackTrace();
}
}
return isadd;
}
public String getStock(){
String rtnStr = "";
String sql = "select * from Stock";
Connection con = null;
try {
con = new DBConnSourceByStruts().getConnection();
} catch (SQLException e1) {
e1.printStackTrace();
}
try{
ps = con.prepareStatement(sql);
ResultSet rs = ps.executeQuery();
while(rs.next()){
String row = rs.getString(1) + " " + rs.getString(2) + " " + rs.getString(3);
System.out.println(row);
rtnStr += row + "\n";// return the listing instead of an always-empty string
}
}catch (Exception e) {
e.printStackTrace();
}finally{
try {
if (ps != null) ps.close();
if (con != null) con.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
return rtnStr;
}
}
package web.play.rss.util.dao;
import java.sql.Connection;
import java.sql.DriverManager;
public class Connectioner {
public Connection getConnection() {
Connection con = null;
try {
Class.forName("org.gjt.mm.mysql.Driver").newInstance();// legacy class name for the MySQL JDBC driver
String url = "jdbc:mysql://localhost/dbname?user=root&password=123456";
// "dbname" is the database name
con = DriverManager.getConnection(url);
} catch (Exception e) {
e.printStackTrace();
}
return con;
}
public static void main(String[] args) {
Connectioner con = new Connectioner();
con.getConnection();
}
}
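The commented-out block inside SpiderDAO.addStockNum suggests Connectioner was meant as a fallback for when the Struts-managed data source yields no connection. A sketch of that wiring, assuming DBConnSourceByStruts.getConnection() can return null on failure:

Connection con = new DBConnSourceByStruts().getConnection();
if (con == null) {
    // fall back to a plain DriverManager connection
    con = new Connectioner().getConnection();
}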
CREATE TABLE `Stock` (
`Oid` int(64) NOT NULL auto_increment,
`StockNum` varchar(200) unique default NULL,
`StockName` varchar(200) default NULL,
`StockType` varchar(200) default NULL,
`StockAddr` varchar(200) default NULL,
`StockActive` varchar(20) default NULL,
PRIMARY KEY (`Oid`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1
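Because StockNum carries a UNIQUE constraint, the INSERT IGNORE statement used by SpiderDAO.addStockNum silently skips stock codes that are already stored, so re-running the crawl never produces duplicate rows. A hypothetical example:

INSERT IGNORE INTO Stock (StockNum, StockName, StockType, StockAddr, StockActive)
VALUES ('600001', '', '', '', 2);
-- executing the same statement a second time affects 0 rows instead of raising an error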