转自:http://cumtfirefly.iteye.com/blog/543664
lucene3.0已于2009-11-25发布啦,但网上的入门实例都是针对lucene3.0以前的,相对于以前的版本,貌似改动不小。
本人从头开始学习lucene,现在用的是《lucene in action中文版》,结合lucene3.0文档写了个入门实例,可供像我一样直接从lucene3.0开始学习的新手参考!
入门实例:
1.预处理:先把网上下载的一个《三国演义》电子书“三国演义.txt”(可用其他代替,呵呵)切割成多个小文件。
-
-
-
-
-
-
public class FilePreprocess {
-
public static void main(String[] arg){
-
String outputpath = "D:\\test\\small\\";
-
String filename = "D:\\test\\三国演义.txt";
-
if(!new File(outputpath).exists()){
-
new File(outputpath).mkdirs();
- }
-
splitToSmallFiles(new File(filename), outputpath);
- }
-
-
-
-
-
public static void splitToSmallFiles(File file ,String outputpath){
-
int filePointer = 0;
-
int MAX_SIZE = 10240;
-
String filename = "output";
-
-
BufferedWriter writer = null;
-
try {
-
BufferedReader reader = new BufferedReader(new FileReader(file));
-
StringBuffer buffer = new StringBuffer();
- String line = reader.readLine();
-
while(line != null){
-
buffer.append(line).append("\r\n");
-
if(buffer.toString().getBytes().length>=MAX_SIZE){
-
writer = new BufferedWriter(new FileWriter(outputpath+filename+filePointer+".txt"));
- writer.write(buffer.toString());
- writer.close();
- filePointer++;
-
buffer=new StringBuffer();
- }
- line = reader.readLine();
- }
-
writer = new BufferedWriter(new FileWriter(outputpath+filename+filePointer+".txt"));
- writer.write(buffer.toString());
- writer.close();
-
System.out.println("The file hava splited to small files !");
-
} catch (FileNotFoundException e) {
-
System.out.println("file not found !");
- e.printStackTrace();
-
} catch (IOException e) {
- e.printStackTrace();
- }
- }
/**
* @author ht
* 预处理
*
*/
/**
 * Preprocessing step: splits one large text file into many small files
 * so that each chunk can be indexed as a separate Lucene document.
 *
 * @author ht
 */
public class FilePreprocess {
    public static void main(String[] arg){
        String outputpath = "D:\\test\\small\\"; // directory for the small output files
        String filename = "D:\\test\\三国演义.txt"; // path of the original large file
        if(!new File(outputpath).exists()){
            new File(outputpath).mkdirs();
        }
        splitToSmallFiles(new File(filename), outputpath);
    }

    /**
     * Splits a large file into chunks of roughly {@code MAX_SIZE} bytes,
     * writing each chunk to {@code outputpath} as {@code output<N>.txt}.
     *
     * @param file       the large input file to split
     * @param outputpath output directory path, expected to end with a separator
     */
    public static void splitToSmallFiles(File file, String outputpath){
        int filePointer = 0;        // running index appended to each chunk file name
        final int MAX_SIZE = 10240; // flush a chunk once it reaches ~10 KB
        String filename = "output";
        BufferedReader reader = null;
        BufferedWriter writer = null;
        try {
            reader = new BufferedReader(new FileReader(file));
            StringBuffer buffer = new StringBuffer();
            String line = reader.readLine();
            while (line != null) {
                buffer.append(line).append("\r\n");
                // NOTE: getBytes() uses the platform default charset; the byte
                // count (and thus chunk size) is platform-dependent by design here.
                if (buffer.toString().getBytes().length >= MAX_SIZE) {
                    writer = new BufferedWriter(new FileWriter(outputpath + filename + filePointer + ".txt"));
                    writer.write(buffer.toString());
                    writer.close();
                    writer = null;
                    filePointer++;
                    buffer = new StringBuffer();
                }
                line = reader.readLine();
            }
            // Write the remaining (possibly partial) chunk.
            writer = new BufferedWriter(new FileWriter(outputpath + filename + filePointer + ".txt"));
            writer.write(buffer.toString());
            writer.close();
            writer = null;
            System.out.println("The file has been split into small files!");
        } catch (FileNotFoundException e) {
            System.out.println("file not found !");
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // BUG FIX: the original never closed the reader (resource leak),
            // and leaked the writer if an exception interrupted a chunk write.
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException ignored) {
                    // best-effort close
                }
            }
            if (writer != null) {
                try {
                    writer.close();
                } catch (IOException ignored) {
                    // best-effort close
                }
            }
        }
    }
}
2.用lucene3.0生成索引类:用lencene3.0对生成的多个小文件进行索引,中文分词用的是lucene3.0自带的StandardAnalyzer.
-
-
-
-
-
-
public class Indexer {
-
private static String INDEX_DIR = "D:\\test\\index";
-
private static String DATA_DIR = "D:\\test\\small\\";
-
-
public static void main(String[] args) throws Exception {
-
-
long start = new Date().getTime();
-
int numIndexed = index(new File(INDEX_DIR), new File(DATA_DIR));
-
long end = new Date().getTime();
-
System.out.println("Indexing " + numIndexed + " files took " + (end - start) + " milliseconds");
- }
-
-
-
-
-
-
-
-
public static int index(File indexDir, File dataDir) throws IOException {
-
-
if (!dataDir.exists() || !dataDir.isDirectory()) {
-
throw new IOException(dataDir + " does not exist or is not a directory");
- }
-
-
IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir), new StandardAnalyzer(Version.LUCENE_CURRENT), true,
-
IndexWriter.MaxFieldLength.LIMITED);
-
- indexDirectory(writer, dataDir);
-
int numIndexed = writer.numDocs();
- writer.optimize();
- writer.close();
-
return numIndexed;
- }
-
-
-
-
-
-
-
private static void indexDirectory(IndexWriter writer, File dir)
-
throws IOException {
-
- File[] files = dir.listFiles();
-
-
for (int i = 0; i < files.length; i++) {
- File f = files[i];
-
if (f.isDirectory()) {
-
indexDirectory(writer, f);
-
} else if (f.getName().endsWith(".txt")) {
- indexFile(writer, f);
- }
- }
- }
-
-
-
-
-
-
-
private static void indexFile(IndexWriter writer, File f)
-
throws IOException {
-
-
if (f.isHidden() || !f.exists() || !f.canRead()) {
-
return;
- }
-
-
System.out.println("Indexing " + f.getCanonicalPath());
-
Document doc = new Document();
-
doc.add(new Field("contents",new FileReader(f)));
-
doc.add(new Field("filename",f.getCanonicalPath(),Field.Store.YES, Field.Index.ANALYZED));
-
- writer.addDocument(doc);
- }
- }
/**
* @author ht
* 索引生成
*
*/
/**
 * Builds a Lucene 3.0 index from the small .txt files produced by the
 * preprocessing step.
 *
 * @author ht
 */
public class Indexer {
    private static String INDEX_DIR = "D:\\test\\index";  // where the index is stored
    private static String DATA_DIR = "D:\\test\\small\\"; // where the small .txt files live

    public static void main(String[] args) throws Exception {
        long start = new Date().getTime();
        int numIndexed = index(new File(INDEX_DIR), new File(DATA_DIR));
        long end = new Date().getTime();
        System.out.println("Indexing " + numIndexed + " files took " + (end - start) + " milliseconds");
    }

    /**
     * Indexes every .txt file under {@code dataDir}, storing the index in
     * {@code indexDir}.
     *
     * @param indexDir directory in which the index is created
     * @param dataDir  directory containing the files to index
     * @return the number of documents indexed
     * @throws IOException if dataDir is missing or the index cannot be written
     */
    public static int index(File indexDir, File dataDir) throws IOException {
        if (!dataDir.exists() || !dataDir.isDirectory()) {
            throw new IOException(dataDir + " does not exist or is not a directory");
        }
        // Lucene 3.0 API change: open the directory via FSDirectory.open and
        // construct the analyzer with an explicit Version.
        IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir),
                new StandardAnalyzer(Version.LUCENE_CURRENT), true,
                IndexWriter.MaxFieldLength.LIMITED);
        try {
            indexDirectory(writer, dataDir);
            int numIndexed = writer.numDocs();
            writer.optimize();
            return numIndexed;
        } finally {
            // BUG FIX: the original leaked the writer (and its index lock)
            // if indexing or optimize() threw before close().
            writer.close();
        }
    }

    /**
     * Recursively walks {@code dir} and indexes every .txt file found.
     *
     * @param writer the open index writer
     * @param dir    the directory to walk
     * @throws IOException if a file cannot be read or indexed
     */
    private static void indexDirectory(IndexWriter writer, File dir)
            throws IOException {
        File[] files = dir.listFiles();
        if (files == null) {
            // BUG FIX: listFiles() returns null on an I/O error or if dir is
            // not actually a directory; the original threw a NullPointerException.
            return;
        }
        for (int i = 0; i < files.length; i++) {
            File f = files[i];
            if (f.isDirectory()) {
                indexDirectory(writer, f); // recurse into subdirectory
            } else if (f.getName().endsWith(".txt")) {
                indexFile(writer, f);
            }
        }
    }

    /**
     * Adds a single .txt file to the index: its content as a tokenized
     * "contents" field and its path as a stored "filename" field.
     *
     * @param writer the open index writer
     * @param f      the file to index
     * @throws IOException if the file cannot be read
     */
    private static void indexFile(IndexWriter writer, File f)
            throws IOException {
        if (f.isHidden() || !f.exists() || !f.canRead()) {
            return; // skip anything we cannot or should not index
        }
        System.out.println("Indexing " + f.getCanonicalPath());
        Document doc = new Document();
        // Lucene 3.0 API: Reader-valued field is tokenized but not stored.
        doc.add(new Field("contents", new FileReader(f)));
        doc.add(new Field("filename", f.getCanonicalPath(), Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
    }
}
3.查询类:查询“玄德”!
-
-
-
-
-
-
public class Searcher {
-
private static String INDEX_DIR = "D:\\test\\index\\";
-
private static String KEYWORD = "玄德";
-
private static int TOP_NUM = 100;
-
-
public static void main(String[] args) throws Exception {
-
File indexDir = new File(INDEX_DIR);
-
if (!indexDir.exists() || !indexDir.isDirectory()) {
-
throw new Exception(indexDir +
-
" does not exist or is not a directory.");
- }
-
search(indexDir, KEYWORD);
- }
-
-
-
-
-
-
public static void search(File indexDir, String q) throws Exception {
-
IndexSearcher is = new IndexSearcher(FSDirectory.open(indexDir),true);
-
String field = "contents";
-
-
QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field, new StandardAnalyzer(Version.LUCENE_CURRENT));
- Query query = parser.parse(q);
-
-
TopScoreDocCollector collector = TopScoreDocCollector.create(TOP_NUM , false);
-
-
long start = new Date().getTime();
-
- is.search(query, collector);
- ScoreDoc[] hits = collector.topDocs().scoreDocs;
-
- System.out.println(hits.length);
-
for (int i = 0; i < hits.length; i++) {
-
Document doc = is.doc(hits[i].doc);
-
System.out.println(doc.getField("filename")+" "+hits[i].toString()+" ");
- }
-
long end = new Date().getTime();
-
-
System.out.println("Found " + collector.getTotalHits() +
-
" document(s) (in " + (end - start) +
-
" milliseconds) that matched query '" +
-
q + "':");
- }
- }
/**
* @author ht
* 查询
*
*/
/**
 * Searches the Lucene 3.0 index built by {@code Indexer} for a keyword
 * and prints the matching file names.
 *
 * @author ht
 */
public class Searcher {
    private static String INDEX_DIR = "D:\\test\\index\\"; // where the index lives
    private static String KEYWORD = "玄德";                 // query keyword
    private static int TOP_NUM = 100;                      // show at most the top 100 hits

    public static void main(String[] args) throws Exception {
        File indexDir = new File(INDEX_DIR);
        if (!indexDir.exists() || !indexDir.isDirectory()) {
            throw new Exception(indexDir +
                    " does not exist or is not a directory.");
        }
        search(indexDir, KEYWORD);
    }

    /**
     * Runs the query {@code q} against the "contents" field of the index in
     * {@code indexDir} and prints the top hits with timing information.
     *
     * @param indexDir directory containing the index
     * @param q        the query string
     * @throws Exception if the index cannot be opened or the query is invalid
     */
    public static void search(File indexDir, String q) throws Exception {
        // Lucene 3.0 API: open the searcher read-only for better concurrency.
        IndexSearcher is = new IndexSearcher(FSDirectory.open(indexDir), true);
        try {
            String field = "contents";
            // Lucene 3.0 API change: QueryParser takes an explicit Version.
            QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field,
                    new StandardAnalyzer(Version.LUCENE_CURRENT));
            Query query = parser.parse(q);
            // Collect the top TOP_NUM hits; no docs-out-of-order optimization.
            TopScoreDocCollector collector = TopScoreDocCollector.create(TOP_NUM, false);
            long start = new Date().getTime();
            is.search(query, collector);
            ScoreDoc[] hits = collector.topDocs().scoreDocs;
            System.out.println(hits.length);
            for (int i = 0; i < hits.length; i++) {
                Document doc = is.doc(hits[i].doc); // fetch the stored document
                System.out.println(doc.getField("filename") + " " + hits[i].toString() + " ");
            }
            long end = new Date().getTime();
            System.out.println("Found " + collector.getTotalHits() +
                    " document(s) (in " + (end - start) +
                    " milliseconds) that matched query '" +
                    q + "':");
        } finally {
            // BUG FIX: the original never closed the searcher, leaking the
            // underlying index file handles.
            is.close();
        }
    }
}
4.结果就不贴啦,反正能运行就是啦
分享到:
相关推荐
**Lucene 3.0 入门实例** Lucene 是一个高性能、全文本搜索库,由 Apache 软件基金会开发。它提供了完整的搜索功能,包括索引、查询解析、排序以及高级的文本分析能力。在 Lucene 3.0 版本中,开发者可以利用其强大...
**Lucene 3.0 入门实例及关键知识点** Lucene 是一个开源的全文搜索引擎库,由 Apache 软件基金会开发。它为开发者提供了在应用程序中实现文本搜索功能的强大工具。本实例主要针对 Lucene 3.0 版本,这个版本虽然...
这个入门实例将引导我们了解如何使用Lucene 3.0版本进行基本的索引和搜索操作。以下是对Lucene 3.0关键知识点的详细讲解: 1. **Lucene的架构**: Lucene的核心组件包括文档(Document)、字段(Field)、索引...
**Lucene 3.0 全文检索入门实例** Lucene 是一个开源的全文检索库,由 Apache 软件基金会开发。它提供了一个高级、灵活的搜索功能框架,允许开发者在自己的应用中轻松地集成全文检索功能。本文将重点介绍如何使用 ...
《Lucene 3.0 完成入门》 Lucene 是一个开源的全文检索库,由 Apache 软件基金会维护。它为开发者提供了一种高级的文本搜索功能,允许他们在应用程序中集成强大的搜索引擎。本篇文章将围绕 Lucene 3.0 版本,详细...
《Lucene 3.0 入门:搜索引擎开发的基础指南》 Lucene 是一个高性能、全文本搜索库,由 Apache 软件基金会维护。它为开发者提供了在各种应用程序中实现全文索引和搜索功能的强大工具。Lucene 3.0 版本是其历史上的...
自2006年12月发布1.0版以来,IK Analyzer 经历了多次升级,3.0版已演变为独立于 Lucene 的通用分词组件,同时也为 Lucene 提供了优化的集成。 1. **IK Analyzer 3.0 结构设计** - IK Analyzer 3.0 的设计旨在提高...
**全文搜索引擎Lucene入门** 全文搜索引擎Lucene是Apache软件基金会的一个开放源代码项目,它为Java开发者提供了一个高性能、可扩展的信息检索库。Lucene以其强大的文本搜索功能和高效的索引能力,在各种需要全文...
##### 2.3 Lucene用户快速入门 对于使用Lucene的开发者来说,IKAnalyzer提供了简便的集成方式。下面是一个简单的代码示例,展示了如何使用IKAnalyzer进行文本分词: ```java public class IKAnalyzerDemo { ...
##### Lucene用户快速入门 对于Lucene用户而言,使用IKAnalyzer进行分词操作可以通过以下步骤实现: 1. 导入必要的包,例如`Analyzer`、`Document`、`Field`、`IndexWriter`、`IndexSearcher`、`Query`等。 2. ...
- **EJB**:如《Enterprise JavaBeans 3.0》(第五版),深入讲解EJB,适用于大规模分布式系统开发。 **搜索技术** - **《Lucene in action》**:经典之作,介绍了Lucene的基本原理,虽版本较低但仍具有参考价值。 ...
- **简介**:本书深入介绍了面向对象编程的核心概念,并提供了大量实例来帮助读者理解和掌握Java编程。 - **适用人群**:适合已有一定Java基础的学习者,用于加深对Java编程的理解。 3. **《Java JDK实例宝典》**...