- 浏览: 54077 次
- 性别:
- 来自: 深圳
文章分类
最新评论
public class IndexDao {

	/** Filesystem directory that holds the Lucene index. */
	String indexPath = "E:\\luceneIndex";

	// Analyzer analyzer = new StandardAnalyzer();
	Analyzer analyzer = new MMAnalyzer(); // dictionary-based word segmentation

	/**
	 * Adds a document to the index (create/append).
	 *
	 * @param doc the document to index
	 */
	public void save(Document doc) {
		IndexWriter indexWriter = null;
		try {
			indexWriter = new IndexWriter(indexPath, analyzer, MaxFieldLength.LIMITED);
			indexWriter.addDocument(doc);
		} catch (Exception e) {
			throw new RuntimeException(e);
		} finally {
			// Null-check: the IndexWriter constructor may have thrown, in
			// which case calling close() here would raise an NPE.
			if (indexWriter != null) {
				try {
					indexWriter.close();
				} catch (Exception e) {
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Deletes all documents containing the given term.
	 *
	 * A Term is the smallest unit of search; it names one keyword in one
	 * field, e.g. new Term("title", "lucene"), new Term("id", "5"),
	 * new Term("id", UUID).
	 *
	 * @param term the term identifying the documents to delete
	 */
	public void delete(Term term) {
		IndexWriter indexWriter = null;
		try {
			indexWriter = new IndexWriter(indexPath, analyzer, MaxFieldLength.LIMITED);
			indexWriter.deleteDocuments(term);
		} catch (Exception e) {
			throw new RuntimeException(e);
		} finally {
			if (indexWriter != null) {
				try {
					indexWriter.close();
				} catch (Exception e) {
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Updates the index. Equivalent to:
	 *
	 * <pre>
	 * indexWriter.deleteDocuments(term);
	 * indexWriter.addDocument(doc);
	 * </pre>
	 *
	 * @param term the term identifying the documents to replace
	 * @param doc  the replacement document
	 */
	public void update(Term term, Document doc) {
		IndexWriter indexWriter = null;
		try {
			indexWriter = new IndexWriter(indexPath, analyzer, MaxFieldLength.LIMITED);
			indexWriter.updateDocument(term, doc);
		} catch (Exception e) {
			throw new RuntimeException(e);
		} finally {
			if (indexWriter != null) {
				try {
					indexWriter.close();
				} catch (Exception e) {
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Parses the query string against the "name" and "content" fields and
	 * returns one page of results.
	 *
	 * Paging note:
	 * <pre>
	 * totalPage = recordCount / pageSize;
	 * if (recordCount % pageSize > 0)
	 *     totalPage++;
	 * </pre>
	 *
	 * @param queryString the user-entered query text
	 * @param firstResult index of the first record to return (0-based)
	 * @param maxResults  maximum number of records to return
	 * @return total hit count plus the requested page of documents
	 */
	public QueryResult search(String queryString, int firstResult, int maxResults) {
		try {
			// 1. Parse the search text into a Query, boosting "name" matches.
			String[] fields = { "name", "content" };
			Map<String, Float> boosts = new HashMap<String, Float>();
			boosts.put("name", 3f);
			// boosts.put("content", 1.0f); // 1.0f is the default
			QueryParser queryParser = new MultiFieldQueryParser(fields, analyzer, boosts);
			Query query = queryParser.parse(queryString);
			return search(query, firstResult, maxResults);
		} catch (Exception e) {
			throw new RuntimeException(e);
		}
	}

	/**
	 * Runs the query (filtered to size 200..1000, sorted by size ascending),
	 * highlights matches in "content", and returns one page of results.
	 *
	 * @param query       the query to execute
	 * @param firstResult index of the first record to return (0-based)
	 * @param maxResults  maximum number of records to return
	 * @return total hit count plus the requested page of documents
	 */
	public QueryResult search(Query query, int firstResult, int maxResults) {
		IndexSearcher indexSearcher = null;
		try {
			// 2. Execute the query.
			indexSearcher = new IndexSearcher(indexPath);
			Filter filter = new RangeFilter("size", NumberTools.longToString(200)
					, NumberTools.longToString(1000), true, true);
			// ---------- sorting
			Sort sort = new Sort();
			sort.setSort(new SortField("size")); // ascending by default
			// sort.setSort(new SortField("size", true));
			// ----------
			TopDocs topDocs = indexSearcher.search(query, filter, 10000, sort);
			int recordCount = topDocs.totalHits;
			List<Document> recordList = new ArrayList<Document>();
			// ---------- prepare the highlighter
			Formatter formatter = new SimpleHTMLFormatter("<font color='red'>", "</font>");
			Scorer scorer = new QueryScorer(query);
			Highlighter highlighter = new Highlighter(formatter, scorer);
			Fragmenter fragmenter = new SimpleFragmenter(50);
			highlighter.setTextFragmenter(fragmenter);
			// ----------
			// 3. Extract the requested page.
			int end = Math.min(firstResult + maxResults, topDocs.totalHits);
			for (int i = firstResult; i < end; i++) {
				ScoreDoc scoreDoc = topDocs.scoreDocs[i];
				int docSn = scoreDoc.doc; // internal document number
				Document doc = indexSearcher.doc(docSn); // fetch the document by number
				// ---------- highlighting
				// Returns null when the field contains none of the query terms.
				String hc = highlighter.getBestFragment(analyzer, "content", doc.get("content"));
				if (hc == null) {
					String content = doc.get("content");
					int endIndex = Math.min(50, content.length());
					hc = content.substring(0, endIndex); // at most the first 50 characters
				}
				doc.getField("content").setValue(hc);
				// ----------
				recordList.add(doc);
			}
			// Return the page together with the total hit count.
			return new QueryResult(recordCount, recordList);
		} catch (Exception e) {
			throw new RuntimeException(e);
		} finally {
			// Null-check: this finally only catches IOException, so an NPE on
			// a null searcher would otherwise mask the original exception.
			if (indexSearcher != null) {
				try {
					indexSearcher.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}
}
//==============
public class IndexDaoTest {

	String filePath = "E:\\luceneDatasource\\IndexWriter addDocument's a javadoc .txt";
	String filePath2 = "E:\\luceneDatasource\\小笑话_总统的房间 Room .txt";

	IndexDao indexDao = new IndexDao();

	/** Indexes both sample files, boosting the first document. */
	@Test
	public void testSave() {
		Document first = File2DocumentUtils.file2Document(filePath);
		first.setBoost(3f);
		indexDao.save(first);

		Document second = File2DocumentUtils.file2Document(filePath2);
		// second.setBoost(1.0f);
		indexDao.save(second);
	}

	/** Removes the document whose "path" field matches the first sample file. */
	@Test
	public void testDelete() {
		indexDao.delete(new Term("path", filePath));
	}

	/** Replaces the first sample document with one whose content is changed. */
	@Test
	public void testUpdate() {
		Document replacement = File2DocumentUtils.file2Document(filePath);
		replacement.getField("content").setValue("这是更新后的文件内容");
		indexDao.update(new Term("path", filePath), replacement);
	}

	/** Searches for "room" and prints the first page of matches. */
	@Test
	public void testSearch() {
		// String queryString = "IndexWriter";
		// String queryString = "房间";
		// String queryString = "笑话";
		String queryString = "room";
		// String queryString = "content:绅士";
		QueryResult result = indexDao.search(queryString, 0, 10);
		System.out.println("总共有【" + result.getRecordCount() + "】条匹配结果");
		for (Document doc : result.getRecordList()) {
			File2DocumentUtils.printDocumentInfo(doc);
		}
	}
}
//============
/**
 * Value holder for one page of search results: the total number of hits
 * plus the documents of the requested page.
 */
public class QueryResult {

	// Total number of matching documents in the index.
	private int recordCount;
	// Documents of the current page only.
	private List<Document> recordList;

	public QueryResult(int recordCount, List<Document> recordList) {
		super();
		this.recordCount = recordCount;
		this.recordList = recordList;
	}

	public int getRecordCount() {
		return recordCount;
	}

	public List<Document> getRecordList() {
		return recordList;
	}

	public void setRecordCount(int recordCount) {
		this.recordCount = recordCount;
	}

	public void setRecordList(List<Document> recordList) {
		this.recordList = recordList;
	}
}
//==============
public class AnalyzerTest {

	String enText = "IndexWriter addDocument's a javadoc.txt";
	// String zhText = "我们是中国人";
	// String zhText = "小笑话_总统的房间 Room .txt";
	String zhText = "一位绅士到旅游胜地的一家饭店要开个房间";

	Analyzer en1 = new StandardAnalyzer(); // splits CJK text character by character
	Analyzer en2 = new SimpleAnalyzer();
	Analyzer zh1 = new CJKAnalyzer(); // bigram (two-character) segmentation
	Analyzer zh2 = new MMAnalyzer(); // dictionary-based segmentation

	@Test
	public void test() throws Exception {
		// analyze(en2, enText);
		// analyze(en1, zhText);
		// analyze(zh1, zhText);
		analyze(zh2, zhText);
	}

	/** Tokenizes the text with the given analyzer and prints every token. */
	public void analyze(Analyzer analyzer, String text) throws Exception {
		System.out.println("-------------> 分词器:" + analyzer.getClass());
		TokenStream tokenStream = analyzer.tokenStream("content", new StringReader(text));
		Token token = new Token();
		while ((token = tokenStream.next(token)) != null) {
			System.out.println(token);
		}
	}
}
//===========
public class DirectoryTest {

	String filePath = "E:\\IndexWriter addDocument 's javadoc.txt";
	String indexPath = "E:\\luceneIndex";
	Analyzer analyzer = new StandardAnalyzer();

	/**
	 * Indexes one document into an in-memory (RAM) directory.
	 */
	@Test
	public void test1() throws Exception {
		// Directory dir = FSDirectory.getDirectory(indexPath);
		Directory dir = new RAMDirectory();
		Document doc = File2DocumentUtils.file2Document(filePath);
		IndexWriter indexWriter = new IndexWriter(dir, analyzer, MaxFieldLength.LIMITED);
		indexWriter.addDocument(doc);
		indexWriter.close();
		dir.close(); // release the directory (previously leaked)
	}

	/**
	 * Works on a RAM copy of a filesystem index: load at startup, operate in
	 * memory while running, then persist back to disk on shutdown.
	 */
	@Test
	public void test2() throws Exception {
		Directory fsDir = FSDirectory.getDirectory(indexPath);
		// 1. Load the on-disk index into memory at startup.
		Directory ramDir = new RAMDirectory(fsDir);
		// While the program runs, operate on ramDir only.
		IndexWriter ramIndexWriter = new IndexWriter(ramDir, analyzer, MaxFieldLength.LIMITED);
		// Add a Document.
		Document doc = File2DocumentUtils.file2Document(filePath);
		ramIndexWriter.addDocument(doc);
		ramIndexWriter.close();
		// 2. Persist the in-memory index back to disk on shutdown;
		// 'true' recreates the on-disk index so the merged data replaces it.
		IndexWriter fsIndexWriter = new IndexWriter(fsDir, analyzer, true, MaxFieldLength.LIMITED);
		fsIndexWriter.addIndexesNoOptimize(new Directory[] { ramDir });
		// fsIndexWriter.flush();
		// fsIndexWriter.optimize();
		fsIndexWriter.close();
		// Release both directories (previously leaked).
		ramDir.close();
		fsDir.close();
	}

	/**
	 * Optimizes (merges the segments of) the on-disk index.
	 */
	@Test
	public void test3() throws Exception {
		Directory fsDir = FSDirectory.getDirectory(indexPath);
		IndexWriter fsIndexWriter = new IndexWriter(fsDir, analyzer, MaxFieldLength.LIMITED);
		fsIndexWriter.optimize();
		fsIndexWriter.close();
		fsDir.close(); // release the directory (previously leaked)
	}
}
//=================
public class HelloWorld {

	String filePath = "E:\\lesson\\20090723就业班\\workspace\\LuceneDemo\\luceneDatasource\\IndexWriter addDocument's a javadoc.txt";
	String indexPath = "E:\\lesson\\20090723就业班\\workspace\\LuceneDemo\\luceneIndex";
	Analyzer analyzer = new StandardAnalyzer();

	/**
	 * Creates the index.
	 *
	 * IndexWriter is the class used to modify (add/delete/update) the index.
	 */
	@Test
	public void createIndex() throws Exception {
		// file --> doc
		Document doc = File2DocumentUtils.file2Document(filePath);
		// Build the index ('true' recreates it from scratch).
		IndexWriter indexWriter = new IndexWriter(indexPath, analyzer, true,
				MaxFieldLength.LIMITED);
		indexWriter.addDocument(doc);
		indexWriter.close();
	}

	/**
	 * Searches the index.
	 *
	 * IndexSearcher is the class used to query the index.
	 */
	@Test
	public void search() throws Exception {
		// String queryString = "document";
		String queryString = "adddocument";
		// 1. Parse the search text into a Query.
		String[] fields = { "name", "content" };
		QueryParser queryParser = new MultiFieldQueryParser(fields, analyzer);
		Query query = queryParser.parse(queryString);
		// 2. Execute the query.
		IndexSearcher indexSearcher = new IndexSearcher(indexPath);
		try {
			Filter filter = null;
			TopDocs topDocs = indexSearcher.search(query, filter, 10000);
			System.out.println("总共有【" + topDocs.totalHits + "】条匹配结果");
			// 3. Print the results.
			for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
				int docSn = scoreDoc.doc; // internal document number
				Document doc = indexSearcher.doc(docSn); // fetch the document by number
				File2DocumentUtils.printDocumentInfo(doc); // print the stored fields
			}
		} finally {
			// Close the searcher; the original never released it.
			indexSearcher.close();
		}
	}
}
//================
public class QueryTest {

	IndexDao indexDao = new IndexDao();

	/** Runs the query through IndexDao and prints every matching document. */
	public void queryAndPrintResult(Query query) {
		System.out.println("对应的查询字符串:" + query);
		QueryResult result = indexDao.search(query, 0, 100);
		System.out.println("总共有【" + result.getRecordCount() + "】条匹配结果");
		for (Document doc : result.getRecordList()) {
			File2DocumentUtils.printDocumentInfo(doc);
		}
	}

	/**
	 * Keyword (term) query.
	 *
	 * name:room
	 */
	@Test
	public void testTermQuery() {
		// Term term = new Term("name", "房间");
		// Term term = new Term("name", "Room"); // indexed English keywords are lower-cased
		queryAndPrintResult(new TermQuery(new Term("name", "room")));
	}

	/**
	 * Range query.
	 *
	 * Bounds included: size:[0000000000001e TO 000000000000rs]
	 *
	 * Bounds excluded: size:{0000000000001e TO 000000000000rs}
	 */
	@Test
	public void testRangeQuery() {
		Term from = new Term("size", NumberTools.longToString(50));
		Term to = new Term("size", NumberTools.longToString(1000));
		queryAndPrintResult(new RangeQuery(from, to, false));
	}

	// public static void main(String[] args) {
	// System.out.println(Long.MAX_VALUE);
	// System.out.println(NumberTools.longToString(1000));
	// System.out.println(NumberTools.stringToLong("000000000000rs"));
	//
	// System.out.println(DateTools.dateToString(new Date(), Resolution.DAY));
	// System.out.println(DateTools.dateToString(new Date(), Resolution.MINUTE));
	// System.out.println(DateTools.dateToString(new Date(), Resolution.SECOND));
	// }

	/**
	 * Wildcard query: '?' matches exactly one character, '*' matches zero or
	 * more characters.
	 *
	 * name:房*
	 *
	 * name:*o*
	 *
	 * name:roo?
	 */
	@Test
	public void testWildcardQuery() {
		Term pattern = new Term("name", "roo?");
		// Term pattern = new Term("name", "ro*"); // prefix form; see PrefixQuery
		// Term pattern = new Term("name", "*o*");
		// Term pattern = new Term("name", "房*");
		queryAndPrintResult(new WildcardQuery(pattern));
	}

	/**
	 * Phrase query.
	 *
	 * content:"? 绅士 ? ? 饭店"
	 *
	 * content:"绅士 饭店"~2
	 */
	@Test
	public void testPhraseQuery() {
		PhraseQuery phrase = new PhraseQuery();
		// phrase.add(new Term("content", "绅士"), 1);
		// phrase.add(new Term("content", "饭店"), 4);
		phrase.add(new Term("content", "绅士"));
		phrase.add(new Term("content", "饭店"));
		phrase.setSlop(2);
		queryAndPrintResult(phrase);
	}

	/**
	 * Boolean combinations, e.g.:
	 *
	 * +content:"绅士 饭店"~2 -size:[000000000000dw TO 000000000000rs]
	 *
	 * +content:"绅士 饭店"~2 +size:[000000000000dw TO 000000000000rs]
	 *
	 * content:"绅士 饭店"~2 size:[000000000000dw TO 000000000000rs]
	 *
	 * +content:"绅士 饭店"~2 size:[000000000000dw TO 000000000000rs]
	 */
	@Test
	public void testBooleanQuery() {
		// Clause 1: phrase match on the content field.
		PhraseQuery phrase = new PhraseQuery();
		phrase.add(new Term("content", "绅士"));
		phrase.add(new Term("content", "饭店"));
		phrase.setSlop(2);
		// Clause 2: size range, bounds included.
		Query range = new RangeQuery(
				new Term("size", NumberTools.longToString(500)),
				new Term("size", NumberTools.longToString(1000)), true);
		// Combine the clauses.
		BooleanQuery combined = new BooleanQuery();
		combined.add(phrase, Occur.MUST);
		combined.add(range, Occur.SHOULD);
		queryAndPrintResult(combined);
	}

	@Test
	public void testQueryString() {
		// String queryString = "+content:\"绅士 饭店\"~2 -size:[000000000000dw TO 000000000000rs]";
		// String queryString = "content:\"绅士 饭店\"~2 AND size:[000000000000dw TO 000000000000rs]";
		// String queryString = "content:\"绅士 饭店\"~2 OR size:[000000000000dw TO 000000000000rs]";
		// String queryString = "(content:\"绅士 饭店\"~2 NOT size:[000000000000dw TO 000000000000rs])";
		// String queryString = "-content:\"绅士 饭店\"~2 AND -size:[000000000000dw TO 000000000000rs]";
		// String queryString = "-content:\"绅士 饭店\"~2 OR -size:[000000000000dw TO 000000000000rs]";
		String queryString = "-content:\"绅士 饭店\"~2 NOT -size:[000000000000dw TO 000000000000rs]";
		QueryResult result = indexDao.search(queryString, 0, 10);
		System.out.println("总共有【" + result.getRecordCount() + "】条匹配结果");
		for (Document doc : result.getRecordList()) {
			File2DocumentUtils.printDocumentInfo(doc);
		}
	}
}
//============
public class File2DocumentUtils {

	/**
	 * Converts a file into a Lucene Document with four fields:
	 * name, content, size, path.
	 *
	 * @param path path of the file to convert
	 * @return a Document carrying the file's name, content, size and path
	 */
	public static Document file2Document(String path) {
		File file = new File(path);
		Document doc = new Document();
		doc.add(new Field("name", file.getName(), Store.YES, Index.ANALYZED));
		doc.add(new Field("content", readFileContent(file), Store.YES, Index.ANALYZED));
		// Sizes go through NumberTools so lexicographic order matches numeric order.
		doc.add(new Field("size", NumberTools.longToString(file.length()), Store.YES, Index.NOT_ANALYZED));
		doc.add(new Field("path", file.getAbsolutePath(), Store.YES, Index.NOT_ANALYZED));
		return doc;
	}

	// public static void document2File(Document doc ){
	//
	// }

	/**
	 * Reads the whole file as text, line by line, normalizing line endings
	 * to '\n'.
	 *
	 * NOTE(review): still uses the platform default charset, as the original
	 * did — confirm the expected file encoding before changing it.
	 *
	 * @param file the file to read
	 * @return the file's full text content
	 */
	public static String readFileContent(File file) {
		BufferedReader reader = null;
		try {
			reader = new BufferedReader(new InputStreamReader(new FileInputStream(file)));
			StringBuffer content = new StringBuffer();
			for (String line = null; (line = reader.readLine()) != null;) {
				content.append(line).append("\n");
			}
			return content.toString();
		} catch (Exception e) {
			throw new RuntimeException(e);
		} finally {
			// Close the reader; the original leaked the file handle.
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Prints the stored fields of a document.
	 *
	 * <pre>
	 * Two ways to read the "name" field:
	 * 1. Field f = doc.getField("name"); f.stringValue();
	 * 2. doc.get("name");
	 * </pre>
	 *
	 * @param doc the document to print
	 */
	public static void printDocumentInfo(Document doc) {
		// Field f = doc.getField("name");
		// f.stringValue();
		System.out.println("------------------------------");
		System.out.println("name = " + doc.get("name"));
		System.out.println("content = " + doc.get("content"));
		System.out.println("size = " + NumberTools.stringToLong(doc.get("size")));
		System.out.println("path = " + doc.get("path"));
	}
}
/** Filesystem directory that holds the Lucene index. */
String indexPath = "E:\\luceneIndex";
// Analyzer analyzer = new StandardAnalyzer();
Analyzer analyzer = new MMAnalyzer(); // dictionary-based word segmentation

/**
 * Adds a document to the index (create/append).
 *
 * @param doc the document to index
 */
public void save(Document doc) {
	IndexWriter indexWriter = null;
	try {
		indexWriter = new IndexWriter(indexPath, analyzer, MaxFieldLength.LIMITED);
		indexWriter.addDocument(doc);
	} catch (Exception e) {
		throw new RuntimeException(e);
	} finally {
		// Null-check: the IndexWriter constructor may have thrown, in which
		// case calling close() here would raise an NPE.
		if (indexWriter != null) {
			try {
				indexWriter.close();
			} catch (Exception e) {
				e.printStackTrace();
			}
		}
	}
}
/**
 * Deletes all documents containing the given term.
 *
 * A Term is the smallest unit of search; it names one keyword in one field,
 * e.g. new Term("title", "lucene"), new Term("id", "5"), new Term("id", UUID).
 *
 * @param term the term identifying the documents to delete
 */
public void delete(Term term) {
	IndexWriter indexWriter = null;
	try {
		indexWriter = new IndexWriter(indexPath, analyzer, MaxFieldLength.LIMITED);
		indexWriter.deleteDocuments(term);
	} catch (Exception e) {
		throw new RuntimeException(e);
	} finally {
		// Null-check avoids an NPE when the IndexWriter constructor threw.
		if (indexWriter != null) {
			try {
				indexWriter.close();
			} catch (Exception e) {
				e.printStackTrace();
			}
		}
	}
}
/**
 * Updates the index. Equivalent to:
 *
 * <pre>
 * indexWriter.deleteDocuments(term);
 * indexWriter.addDocument(doc);
 * </pre>
 *
 * @param term the term identifying the documents to replace
 * @param doc  the replacement document
 */
public void update(Term term, Document doc) {
	IndexWriter indexWriter = null;
	try {
		indexWriter = new IndexWriter(indexPath, analyzer, MaxFieldLength.LIMITED);
		indexWriter.updateDocument(term, doc);
	} catch (Exception e) {
		throw new RuntimeException(e);
	} finally {
		// Null-check avoids an NPE when the IndexWriter constructor threw.
		if (indexWriter != null) {
			try {
				indexWriter.close();
			} catch (Exception e) {
				e.printStackTrace();
			}
		}
	}
}
/**
 * Parses the query string against the "name" and "content" fields and
 * returns one page of results.
 *
 * Paging note:
 * <pre>
 * totalPage = recordCount / pageSize;
 * if (recordCount % pageSize > 0)
 *     totalPage++;
 * </pre>
 *
 * @param queryString the user-entered query text
 * @param firstResult index of the first record to return (0-based)
 * @param maxResults  maximum number of records to return
 * @return total hit count plus the requested page of documents
 */
public QueryResult search(String queryString, int firstResult, int maxResults) {
	try {
		// 1. Parse the search text into a Query, boosting "name" matches.
		String[] fields = { "name", "content" };
		Map<String, Float> boosts = new HashMap<String, Float>();
		boosts.put("name", 3f);
		// boosts.put("content", 1.0f); // 1.0f is the default
		QueryParser queryParser = new MultiFieldQueryParser(fields, analyzer, boosts);
		Query query = queryParser.parse(queryString);
		return search(query, firstResult, maxResults);
	} catch (Exception e) {
		throw new RuntimeException(e);
	}
}
/**
 * Runs the query (filtered to size 200..1000, sorted by size ascending),
 * highlights matches in "content", and returns one page of results.
 *
 * @param query       the query to execute
 * @param firstResult index of the first record to return (0-based)
 * @param maxResults  maximum number of records to return
 * @return total hit count plus the requested page of documents
 */
public QueryResult search(Query query, int firstResult, int maxResults) {
	IndexSearcher indexSearcher = null;
	try {
		// 2. Execute the query.
		indexSearcher = new IndexSearcher(indexPath);
		Filter filter = new RangeFilter("size", NumberTools.longToString(200)
				, NumberTools.longToString(1000), true, true);
		// ---------- sorting
		Sort sort = new Sort();
		sort.setSort(new SortField("size")); // ascending by default
		// sort.setSort(new SortField("size", true));
		// ----------
		TopDocs topDocs = indexSearcher.search(query, filter, 10000, sort);
		int recordCount = topDocs.totalHits;
		List<Document> recordList = new ArrayList<Document>();
		// ---------- prepare the highlighter
		Formatter formatter = new SimpleHTMLFormatter("<font color='red'>", "</font>");
		Scorer scorer = new QueryScorer(query);
		Highlighter highlighter = new Highlighter(formatter, scorer);
		Fragmenter fragmenter = new SimpleFragmenter(50);
		highlighter.setTextFragmenter(fragmenter);
		// ----------
		// 3. Extract the requested page.
		int end = Math.min(firstResult + maxResults, topDocs.totalHits);
		for (int i = firstResult; i < end; i++) {
			ScoreDoc scoreDoc = topDocs.scoreDocs[i];
			int docSn = scoreDoc.doc; // internal document number
			Document doc = indexSearcher.doc(docSn); // fetch the document by number
			// ---------- highlighting
			// Returns null when the field contains none of the query terms.
			String hc = highlighter.getBestFragment(analyzer, "content", doc.get("content"));
			if (hc == null) {
				String content = doc.get("content");
				int endIndex = Math.min(50, content.length());
				hc = content.substring(0, endIndex); // at most the first 50 characters
			}
			doc.getField("content").setValue(hc);
			// ----------
			recordList.add(doc);
		}
		// Return the page together with the total hit count.
		return new QueryResult(recordCount, recordList);
	} catch (Exception e) {
		throw new RuntimeException(e);
	} finally {
		// Null-check: this finally only catches IOException, so an NPE on a
		// null searcher would otherwise mask the original exception.
		if (indexSearcher != null) {
			try {
				indexSearcher.close();
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
	}
}
}
//==============
public class IndexDaoTest {

	String filePath = "E:\\luceneDatasource\\IndexWriter addDocument's a javadoc .txt";
	String filePath2 = "E:\\luceneDatasource\\小笑话_总统的房间 Room .txt";

	IndexDao indexDao = new IndexDao();

	/** Indexes both sample files, boosting the first document. */
	@Test
	public void testSave() {
		Document first = File2DocumentUtils.file2Document(filePath);
		first.setBoost(3f);
		indexDao.save(first);

		Document second = File2DocumentUtils.file2Document(filePath2);
		// second.setBoost(1.0f);
		indexDao.save(second);
	}

	/** Removes the document whose "path" field matches the first sample file. */
	@Test
	public void testDelete() {
		indexDao.delete(new Term("path", filePath));
	}

	/** Replaces the first sample document with one whose content is changed. */
	@Test
	public void testUpdate() {
		Document replacement = File2DocumentUtils.file2Document(filePath);
		replacement.getField("content").setValue("这是更新后的文件内容");
		indexDao.update(new Term("path", filePath), replacement);
	}

	/** Searches for "room" and prints the first page of matches. */
	@Test
	public void testSearch() {
		// String queryString = "IndexWriter";
		// String queryString = "房间";
		// String queryString = "笑话";
		String queryString = "room";
		// String queryString = "content:绅士";
		QueryResult result = indexDao.search(queryString, 0, 10);
		System.out.println("总共有【" + result.getRecordCount() + "】条匹配结果");
		for (Document doc : result.getRecordList()) {
			File2DocumentUtils.printDocumentInfo(doc);
		}
	}
}
//============
/**
 * Value holder for one page of search results: the total number of hits
 * plus the documents of the requested page.
 */
public class QueryResult {

	// Total number of matching documents in the index.
	private int recordCount;
	// Documents of the current page only.
	private List<Document> recordList;

	public QueryResult(int recordCount, List<Document> recordList) {
		super();
		this.recordCount = recordCount;
		this.recordList = recordList;
	}

	public int getRecordCount() {
		return recordCount;
	}

	public List<Document> getRecordList() {
		return recordList;
	}

	public void setRecordCount(int recordCount) {
		this.recordCount = recordCount;
	}

	public void setRecordList(List<Document> recordList) {
		this.recordList = recordList;
	}
}
//==============
public class AnalyzerTest {

	String enText = "IndexWriter addDocument's a javadoc.txt";
	// String zhText = "我们是中国人";
	// String zhText = "小笑话_总统的房间 Room .txt";
	String zhText = "一位绅士到旅游胜地的一家饭店要开个房间";

	Analyzer en1 = new StandardAnalyzer(); // splits CJK text character by character
	Analyzer en2 = new SimpleAnalyzer();
	Analyzer zh1 = new CJKAnalyzer(); // bigram (two-character) segmentation
	Analyzer zh2 = new MMAnalyzer(); // dictionary-based segmentation

	@Test
	public void test() throws Exception {
		// analyze(en2, enText);
		// analyze(en1, zhText);
		// analyze(zh1, zhText);
		analyze(zh2, zhText);
	}

	/** Tokenizes the text with the given analyzer and prints every token. */
	public void analyze(Analyzer analyzer, String text) throws Exception {
		System.out.println("-------------> 分词器:" + analyzer.getClass());
		TokenStream tokenStream = analyzer.tokenStream("content", new StringReader(text));
		Token token = new Token();
		while ((token = tokenStream.next(token)) != null) {
			System.out.println(token);
		}
	}
}
//===========
public class DirectoryTest {

	String filePath = "E:\\IndexWriter addDocument 's javadoc.txt";
	String indexPath = "E:\\luceneIndex";
	Analyzer analyzer = new StandardAnalyzer();

	/**
	 * Indexes one document into an in-memory (RAM) directory.
	 */
	@Test
	public void test1() throws Exception {
		// Directory dir = FSDirectory.getDirectory(indexPath);
		Directory dir = new RAMDirectory();
		Document doc = File2DocumentUtils.file2Document(filePath);
		IndexWriter indexWriter = new IndexWriter(dir, analyzer, MaxFieldLength.LIMITED);
		indexWriter.addDocument(doc);
		indexWriter.close();
		dir.close(); // release the directory (previously leaked)
	}

	/**
	 * Works on a RAM copy of a filesystem index: load at startup, operate in
	 * memory while running, then persist back to disk on shutdown.
	 */
	@Test
	public void test2() throws Exception {
		Directory fsDir = FSDirectory.getDirectory(indexPath);
		// 1. Load the on-disk index into memory at startup.
		Directory ramDir = new RAMDirectory(fsDir);
		// While the program runs, operate on ramDir only.
		IndexWriter ramIndexWriter = new IndexWriter(ramDir, analyzer, MaxFieldLength.LIMITED);
		// Add a Document.
		Document doc = File2DocumentUtils.file2Document(filePath);
		ramIndexWriter.addDocument(doc);
		ramIndexWriter.close();
		// 2. Persist the in-memory index back to disk on shutdown;
		// 'true' recreates the on-disk index so the merged data replaces it.
		IndexWriter fsIndexWriter = new IndexWriter(fsDir, analyzer, true, MaxFieldLength.LIMITED);
		fsIndexWriter.addIndexesNoOptimize(new Directory[] { ramDir });
		// fsIndexWriter.flush();
		// fsIndexWriter.optimize();
		fsIndexWriter.close();
		// Release both directories (previously leaked).
		ramDir.close();
		fsDir.close();
	}

	/**
	 * Optimizes (merges the segments of) the on-disk index.
	 */
	@Test
	public void test3() throws Exception {
		Directory fsDir = FSDirectory.getDirectory(indexPath);
		IndexWriter fsIndexWriter = new IndexWriter(fsDir, analyzer, MaxFieldLength.LIMITED);
		fsIndexWriter.optimize();
		fsIndexWriter.close();
		fsDir.close(); // release the directory (previously leaked)
	}
}
//=================
public class HelloWorld {

	String filePath = "E:\\lesson\\20090723就业班\\workspace\\LuceneDemo\\luceneDatasource\\IndexWriter addDocument's a javadoc.txt";
	String indexPath = "E:\\lesson\\20090723就业班\\workspace\\LuceneDemo\\luceneIndex";
	Analyzer analyzer = new StandardAnalyzer();

	/**
	 * Creates the index.
	 *
	 * IndexWriter is the class used to modify (add/delete/update) the index.
	 */
	@Test
	public void createIndex() throws Exception {
		// file --> doc
		Document doc = File2DocumentUtils.file2Document(filePath);
		// Build the index ('true' recreates it from scratch).
		IndexWriter indexWriter = new IndexWriter(indexPath, analyzer, true,
				MaxFieldLength.LIMITED);
		indexWriter.addDocument(doc);
		indexWriter.close();
	}

	/**
	 * Searches the index.
	 *
	 * IndexSearcher is the class used to query the index.
	 */
	@Test
	public void search() throws Exception {
		// String queryString = "document";
		String queryString = "adddocument";
		// 1. Parse the search text into a Query.
		String[] fields = { "name", "content" };
		QueryParser queryParser = new MultiFieldQueryParser(fields, analyzer);
		Query query = queryParser.parse(queryString);
		// 2. Execute the query.
		IndexSearcher indexSearcher = new IndexSearcher(indexPath);
		try {
			Filter filter = null;
			TopDocs topDocs = indexSearcher.search(query, filter, 10000);
			System.out.println("总共有【" + topDocs.totalHits + "】条匹配结果");
			// 3. Print the results.
			for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
				int docSn = scoreDoc.doc; // internal document number
				Document doc = indexSearcher.doc(docSn); // fetch the document by number
				File2DocumentUtils.printDocumentInfo(doc); // print the stored fields
			}
		} finally {
			// Close the searcher; the original never released it.
			indexSearcher.close();
		}
	}
}
//================
public class QueryTest {

	IndexDao indexDao = new IndexDao();

	/** Runs the query through IndexDao and prints every matching document. */
	public void queryAndPrintResult(Query query) {
		System.out.println("对应的查询字符串:" + query);
		QueryResult result = indexDao.search(query, 0, 100);
		System.out.println("总共有【" + result.getRecordCount() + "】条匹配结果");
		for (Document doc : result.getRecordList()) {
			File2DocumentUtils.printDocumentInfo(doc);
		}
	}

	/**
	 * Keyword (term) query.
	 *
	 * name:room
	 */
	@Test
	public void testTermQuery() {
		// Term term = new Term("name", "房间");
		// Term term = new Term("name", "Room"); // indexed English keywords are lower-cased
		queryAndPrintResult(new TermQuery(new Term("name", "room")));
	}

	/**
	 * Range query.
	 *
	 * Bounds included: size:[0000000000001e TO 000000000000rs]
	 *
	 * Bounds excluded: size:{0000000000001e TO 000000000000rs}
	 */
	@Test
	public void testRangeQuery() {
		Term from = new Term("size", NumberTools.longToString(50));
		Term to = new Term("size", NumberTools.longToString(1000));
		queryAndPrintResult(new RangeQuery(from, to, false));
	}

	// public static void main(String[] args) {
	// System.out.println(Long.MAX_VALUE);
	// System.out.println(NumberTools.longToString(1000));
	// System.out.println(NumberTools.stringToLong("000000000000rs"));
	//
	// System.out.println(DateTools.dateToString(new Date(), Resolution.DAY));
	// System.out.println(DateTools.dateToString(new Date(), Resolution.MINUTE));
	// System.out.println(DateTools.dateToString(new Date(), Resolution.SECOND));
	// }

	/**
	 * Wildcard query: '?' matches exactly one character, '*' matches zero or
	 * more characters.
	 *
	 * name:房*
	 *
	 * name:*o*
	 *
	 * name:roo?
	 */
	@Test
	public void testWildcardQuery() {
		Term pattern = new Term("name", "roo?");
		// Term pattern = new Term("name", "ro*"); // prefix form; see PrefixQuery
		// Term pattern = new Term("name", "*o*");
		// Term pattern = new Term("name", "房*");
		queryAndPrintResult(new WildcardQuery(pattern));
	}

	/**
	 * Phrase query.
	 *
	 * content:"? 绅士 ? ? 饭店"
	 *
	 * content:"绅士 饭店"~2
	 */
	@Test
	public void testPhraseQuery() {
		PhraseQuery phrase = new PhraseQuery();
		// phrase.add(new Term("content", "绅士"), 1);
		// phrase.add(new Term("content", "饭店"), 4);
		phrase.add(new Term("content", "绅士"));
		phrase.add(new Term("content", "饭店"));
		phrase.setSlop(2);
		queryAndPrintResult(phrase);
	}

	/**
	 * Boolean combinations, e.g.:
	 *
	 * +content:"绅士 饭店"~2 -size:[000000000000dw TO 000000000000rs]
	 *
	 * +content:"绅士 饭店"~2 +size:[000000000000dw TO 000000000000rs]
	 *
	 * content:"绅士 饭店"~2 size:[000000000000dw TO 000000000000rs]
	 *
	 * +content:"绅士 饭店"~2 size:[000000000000dw TO 000000000000rs]
	 */
	@Test
	public void testBooleanQuery() {
		// Clause 1: phrase match on the content field.
		PhraseQuery phrase = new PhraseQuery();
		phrase.add(new Term("content", "绅士"));
		phrase.add(new Term("content", "饭店"));
		phrase.setSlop(2);
		// Clause 2: size range, bounds included.
		Query range = new RangeQuery(
				new Term("size", NumberTools.longToString(500)),
				new Term("size", NumberTools.longToString(1000)), true);
		// Combine the clauses.
		BooleanQuery combined = new BooleanQuery();
		combined.add(phrase, Occur.MUST);
		combined.add(range, Occur.SHOULD);
		queryAndPrintResult(combined);
	}

	@Test
	public void testQueryString() {
		// String queryString = "+content:\"绅士 饭店\"~2 -size:[000000000000dw TO 000000000000rs]";
		// String queryString = "content:\"绅士 饭店\"~2 AND size:[000000000000dw TO 000000000000rs]";
		// String queryString = "content:\"绅士 饭店\"~2 OR size:[000000000000dw TO 000000000000rs]";
		// String queryString = "(content:\"绅士 饭店\"~2 NOT size:[000000000000dw TO 000000000000rs])";
		// String queryString = "-content:\"绅士 饭店\"~2 AND -size:[000000000000dw TO 000000000000rs]";
		// String queryString = "-content:\"绅士 饭店\"~2 OR -size:[000000000000dw TO 000000000000rs]";
		String queryString = "-content:\"绅士 饭店\"~2 NOT -size:[000000000000dw TO 000000000000rs]";
		QueryResult result = indexDao.search(queryString, 0, 10);
		System.out.println("总共有【" + result.getRecordCount() + "】条匹配结果");
		for (Document doc : result.getRecordList()) {
			File2DocumentUtils.printDocumentInfo(doc);
		}
	}
}
//============
public class File2DocumentUtils {

	/**
	 * Converts a file into a Lucene Document with four fields:
	 * name, content, size, path.
	 *
	 * @param path path of the file to convert
	 * @return a Document carrying the file's name, content, size and path
	 */
	public static Document file2Document(String path) {
		File file = new File(path);
		Document doc = new Document();
		doc.add(new Field("name", file.getName(), Store.YES, Index.ANALYZED));
		doc.add(new Field("content", readFileContent(file), Store.YES, Index.ANALYZED));
		// Sizes go through NumberTools so lexicographic order matches numeric order.
		doc.add(new Field("size", NumberTools.longToString(file.length()), Store.YES, Index.NOT_ANALYZED));
		doc.add(new Field("path", file.getAbsolutePath(), Store.YES, Index.NOT_ANALYZED));
		return doc;
	}

	// public static void document2File(Document doc ){
	//
	// }

	/**
	 * Reads the whole file as text, line by line, normalizing line endings
	 * to '\n'.
	 *
	 * NOTE(review): still uses the platform default charset, as the original
	 * did — confirm the expected file encoding before changing it.
	 *
	 * @param file the file to read
	 * @return the file's full text content
	 */
	public static String readFileContent(File file) {
		BufferedReader reader = null;
		try {
			reader = new BufferedReader(new InputStreamReader(new FileInputStream(file)));
			StringBuffer content = new StringBuffer();
			for (String line = null; (line = reader.readLine()) != null;) {
				content.append(line).append("\n");
			}
			return content.toString();
		} catch (Exception e) {
			throw new RuntimeException(e);
		} finally {
			// Close the reader; the original leaked the file handle.
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}

	/**
	 * Prints the stored fields of a document.
	 *
	 * <pre>
	 * Two ways to read the "name" field:
	 * 1. Field f = doc.getField("name"); f.stringValue();
	 * 2. doc.get("name");
	 * </pre>
	 *
	 * @param doc the document to print
	 */
	public static void printDocumentInfo(Document doc) {
		// Field f = doc.getField("name");
		// f.stringValue();
		System.out.println("------------------------------");
		System.out.println("name = " + doc.get("name"));
		System.out.println("content = " + doc.get("content"));
		System.out.println("size = " + NumberTools.stringToLong(doc.get("size")));
		System.out.println("path = " + doc.get("path"));
	}
}
相关推荐
**Lucene简介** Lucene是Apache软件基金会的一个开放源代码项目,它是一个全文搜索引擎库,提供了文本检索的基本功能。Lucene并非一个完整的搜索引擎,而是一个工具包,允许开发人员将全文搜索功能添加到他们的应用...
3. 实战练习:编写小项目,应用Lucene解决实际问题,加深对源码的理解。 总结,Lucene 4.10.3的源码是深入了解搜索引擎技术的宝贵资源,它揭示了文本检索的内在逻辑和优化策略。通过深入研究源码,开发者不仅可以...
此外,实战练习是非常重要的,通过创建实际的搜索应用,可以更好地理解和应用这些技术。 总的来说,掌握Lucene、Compass和Ajax的综合运用,将使开发者能够在Web应用中构建出强大且用户友好的搜索功能,提升整体项目...
《深入理解Lucene 3.4.0:构建高效全文搜索引擎》 Lucene是一个高性能、全文本搜索库,由Apache软件...通过文档、示例代码和实际练习,我们可以逐步掌握Lucene的核心技术,提升开发效率,为用户提供更优质的搜索体验。
2. **索引过程**:Lucene的索引过程包括分析、编码和存储。分析阶段,原始文本被分词并转化为索引的术语。编码是为了节省存储空间和提高搜索速度。存储则涉及如何在磁盘上组织这些数据。 3. **查询处理**:Lucene...
2. **Lucene API**:熟悉Lucene的API,如IndexWriter、Directory、Analyzer、Document和Query等,以及如何使用它们来创建和管理索引。 3. **索引构建**:学习如何处理各种数据源(如文件系统、数据库),将数据转化...
这本书的配套源代码包含了大量的示例和实战练习,帮助读者更好地理解和应用Lucene的核心概念和技术。 Lucene是一个开放源码的全文检索库,由Java编写,为开发人员提供了高级文本检索功能的基础架构。它并不提供一个...
2. **文档(Documents)**: 在 Lucene 中,文档是一个逻辑单元,包含一组字段(Fields),每个字段都有特定的名称和内容。例如,网页的标题、正文和链接都可以作为不同的字段。 3. **查询(Query)**: Lucene 支持...
2. **安装与配置**:如何下载、安装和设置Lucene,以及如何将它集成到Java项目中,让开发者能够快速开始使用。 3. **索引构建**:涵盖文档分析、字段处理、分词器选择和定制,以及如何使用Analyzer和Document对象...
2. **IndexWriter**: 这个类负责创建和更新Lucene索引。通过`IndexWriter`,你可以添加、删除文档,并控制索引的物理结构,如段合并策略。使用`addDocument(Document doc)`方法可以向索引中添加新文档。 3. **...
《Lucene实战(第2版)》是一本深入探讨Apache Lucene搜索引擎库的权威指南,旨在帮助读者理解和应用Lucene进行高效的信息检索。Lucene是一个高性能、全文本搜索库,广泛应用于各种信息检索系统,包括网站搜索、文档...
《开发自己的搜索引擎——Lucene+Heritrix(第2版)》是一本深入探讨如何构建搜索引擎的专著,其中包含了Lucene和Heritrix两个关键工具的详细使用指南。这本书旨在帮助开发者理解搜索引擎的工作原理,并提供实践性的...
【Lucene3.0课程】主要讲解了如何利用Lucene这一全文检索库来...Lucene的使用涉及到索引构建、查询解析、结果排序等多个方面,通过实践练习,如为“传智播客贴吧”添加搜索功能,可以加深对全文检索原理和技术的理解。
2. 索引(Index):Lucene通过建立索引来实现快速搜索。索引是倒排索引,其中每个术语都指向包含该术语的文档列表。这种数据结构使得查找匹配术语的文档变得高效。 3. 文档(Document):在Lucene中,文档是一组...
2. Searcher(搜索器):搜索器用于执行查询并返回匹配的文档。用户输入的查询会被分析,转换为与索引中的项进行比较的形式。匹配的文档会被排序并返回给用户。虽然示例代码没有展示搜索器的部分,但在实际应用中,...
2. **社区与论坛**:参与Lucene相关的社区和论坛讨论,获取最新的信息和解决问题的策略。 3. **实践项目**:结合实际项目进行练习,将理论知识转化为实际技能。 总之,《Lucene搜索引擎开发权威经典》源码包是深入...
2. **索引过程**:Lucene的索引过程包括分析(Tokenization)、词项化(Term Tokenization)、倒排索引(Inverted Index)等步骤。分析器(Analyzer)用于将文档内容拆分成可搜索的术语,而倒排索引则是Lucene高效...
学习Lucene API 需要理解其核心概念,并通过实际项目练习来加深理解。可以先从创建简单的索引和搜索开始,逐步探索更复杂的查询和优化技术。Apache Lucene的官方文档、社区资源和教程都是宝贵的参考资料。 总之,...
lucene的一些练习 练习适当的lucene,搜索引擎 您必须具有Java虚拟机。 使用UTF-8字符编码。 请记住,在Windows中,CLASSPATH的元素由;分隔; 在诸如macOS之类的其他系统上,它们之间用:分隔。 第1部分档案包括...
【Java 课程设计】基于JAVA(结合lucene)的公交搜索系统(完整源代码+详细注释) 【优秀课程设计】主要针对计算机相关专业的正在做毕设的学生和需要项目实战练习的学习者,也可作为课程设计、期末大作业。 包含全部项目...