package org.apache.lucene.demo;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.File;
import java.io.IOException;
import java.util.Date;
import java.util.List;
import java.util.Map;

import org.apache.commons.dbutils.DbUtils;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.Version;

import thtf.ebuilder.website.search.DBIndex;
import thtf.ebuilder.website.services.HTMLServices;

/**
 * Indexes rows from the up_info database table.
 * <p>
 * This is a command-line application demonstrating simple Lucene indexing
 * against database content rather than files on disk. It takes no
 * command-line arguments.
 */
public class IndexFiles {

  private IndexFiles() {}

  /** Builds (or rebuilds) the index from the database. */
  public static void main(String[] args) {
    String indexPath = DBIndex._$.getIndexFile().toString();
    // true  -> always create a fresh index (any existing segments are discarded);
    // false -> append to / update an existing index if one is present.
    boolean create = true;
    Date start = new Date();
    try {
      Directory dir = FSDirectory.open(new File(indexPath));
      IndexWriterConfig iwc =
          new IndexWriterConfig(Version.LUCENE_47, DBIndex._$.analyzer);
      if (create) {
        iwc.setOpenMode(OpenMode.CREATE);
      } else {
        iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
      }

      IndexWriter writer = new IndexWriter(dir, iwc);
      indexDocs(writer);

      // NOTE: if you want to maximize search performance, you can
      // optionally call forceMerge here. This can be a terribly costly
      // operation, so generally it's only worth it when your index is
      // relatively static (i.e. you're done adding documents to it):
      //
      // writer.forceMerge(1);

      writer.close();

      Date end = new Date();
      System.out.println(end.getTime() - start.getTime() + " total milliseconds");
    } catch (IOException e) {
      System.out.println(" caught a " + e.getClass()
          + "\n with message: " + e.getMessage());
    }
  }

  /**
   * Indexes one document per row of the up_info table, using the given writer.
   *
   * Each document carries three fields: a numeric info_id (stored, and
   * efficiently filterable with NumericRangeQuery/NumericRangeFilter), an
   * untokenized info_title, and a tokenized info_content with HTML markup
   * stripped.
   *
   * @param writer Writer to the index where the row data will be stored
   * @throws IOException If there is a low-level I/O error
   */
  static void indexDocs(IndexWriter writer) throws IOException {
    try {
      // queryToMapList is not part of stock Apache Commons DbUtils; it is
      // presumably a project-local helper that runs the SQL statement and
      // returns a List of column-name -> value Maps.
      List list = new DbUtils().queryToMapList(
          "select info_id,info_title,info_content from up_info limit 500");
      for (int i = 0; i < list.size(); i++) {
        Map map = (Map) list.get(i);

        // Make a new, empty document for this row.
        Document doc = new Document();

        int id = Integer.parseInt(String.valueOf(map.get("info_id")));
        doc.add(new IntField("info_id", id, Field.Store.YES));

        // StringField: indexed but not tokenized, so the title is matched
        // as a single term.
        doc.add(new StringField("info_title",
            map.get("info_title") == null ? "" : map.get("info_title").toString(),
            Field.Store.YES));

        // TextField: tokenized by the analyzer; HTML markup is stripped first.
        doc.add(new TextField("info_content",
            map.get("info_content") == null ? ""
                : HTMLServices.clearHTMLToString(map.get("info_content").toString()),
            Field.Store.YES));

        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
          // New index, so we just add the document (no old document can be there).
          writer.addDocument(doc);
        } else {
          // Existing index: replace any previous document with the same id.
          // IntField indexes prefix-coded numeric terms, so a Term built from
          // the plain string form of the id would never match; the id must be
          // prefix-coded the same way IntField encodes it.
          BytesRef idBytes = new BytesRef();
          NumericUtils.intToPrefixCoded(id, 0, idBytes);
          writer.updateDocument(new Term("info_id", idBytes), doc);
        }
      }
      writer.commit();
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
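For reference, a minimal companion sketch of querying the index this class builds, using the same Lucene 4.7 APIs. The class name SearchInfo and the query string are placeholders, and it assumes the same DBIndex helper exposes the index path and the analyzer used at index time, as in the indexer above:

import java.io.File;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import thtf.ebuilder.website.search.DBIndex;

public class SearchInfo {
  public static void main(String[] args) throws Exception {
    // Open the index written by IndexFiles above.
    DirectoryReader reader = DirectoryReader.open(
        FSDirectory.open(new File(DBIndex._$.getIndexFile().toString())));
    IndexSearcher searcher = new IndexSearcher(reader);

    // Parse a user query against the tokenized content field, using the
    // same analyzer that was used at index time.
    QueryParser parser =
        new QueryParser(Version.LUCENE_47, "info_content", DBIndex._$.analyzer);
    Query query = parser.parse("关键词"); // placeholder query string

    TopDocs results = searcher.search(query, 10);
    for (ScoreDoc sd : results.scoreDocs) {
      Document doc = searcher.doc(sd.doc);
      System.out.println(doc.get("info_id") + "\t" + doc.get("info_title"));
    }
    reader.close();
  }
}

Using the same analyzer at search time as at index time is what makes queries against the tokenized info_content field match correctly.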