2016-12-15: The project needs the following tool packages (jars) on the classpath.

[Image: the required Lucene jar packages. Judging by the imports below, these are at least lucene-core, lucene-analyzers-smartcn, lucene-queryparser, and lucene-highlighter.]
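If it is unclear which Lucene version those jars actually provide (the code below matches roughly the 5.x/6.x API line), a minimal sketch for checking is to print the version constant that ships in lucene-core:

public class LuceneVersionCheck {
    public static void main(String[] args) {
        // Version.LATEST comes from lucene-core and names the classpath version, e.g. "6.2.0"
        System.out.println(org.apache.lucene.util.Version.LATEST);
    }
}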

package com.cn.shupu.util;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;

public class LuceneTest {

    // Files to index (note: static, so repeated calls to saveFile keep appending)
    public static List<File> files = new ArrayList<File>();

    // Recursively walk sourceFile and collect every regular file
    public static List<File> saveFile(File sourceFile) {
        File[] fs = sourceFile.listFiles();
        if (fs != null) { // listFiles() returns null for non-directories
            for (int i = 0; i < fs.length; i++) {
                if (fs[i].isFile()) {
                    files.add(fs[i]);
                } else {
                    saveFile(fs[i]);
                }
            }
        }
        return files;
    }
    // Read the whole text of a file (the source .txt files are assumed to be GBK-encoded)
    public static String saveContent(File file) {
        StringBuilder content = new StringBuilder();
        try {
            // Byte stream -> GBK decoding bridge -> buffered line reader
            FileInputStream fis = new FileInputStream(file);
            InputStreamReader isr = new InputStreamReader(fis, "GBK");
            BufferedReader br = new BufferedReader(isr);
            String s = null;
            while ((s = br.readLine()) != null) {
                // Collect the content line by line
                content.append("\n").append(s);
            }
            // Close the streams
            br.close();
            isr.close();
            fis.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return content.toString();
    }
    // Build the index
    public static void createIndex(String sourcePath, String indexPath) {
        // Directory holding the files to index
        File sourceFile = new File(sourcePath);
        List<File> fs = saveFile(sourceFile);
        Date date1 = new Date();
        // Where the finished index is stored
        Directory dir;
        try {
            // Optimization: stage the index in memory first
            RAMDirectory rd = new RAMDirectory();
            // On-disk destination for the index
            dir = FSDirectory.open(Paths.get(indexPath));
            // Analyzer for Chinese text
            SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
            // Writer that indexes into RAM (first arg: target directory, second: config)
            IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
            IndexWriter ramWriter = new IndexWriter(rd, iwc);
            // Config for the on-disk writer
            IndexWriterConfig iwc2 = new IndexWriterConfig(analyzer);
            iwc2.setOpenMode(OpenMode.CREATE_OR_APPEND);
            iwc2.setMaxBufferedDocs(256);
            iwc2.setRAMBufferSizeMB(100);
            // MergeFactor controls how often segments are merged: it determines how many
            // documents a segment holds, and when that many segments accumulate on disk
            // they are merged into a larger one. A larger MergeFactor makes indexing
            // faster. The default is 10; raising it before a bulk index build is advised.
            LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();
            mergePolicy.setMergeFactor(100);
            iwc2.setMergePolicy(mergePolicy);
            // Writer that will receive everything buffered in the RAMDirectory
            IndexWriter fsWriter = new IndexWriter(dir, iwc2);
            for (int i = 0; i < fs.size(); i++) {
                String content = "";
                // Only index .txt files; check the extension
                int a = fs.get(i).getName().lastIndexOf(".");
                String fileType = fs.get(i).getName().substring(a + 1);
                if ("txt".equals(fileType)) {
                    content = saveContent(fs.get(i));
                    Document doc = new Document();
                    doc.add(new TextField("filename", fs.get(i).getName(), Store.YES));
                    doc.add(new TextField("path", fs.get(i).getAbsolutePath(), Store.YES));
                    doc.add(new TextField("content", content, Store.YES));
                    ramWriter.addDocument(doc);
                }
            }
            ramWriter.commit();
            ramWriter.close();
            // Flush everything staged in the RAMDirectory into the FSDirectory
            fsWriter.addIndexes(rd);
            fsWriter.forceMerge(5); // merge down to at most 5 segments
            fsWriter.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        Date date2 = new Date();
        System.out.println("createIndex took " + (date2.getTime() - date1.getTime()) + "ms\n");
    }
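    // A hedged note on the optimization above: with setRAMBufferSizeMB(100) the
    // FSDirectory writer already buffers documents in memory before flushing, so
    // the extra RAMDirectory staging step mainly pays off for many small batch
    // builds; it is worth benchmarking both paths before settling on one.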
    // Search the index
    public static void search(String indexPath, String keyword) {
        Date date1 = new Date();
        Directory dir;
        try {
            dir = FSDirectory.open(Paths.get(indexPath));
            // Same analyzer that was used at index time
            SmartChineseAnalyzer analyzer = new SmartChineseAnalyzer();
            // Open the index location and create a searcher over it
            DirectoryReader dr = DirectoryReader.open(dir);
            IndexSearcher searcher = new IndexSearcher(dr);
            // First arg: the field to search; second: the analyzer
            QueryParser parser = new QueryParser("content", analyzer);
            Query query = parser.parse(keyword);
            TopDocs results = searcher.search(query, 100);
            // The result set
            ScoreDoc[] hits = results.scoreDocs;
            for (int i = 0; i < hits.length; i++) {
                // Fetch one hit
                Document doc = searcher.doc(hits[i].doc);
                String path = doc.get("path");
                String content = doc.get("content");
                String filename = doc.get("filename");
                // HTML snippet with the matched terms highlighted
                String s = highlight(query, analyzer, "content", content, 200);
                System.out.println("____________________________");
                System.out.println("file name: " + filename);
                System.out.println("snippet:   " + s);
                System.out.println("file path: " + path);
                System.out.println("____________________________");
            }
            dr.close();
            dir.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
        Date date2 = new Date();
        System.out.println("search took " + (date2.getTime() - date1.getTime()) + "ms\n");
    }

    /**
     * Returns the highlighted hit as a piece of HTML.
     *
     * @param query        the query
     * @param analyzer     the analyzer
     * @param fieldName    the field name
     * @param fieldContent the field content
     * @param fragmentSize length of the result (excluding the HTML tags)
     * @return the result (an HTML snippet)
     * @throws IOException
     * @throws InvalidTokenOffsetsException
     */
    public static String highlight(Query query, Analyzer analyzer, String fieldName, String fieldContent, int fragmentSize)
            throws IOException, InvalidTokenOffsetsException {
        // Highlighter that wraps matched terms in a red <font> tag
        Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter("<font color='red'>", "</font>"),
                new QueryScorer(query));
        // Cap the length of the returned fragment
        Fragmenter fragmenter = new SimpleFragmenter(fragmentSize);
        highlighter.setTextFragmenter(fragmenter);
        return highlighter.getBestFragment(analyzer, fieldName, fieldContent);
    }

    public static void main(String[] args) {
        createIndex("D:\\图书文件", "c:\\expal");
        // Observed timings: 22385ms 10380ms 35221ms 96583ms 66798ms 169438ms 179478ms
        search("c:\\expal", "炎");
    }
}
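To sanity-check what createIndex wrote, the index can be opened with a plain reader and its stored fields read back directly. A minimal sketch, assuming the c:\expal index path used in main above:

import java.nio.file.Paths;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.FSDirectory;

public class IndexCheck {
    public static void main(String[] args) throws Exception {
        // Open the on-disk index produced by LuceneTest.createIndex(...)
        try (FSDirectory dir = FSDirectory.open(Paths.get("c:\\expal"));
                DirectoryReader reader = DirectoryReader.open(dir)) {
            System.out.println("docs in index: " + reader.numDocs());
            if (reader.numDocs() > 0) {
                // Stored fields can be read back without running a query
                Document first = reader.document(0);
                System.out.println("first doc path: " + first.get("path"));
            }
        }
    }
}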