
Learning Lucene: Highlighting

In a search engine we routinely see the terms we searched for emphasized (often in red or bold) inside each result snippet. That emphasized portion is what we call highlighting, and Lucene provides the Highlighter module to implement it.
The highlighting module actually performs two independent jobs. The first is dynamic fragmenting: selecting a small set of sentences out of the large body of text that matched the search. The second is the highlighting itself.
Let's start with how highlighting works:
1. TokenSources:

IndexReader reader = DirectoryReader.open(ramDir);
TokenStream tokenStream = TokenSources.getAnyTokenStream(reader,
        hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);

To highlight, we need the original text, re-analyzed with the same analyzer that was used at indexing time. The static methods on TokenSources extract a TokenStream from whatever source is available: stored term vectors if present, otherwise re-analysis of the stored text.
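As a sketch, here are the two usual ways to obtain that TokenStream (assuming reader, searcher, analyzer, FIELD_NAME and a hit's docId are already in scope, with the imports from the full example further below):

Document doc = searcher.doc(docId);
// Preferred path: reuse stored term vectors when available, otherwise re-analyze
TokenStream ts = TokenSources.getAnyTokenStream(reader, docId, FIELD_NAME, doc, analyzer);
// Manual fallback: always re-analyze the stored text with the indexing analyzer
TokenStream ts2 = analyzer.tokenStream(FIELD_NAME, doc.get(FIELD_NAME));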
2. Once we have the TokenStream, Lucene splits the text into independent fragments, which is configured via:

highlighter.setTextFragmenter(new SimpleFragmenter(40));
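The SimpleFragmenter constructor argument is the target fragment size in characters (40 here), and it simply chops the text into fixed-size chunks. A sketch of an alternative (assuming a QueryScorer named scorer is in scope, as in the example code below): SimpleSpanFragmenter sizes fragments around the spans that actually matched, which usually yields more readable snippets.

// Fixed-size fragments of roughly 40 characters
highlighter.setTextFragmenter(new SimpleFragmenter(40));
// Alternative: fragments centered on the matching spans
highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 40));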

3. From the resulting sequence of fragments, Lucene must pick out the best-scoring ones; this scoring step is embodied in the Highlighter's constructor:

QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
Highlighter highlighter = new Highlighter(scorer);

The final result, however, is returned by this method of the highlighting module:

String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired, "...");

where text is the stored content of the searched field (the text itself, not the field name). The sketch below ties the three steps together; the full example code follows it:
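A minimal end-to-end sketch, assuming an existing index in a Directory named dir whose "contents" field is stored and was indexed with the same analyzer (dir and analyzer are assumed names, not from the original):

IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("contents", "kennedy"));
TopDocs hits = searcher.search(query, 10);

QueryScorer scorer = new QueryScorer(query, "contents");   // step 3: fragment scoring
Highlighter highlighter = new Highlighter(scorer);         // default <B></B> formatter
highlighter.setTextFragmenter(new SimpleFragmenter(40));   // step 2: fragmenting

for (int i = 0; i < hits.scoreDocs.length; i++) {
    int docId = hits.scoreDocs[i].doc;
    Document doc = searcher.doc(docId);
    String text = doc.get("contents");
    // step 1: TokenSources extracts a TokenStream (term vectors or re-analysis)
    TokenStream ts = TokenSources.getAnyTokenStream(reader, docId, "contents", doc, analyzer);
    String snippet = highlighter.getBestFragments(ts, text, 2, "...");
    System.out.println(snippet);
}
reader.close();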

import java.io.IOException;
import java.util.Arrays;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.QueryTermScorer;
import org.apache.lucene.search.highlight.Scorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.search.highlight.TokenSources;
import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
import org.apache.lucene.search.join.BitDocIdSetFilter;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;

/**
 * Simple highlighting tests.
 *
 * @author Lanxiaowei
 */
public class SimpleHightlightTest {
    final int QUERY = 0;
    final int QUERY_TERM = 1;
    final String FIELD_NAME = "contents";
    private static final String NUMERIC_FIELD_NAME = "nfield";
    private Directory ramDir = new RAMDirectory();
    private Analyzer analyzer = new StandardAnalyzer();
    int numHighlights = 0;
    TopDocs hits;
    int mode = QUERY;
    // Fragmenter: splits the original text into highlight fragments
    Fragmenter frag = new SimpleFragmenter(20);

    final FieldType FIELD_TYPE_TV;
    {
        FieldType fieldType = new FieldType(TextField.TYPE_STORED);
        fieldType.setStoreTermVectors(true);
        fieldType.setStoreTermVectorPositions(true);
        fieldType.setStoreTermVectorPayloads(true);
        fieldType.setStoreTermVectorOffsets(true);
        fieldType.freeze();
        FIELD_TYPE_TV = fieldType;
    }

    String[] texts = {
            "Hello this is a piece of text that is very long and contains too much preamble and the meat is really here which says kennedy has been shot",
            "This piece of text refers to Kennedy at the beginning then has a longer piece of text that is very long in the middle and finally ends with another reference to Kennedy",
            "JFK has been shot",
            "John Kennedy Kennedy has been shot",
            "This text has a typo in referring to Keneddy",
            "wordx wordy wordz wordx wordy wordx worda wordb wordy wordc",
            "y z x y z a b",
            "lets is a the lets is a the lets is a the lets" };

    /**
     * Build the test index.
     *
     * @throws IOException
     */
    public void createIndex() throws IOException {
        // Analyzer analyzer = new StandardAnalyzer();
        IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(
                analyzer));
        // Add a few text documents
        for (String text : texts) {
            writer.addDocument(doc(FIELD_NAME, text));
        }
        // Add a few documents with numeric fields
        Document doc = new Document();
        doc.add(new IntField(NUMERIC_FIELD_NAME, 1, Field.Store.NO));
        doc.add(new StoredField(NUMERIC_FIELD_NAME, 1));
        writer.addDocument(doc);
        doc = new Document();
        doc.add(new IntField(NUMERIC_FIELD_NAME, 3, Field.Store.NO));
        doc.add(new StoredField(NUMERIC_FIELD_NAME, 3));
        writer.addDocument(doc);
        doc = new Document();
        doc.add(new IntField(NUMERIC_FIELD_NAME, 5, Field.Store.NO));
        doc.add(new StoredField(NUMERIC_FIELD_NAME, 5));
        writer.addDocument(doc);
        doc = new Document();
        doc.add(new IntField(NUMERIC_FIELD_NAME, 7, Field.Store.NO));
        doc.add(new StoredField(NUMERIC_FIELD_NAME, 7));
        writer.addDocument(doc);

        Document childDoc = doc(FIELD_NAME, "child document");
        Document parentDoc = doc(FIELD_NAME, "parent document");
        writer.addDocuments(Arrays.asList(childDoc, parentDoc));
        // Force-merge the segment files, capping the number of segments after the merge
        writer.forceMerge(1);
        writer.close();
    }

    /**
     * Create a Document with a single field.
     *
     * @param name
     * @param value
     * @return
     */
    private Document doc(String name, String value) {
        Document d = new Document();
        d.add(new Field(name, value, FIELD_TYPE_TV));
        return d;
    }

    /**
     * Create a Token.
     *
     * @param term
     * @param start
     * @param offset
     * @return
     */
    private static Token createToken(String term, int start, int offset) {
        return new Token(term, start, offset);
    }

    public Highlighter getHighlighter(Query query, String fieldName,
            Formatter formatter) {
        // Built from: query, field name, and highlight formatter
        return getHighlighter(query, fieldName, formatter, true);
    }

    /**
     * Create a Highlighter.
     *
     * @param query
     * @param fieldName
     * @param formatter
     * @param expanMultiTerm
     * @return
     */
    public Highlighter getHighlighter(Query query, String fieldName,
            Formatter formatter, boolean expanMultiTerm) {
        Scorer scorer;
        if (mode == QUERY) {
            scorer = new QueryScorer(query, fieldName);
            // Whether to expand multi-term queries
            if (!expanMultiTerm) {
                ((QueryScorer) scorer).setExpandMultiTermQuery(false);
            }
        } else if (mode == QUERY_TERM) {
            scorer = new QueryTermScorer(query);
        } else {
            throw new RuntimeException("Unknown highlight mode");
        }
        return new Highlighter(formatter, scorer);
    }

    /**
     * Return the highlighted text (falling back to the original text if
     * highlighting produced nothing).
     *
     * @param query
     * @param fieldName
     * @param text
     * @return
     * @throws IOException
     * @throws InvalidTokenOffsetsException
     */
    private String highlightField(Query query, String fieldName, String text)
            throws IOException, InvalidTokenOffsetsException {
        // Analyze the raw text into a TokenStream
        TokenStream tokenStream = analyzer.tokenStream(fieldName, text);
        // SimpleHTMLFormatter uses <B></B> by default
        SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
        // The third argument is the default field
        QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
        Highlighter highlighter = new Highlighter(formatter, scorer);
        highlighter.setTextFragmenter(new SimpleFragmenter(Integer.MAX_VALUE));
        // maxNumFragments: max number of fragments; separator: the string placed
        // between fragments, "..." by default
        String rv = highlighter.getBestFragments(tokenStream, text, 1, "...");
        return rv.length() == 0 ? text : rv;
    }

    public Query doSearching(Query unReWrittenQuery) throws Exception {
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        // For MultiTermQuery, TermRangeQuery and PrefixQuery: if you use
        // QueryTermScorer instead of QueryScorer, you must rewrite the query first
        Query query = unReWrittenQuery.rewrite(reader);
        hits = searcher.search(query, null, 1000);
        return query;
    }

    public void testHighlightingWithDefaultField() throws Exception {
        String s1 = "I call our world world Flatland, not because we call it so";
        PhraseQuery q = new PhraseQuery();
        // Allow at most 3 positions between the two terms
        q.setSlop(3);
        q.add(new Term(FIELD_NAME, "world"));
        q.add(new Term(FIELD_NAME, "flatland"));
        String observed = highlightField(q, FIELD_NAME, s1);
        System.out.println(observed);

        q = new PhraseQuery();
        q.setSlop(3);
        q.add(new Term("text", "world"));
        q.add(new Term("text", "flatland"));
        // The highlighted field and the query's field differ, so nothing gets
        // highlighted -- be sure to note this
        observed = highlightField(q, FIELD_NAME, s1);
        System.out.println(observed);
    }

    /**
     * Highlighting with a CommonTermsQuery.
     *
     * @throws Exception
     */
    public void testHighlightingCommonTermsQuery() throws Exception {
        createIndex();
        // 1st arg: high-frequency terms MUST occur; 2nd arg: low-frequency terms
        // are optional; 3rd arg: the maximum term frequency cutoff
        CommonTermsQuery query = new CommonTermsQuery(Occur.MUST, Occur.SHOULD, 3);
        query.add(new Term(FIELD_NAME, "this"));
        query.add(new Term(FIELD_NAME, "long"));
        query.add(new Term(FIELD_NAME, "very"));

        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs hits = searcher.search(query, 10);
        System.out.println("hits.totalHits:" + hits.totalHits);
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);

        Document doc = searcher.doc(hits.scoreDocs[0].doc);
        String storedField = doc.get(FIELD_NAME);
        TokenStream stream = TokenSources.getAnyTokenStream(
                searcher.getIndexReader(), hits.scoreDocs[0].doc, FIELD_NAME,
                doc, analyzer);
        Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
        highlighter.setTextFragmenter(fragmenter);
        String fragment = highlighter.getBestFragment(stream, storedField);
        System.out.println("fragment:" + fragment);

        doc = searcher.doc(hits.scoreDocs[1].doc);
        storedField = doc.get(FIELD_NAME);
        stream = TokenSources.getAnyTokenStream(searcher.getIndexReader(),
                hits.scoreDocs[1].doc, FIELD_NAME, doc, analyzer);
        highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer));
        fragment = highlighter.getBestFragment(stream, storedField);
        // Print the highlighted second match; the default markup is <B></B>
        System.out.println("fragment:" + fragment);
        reader.close();
        ramDir.close();
    }

    /**
     * Test controlling the maximum number of fragments and the fragment
     * character length.
     *
     * @throws Exception
     */
    public void testSimpleTermQueryHighlighter() throws Exception {
        // Build the index
        createIndex();
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = doSearching(new TermQuery(new Term(FIELD_NAME, "kennedy")));
        // Note: you cannot simply pass a raw TermQuery/MultiTermQuery here;
        // the query needs to be rewritten first (doSearching does that)
        // Query query = new TermQuery(new Term(FIELD_NAME, "kennedy"));

        // Max number of highlighted fragments to show, i.e. the number of <B></B> snippets
        int maxNumFragmentsRequired = 1;
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
            TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
            // The SimpleFragmenter constructor argument is the total character length
            // of each fragment, with the <B></B> tags counted in. Tweak the number and
            // count the characters in the output to get a feel for it
            highlighter.setTextFragmenter(new SimpleFragmenter(17));
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + result);
        }
    }

    public void testSimplePhraseQueryHightlighting() throws Exception {
        // Build the index
        createIndex();
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        PhraseQuery phraseQuery = new PhraseQuery();
        phraseQuery.add(new Term(FIELD_NAME, "very"));
        phraseQuery.add(new Term(FIELD_NAME, "long"));
        phraseQuery.add(new Term(FIELD_NAME, "contains"), 3);
        // Without rewriting the query you would get a NullPointerException
        Query query = doSearching(phraseQuery);

        // These two parameters interact in a subtle way: if you request at most 2
        // fragments but the SimpleFragmenter fragment length is large enough to show
        // more than 2, the maxNumFragmentsRequired setting is ignored; conversely, if
        // the fragment length is too small to show even one full fragment while more
        // than one is requested, the length setting loses and the fragment count wins
        int maxNumFragmentsRequired = 3;
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            final Document doc = searcher.doc(hits.scoreDocs[i].doc);
            String text = doc.get(FIELD_NAME);
            TokenStream tokenStream = TokenSources.getAnyTokenStream(reader,
                    hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
            highlighter.setTextFragmenter(new SimpleFragmenter(2));
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + result);
        }

        // Test 2
        phraseQuery = new PhraseQuery();
        phraseQuery.add(new Term(FIELD_NAME, "piece"), 1);
        phraseQuery.add(new Term(FIELD_NAME, "text"), 3);
        phraseQuery.add(new Term(FIELD_NAME, "refers"), 4);
        phraseQuery.add(new Term(FIELD_NAME, "kennedy"), 6);
        query = doSearching(phraseQuery);
        maxNumFragmentsRequired = 2;
        scorer = new QueryScorer(query, FIELD_NAME);
        highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            final Document doc = searcher.doc(hits.scoreDocs[i].doc);
            String text = doc.get(FIELD_NAME);
            TokenStream tokenStream = TokenSources.getAnyTokenStream(reader,
                    hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
            highlighter.setTextFragmenter(new SimpleFragmenter(40));
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + result);
        }
    }

    /**
     * Using the highlighter with a regexp query.
     *
     * @throws Exception
     */
    public void testRegexQueryHightlighting() throws Exception {
        // Build the index
        createIndex();
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = new RegexpQuery(new Term(FIELD_NAME, "ken.*"));
        searcher = new IndexSearcher(reader);
        hits = searcher.search(query, 100);
        int maxNumFragmentsRequired = 2;
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            final Document doc = searcher.doc(hits.scoreDocs[i].doc);
            String text = doc.get(FIELD_NAME);
            TokenStream tokenStream = TokenSources.getAnyTokenStream(reader,
                    hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
            highlighter.setTextFragmenter(new SimpleFragmenter(40));
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + result);
        }
    }

    /**
     * Using the highlighter with a wildcard query.
     *
     * @throws Exception
     */
    public void testWildcardQueryHightlighting() throws Exception {
        // Build the index
        createIndex();
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = new WildcardQuery(new Term(FIELD_NAME, "k?nnedy"));
        searcher = new IndexSearcher(reader);
        hits = searcher.search(query, 100);
        int maxNumFragmentsRequired = 2;
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            final Document doc = searcher.doc(hits.scoreDocs[i].doc);
            String text = doc.get(FIELD_NAME);
            TokenStream tokenStream = TokenSources.getAnyTokenStream(reader,
                    hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
            highlighter.setTextFragmenter(new SimpleFragmenter(40));
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + result);
        }
    }

    /**
     * Using the highlighter with a TermRangeQuery.
     *
     * @throws Exception
     */
    public void testTermRangeQueryHightlighting() throws Exception {
        // Build the index
        createIndex();
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        TermRangeQuery rangeQuery = new TermRangeQuery(FIELD_NAME,
                new BytesRef("kannedy"), new BytesRef("kznnedy"), true, true);
        rangeQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
        searcher = new IndexSearcher(reader);
        hits = searcher.search(rangeQuery, 100);
        int maxNumFragmentsRequired = 2;
        QueryScorer scorer = new QueryScorer(rangeQuery, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            final Document doc = searcher.doc(hits.scoreDocs[i].doc);
            String text = doc.get(FIELD_NAME);
            TokenStream tokenStream = TokenSources.getAnyTokenStream(reader,
                    hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
            highlighter.setTextFragmenter(new SimpleFragmenter(40));
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + result);
        }
    }

    /**
     * Using the highlighter with a SpanNearQuery.
     *
     * @throws Exception
     */
    public void testSpanNearQueryHightlighting() throws Exception {
        // Build the index
        createIndex();
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = new SpanNearQuery(new SpanQuery[] {
                new SpanTermQuery(new Term(FIELD_NAME, "beginning")),
                new SpanTermQuery(new Term(FIELD_NAME, "kennedy")) }, 3, false);
        /*Query query = doSearching(new SpanNearQuery(new SpanQuery[] {
                new SpanTermQuery(new Term(FIELD_NAME, "beginning")),
                new SpanTermQuery(new Term(FIELD_NAME, "kennedy")) }, 3, false));*/
        searcher = new IndexSearcher(reader);
        hits = searcher.search(query, 100);
        int maxNumFragmentsRequired = 2;
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            final Document doc = searcher.doc(hits.scoreDocs[i].doc);
            String text = doc.get(FIELD_NAME);
            TokenStream tokenStream = TokenSources.getAnyTokenStream(reader,
                    hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
            highlighter.setTextFragmenter(new SimpleFragmenter(40));
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + result);
        }
    }

    /**
     * Using the highlighter with a FuzzyQuery.
     *
     * @throws Exception
     */
    public void testFuzzyQueryHightlighting() throws Exception {
        // Build the index
        createIndex();
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        FuzzyQuery query = new FuzzyQuery(new Term(FIELD_NAME, "kinnedy"), 2);
        searcher = new IndexSearcher(reader);
        hits = searcher.search(query, 100);
        int maxNumFragmentsRequired = 2;
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            final Document doc = searcher.doc(hits.scoreDocs[i].doc);
            String text = doc.get(FIELD_NAME);
            TokenStream tokenStream = TokenSources.getAnyTokenStream(reader,
                    hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
            highlighter.setTextFragmenter(new SimpleFragmenter(40));
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + result);
        }
    }

    /**
     * Using the highlighter with a join query.
     *
     * @throws Exception
     */
    public void testToParentBlockJoinQuery() throws Exception {
        // Build the index
        createIndex();
        IndexReader reader = DirectoryReader.open(ramDir);
        IndexSearcher searcher = new IndexSearcher(reader);
        // Filter out the documents whose field value contains "parent" to act as parents
        BitDocIdSetFilter parentFilter = new BitDocIdSetCachingWrapperFilter(
                new QueryWrapperFilter(new TermQuery(new Term(FIELD_NAME, "parent"))));
        // ToParentBlockJoinQuery then finds, under those parents, child documents
        // whose field value contains "child". At index time the parent and children
        // were added together via addDocuments, which takes an array of documents:
        // the LAST document in the array is the parent and all preceding documents
        // are its children -- remember, children must come before the parent.
        // This is also the difference between addDocuments and addDocument.
        Query query = new ToParentBlockJoinQuery(new TermQuery(new Term(
                FIELD_NAME, "child")), parentFilter, ScoreMode.Total);
        hits = searcher.search(query, 100);
        int maxNumFragmentsRequired = 3;
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
        Highlighter highlighter = new Highlighter(scorer);
        for (int i = 0; i < hits.totalHits; i++) {
            String text = "child document";
            TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
            highlighter.setTextFragmenter(new SimpleFragmenter(50));
            String fragment = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, "...");
            System.out.println("\t" + fragment);
        }
    }

    /**
     * Test encoding special characters (such as < > & ") while highlighting:
     * just pass a SimpleHTMLEncoder to the Highlighter constructor.
     * SimpleHTMLFormatter lets you customize the opening and closing tags, e.g.
     * new SimpleHTMLFormatter("<font color=\"red\">", "</font>");
     * the default is <B> </B>.
     *
     * @throws Exception
     */
    public void testEncoding() throws Exception {
        String rawDocContent = "\"Smith & sons' prices < 3 and >4\" claims article";
        Query query = new RegexpQuery(new Term(FIELD_NAME, "price.*"));
        QueryScorer scorer = new QueryScorer(query, FIELD_NAME, FIELD_NAME);
        Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(
                "<font color=\"red\">", "</font>"), new SimpleHTMLEncoder(), scorer);
        highlighter.setTextFragmenter(new SimpleFragmenter(2000));
        TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, rawDocContent);
        String encodedSnippet = highlighter.getBestFragments(tokenStream,
                rawDocContent, 1, "");
        System.out.println(encodedSnippet);
    }

    public static void main(String[] args) throws Exception {
        SimpleHightlightTest simpleHightlightTest = new SimpleHightlightTest();
        //simpleHightlightTest.testHighlightingCommonTermsQuery();
        //simpleHightlightTest.testHighlightingWithDefaultField();
        //simpleHightlightTest.testSimpleTermQueryHighlighter();
        simpleHightlightTest.testSimplePhraseQueryHightlighting();
        //simpleHightlightTest.testRegexQueryHightlighting();
        //simpleHightlightTest.testWildcardQueryHightlighting();
        //simpleHightlightTest.testToParentBlockJoinQuery();
        //simpleHightlightTest.testSpanNearQueryHightlighting();
        //simpleHightlightTest.testFuzzyQueryHightlighting();
        //simpleHightlightTest.testTermRangeQueryHightlighting();
        //simpleHightlightTest.testEncoding();
    }
}

The code runs highlighting tests against each of the different query types, which makes it easy to compare how well each query highlights.
[Screenshot: highlighted output of the simple phrase query test]