package org.apache.lucene.demo;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.HitCollector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TopDocCollector;
/** Simple command-line based search demo. */
public class SearchFiles {
/** Use the norms from one field for all fields. Norms are read into memory,
* using a byte of memory per document per searched field. This can cause
* search of large collections with a large number of fields to run out of
* memory. If all of the fields contain only a single token, then the norms
 * are all identical, and a single norm vector may be shared. */
private static class OneNormsReader extends FilterIndexReader {
private String field;
public OneNormsReader(IndexReader in, String field) {
super(in);
this.field = field;
}
public byte[] norms(String field) throws IOException {
return in.norms(this.field);
}
}
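  // OneNormsReader is not wired into main() below. A sketch of how it could be
  // used (the "contents" field name here is an assumption about how the index
  // was built):
  //
  //   IndexReader reader = new OneNormsReader(IndexReader.open(index), "contents");
  //   Searcher searcher = new IndexSearcher(reader);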
private SearchFiles() {}
/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
String index = "index";
String field = "content";
boolean multipleFields = true;
    IndexReader reader = IndexReader.open(index); // open and scan the index at the path given by "index"
Searcher searcher = new IndexSearcher(reader);
Analyzer analyzer = new StandardAnalyzer();
    BufferedReader in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
while (true) {
System.out.println("Enter query: ");
String line = in.readLine();
      if (line == null)
        break;
      line = line.trim();
      if (line.length() == 0)
        break;
if (!multipleFields) {
        QueryParser parser = new QueryParser(field, analyzer);
        // With terms separated by spaces, OR means "a b" matches documents
        // containing a or b, while AND requires both a and b.
        parser.setDefaultOperator(QueryParser.OR_OPERATOR);
        Query query = parser.parse(line); // query against the single default field
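        // Illustration (not in the original demo): print how the line was
        // parsed. With OR as the default operator, an input like "apache lucene"
        // renders roughly as "content:apache content:lucene" (either term may
        // match); with AND_OPERATOR it would render as
        // "+content:apache +content:lucene" (both terms required).
        System.out.println("Parsed query: " + query);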
doPagingSearch(searcher, query);
} else {
        String[] fields = { "contents", "name" };
        BooleanClause.Occur[] flags = new BooleanClause.Occur[] {
            BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD };
        // When searching multiple fields: with SHOULD,SHOULD a document that
        // matches in either "name" or "contents" is returned; with MUST,MUST
        // the terms must appear in both "name" and "contents".
Query query = MultiFieldQueryParser.parse(line, fields, flags,
analyzer);
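        // Illustration (not in the original demo): print the expanded
        // multi-field query. For an input like "apache", the SHOULD/SHOULD
        // flags produce roughly "contents:apache name:apache", i.e. a match
        // in either field is enough.
        System.out.println("Parsed query: " + query);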
doPagingSearch(searcher, query);
}
}
reader.close();
}
/**
 * Collects the top 20 hits with a TopDocCollector, reports the total number
 * of matching documents, and prints the stored fields of each collected hit.
 * Despite its name, this variant does not page through results; it only
 * shows the first batch of hits.
 */
  public static void doPagingSearch(Searcher searcher, Query query) throws IOException {
    // Collect at most the top 20 hits.
    TopDocCollector collector = new TopDocCollector(20);
searcher.search(query, collector);
ScoreDoc[] hits = collector.topDocs().scoreDocs;
    int numTotalHits = collector.getTotalHits(); // total number of documents that matched the query
System.out.println(numTotalHits + " total matching documents");
    for (int i = 0; i < hits.length; i++) {
Document doc = searcher.doc(hits[i].doc);
System.out.println("path.."+doc.get("path"));
System.out.println("modified.."+doc.get("modified"));
System.out.println("name.."+doc.get("name"));
System.out.println("parent"+doc.get("parent"));
System.out.println("content..."+doc.get("content"));
}
}
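  /**
   * A minimal streaming-search sketch (not called from main() above), added to
   * illustrate the HitCollector import: each hit is handed to the collector as
   * it is found, regardless of relevance, and only its docId and score are
   * printed. The method name is illustrative.
   */
  public static void doStreamingSearch(Searcher searcher, Query query) throws IOException {
    searcher.search(query, new HitCollector() {
      public void collect(int doc, float score) {
        // Called once for every matching document.
        System.out.println("doc=" + doc + " score=" + score);
      }
    });
  }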
}