I have already seen a few similar questions, but I still don't have an answer. I think my problem is simple.
In the sentence
In this text, only Meta Files are important, and Test Generation.
Anything else is irrelevant
I want to index only Meta Files and Test Generation, which means I need an exact match.
Could someone please explain how to achieve this?
Here is the code:
Analyzer analyzer = new StandardAnalyzer();
Lucene.Net.Store.Directory directory = new RAMDirectory();
IndexWriter iwriter = new IndexWriter(directory, analyzer, true);
iwriter.SetMaxFieldLength(10000);
Document doc = new Document();
doc.Add(new Field("textFragment", text, Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
iwriter.AddDocument(doc);
iwriter.Close();
IndexSearcher isearcher = new IndexSearcher(directory);
QueryParser parser = new QueryParser("textFragment", analyzer);
foreach (DictionaryEntry de in OntologyLayer.OntologyLayer.HashTable)
{
List<string> buffer = new List<string>();
double weight = 0;
List<OntologyLayer.Term> list = (List<OntologyLayer.Term>)de.Value;
foreach (OntologyLayer.Term t in list)
{
Hits hits = null;
string label = t.Label;
string[] words = label.Split(' ');
int numOfWords = words.Length;
double wordWeight = 1 / (double)numOfWords;
double localWeight = 0;
foreach (string a in words)
{
try
{
if (!buffer.Contains(a))
{
Lucene.Net.Search.Query query = parser.Parse(a);
hits = isearcher.Search(query);
if (hits != null && hits.Length() > 0)
{
localWeight = localWeight + t.Weight * wordWeight * hits.Length();
}
buffer.Add(a);
}
}
catch (Exception ex)
{
//note: parse errors are silently swallowed here
}
}
weight = weight + localWeight;
}
sbWeight.AppendLine(weight.ToString());
if (weight > 0)
{
string objectURI = (string)de.Key;
conceptList.Add(objectURI);
}
}
Take a look at Stupid Lucene Tricks: Exact Match, Starts With, Ends With.
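The core idea there also fits this question: if a value only ever needs to match as a whole, index it as a single un-tokenized term and query it with a TermQuery instead of running the text through an analyzer. Below is a minimal sketch of that approach in Java (the field name labelExact is made up for illustration, and it assumes a Lucene 4.x-style API; in the older Lucene.Net API from the question, an un-tokenized field via Field.Index.UN_TOKENIZED / NOT_ANALYZED plays the role of StringField):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class ExactMatchSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir,
                new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40)));

        // StringField keeps the whole phrase as ONE index term, so "Meta Files"
        // is never split into "meta" and "files".
        Document doc = new Document();
        doc.add(new StringField("labelExact", "Meta Files", Store.YES));
        writer.addDocument(doc);
        writer.close();

        IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
        // The exact value matches (1 hit)...
        System.out.println(searcher.search(
                new TermQuery(new Term("labelExact", "Meta Files")), 10).totalHits);
        // ...but a fragment of it does not (0 hits).
        System.out.println(searcher.search(
                new TermQuery(new Term("labelExact", "Meta")), 10).totalHits);
    }
}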
I am trying to highlight terms in a string. My code searches along a string and looks for equivalent terms in an index, and it returns the found terms fine. However, I would like to return the original string that the user entered, with the found terms highlighted. I am using Lucene 4 because that is the version the book I am learning from uses. I have made a pitiful attempt to use term vectors and the like, but it iterates through the entire field; I can't figure out how to get just the found terms. Here is my code:
public class TokenArrayTest {
private static final String INDEX_DIR = "C:/ontologies/Lucene/icnpIndex";
//private static List<Float> levScore = new ArrayList<Float>();
//add key and value pairs of tokens to a map to send to a servlet. key 10,11,12 etc
//private static HashMap<Integer, String> hashMap = new HashMap<Integer, String>();
private static List<String> tokens = new ArrayList<String>();
private static int totalResults=0;
public static void main(String[] pArgs) throws IOException, ParseException, InvalidTokenOffsetsException
{
//counters which detect found term changes to advance the html table to the next cell
int b=1;
int c=1;
String searchText="Mrs. smith has limited mobility and fell out of bed. She needs a feeding assessment. She complained of abdominal pains nuring the night. She woke with a headache and she is due for a shower this morning.";
//Get directory reference
Directory dir = FSDirectory.open(new File(INDEX_DIR));
//Index reader - an interface for accessing a point-in-time view of a lucene index
IndexReader reader = DirectoryReader.open(dir);
//Create lucene searcher. It search over a single IndexReader.
IndexSearcher searcher = new IndexSearcher(reader);
//analyzer with the default stop words
Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(searchText));
CharTermAttribute termAttribute = tokenStream.getAttribute(CharTermAttribute.class);
//Query parser to be used for creating TermQuery
QueryParser qp = new QueryParser(Version.LUCENE_40, "Preferred Term", analyzer);
/*add all of the words to an array after they have passed through the analyzer.
* The words are used one by one through the query method later on.
*/
tokenStream.reset(); //the stream must be reset before the first incrementToken() call
while (tokenStream.incrementToken()) {
tokens.add(termAttribute.toString());
}
tokenStream.close();
//print the top half of the html page
System.out.print("<html>\r\n" +
"\r\n" +
"<head>\r\n" +
"<meta http-equiv=\"Content-Type\" content=\"text/html; charset=windows-1252\">\r\n" +
"\r\n" +
"<title>ICNP results</title>\r\n" +
"</head>\r\n" +
"\r\n" +
"<body>\r\n" +
"\r\n" +
"<p>"+
searchText+"<br>"+
"<p>"+
"<div align=\"center\">\r\n" +
" <center>\r\n" +
" <table border=\"1\" \r\n" +
" <tr>\r\n" +
"<td>\r\n"+
"");
//place each word from the previous array into the query
for(int n=0;n<tokens.size();++n) {
//Create the query
Query query = qp.parse(tokens.get(n));
//Search the lucene documents for the hits
TopDocs hits = searcher.search(query, 20);
//Total found documents
totalResults =totalResults+hits.totalHits;
//print out the score for each searched term
//for (ScoreDoc sd : hits.scoreDocs)
//{
//Document d = searcher.doc(sd.doc);
// System.out.println("Score : " + sd.score);
// }
/** Highlighter Code Start ****/
//Put a html code in here for each found term if need be
Formatter formatter = new SimpleHTMLFormatter("", "");
//Scores text fragments by the number of unique query terms found
QueryScorer scorer = new QueryScorer(query);
//used to markup highlighted terms found in the best sections of a text
Highlighter highlighter = new Highlighter(formatter, scorer);
//It breaks text up into same-size texts but does not split up spans
Fragmenter fragmenter = new SimpleSpanFragmenter(scorer, 20);
//set fragmenter to highlighter
highlighter.setTextFragmenter(fragmenter);
//Iterate over found results
for (int i = 0; i < hits.scoreDocs.length; i++)
{
int docid = hits.scoreDocs[i].doc;
Document doc = searcher.doc(docid);
//Get stored text from found document
String text = doc.get("Preferred Term");
//a pitiful attempt to get term vectors and such like
//(getTermVector expects a document id, not the loop index, and returns null if the field has no term vectors)
Terms termsVector = reader.getTermVector(docid, "Preferred Term");
TermsEnum termsEnum = termsVector.iterator(null);
BytesRef term;
String val;
DocsAndPositionsEnum docsAndPositionsEnum = null;
while ( (term = termsEnum.next()) != null ) {
val = term.utf8ToString();
System.out.println("DocId: " + i);
System.out.println(" term: " + val);
System.out.println(" length: " + term.length);
docsAndPositionsEnum = termsEnum.docsAndPositions(null, docsAndPositionsEnum);
if (docsAndPositionsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int freq = docsAndPositionsEnum.freq();
System.out.println(" freq: " + docsAndPositionsEnum.freq());
for (int j = 0; j < freq; j++) {
System.out.println(" [");
System.out.println(" position: " + docsAndPositionsEnum.nextPosition());
System.out.println(" offset start: " + docsAndPositionsEnum.startOffset());
System.out.println(" offset end: " + docsAndPositionsEnum.endOffset());
System.out.println(" ]");
}
}
}
//Create token stream
TokenStream stream = TokenSources.getAnyTokenStream(reader, docid, "Preferred Term", analyzer);
//Get highlighted text fragments
String[] frags = highlighter.getBestFragments(stream, text,20);
for (String frag : frags)
{
//On the first pass print this html out
if((c==1)&&(b!=c)) {
System.out.println("<select>");
c=b;
}else if((b!=c)) { //and every other time move to the next cell when b changes
System.out.println("</select>"
+ "</td><td>"
+ "<select>");
c=b;
}
System.out.println("<option value='"+frag+"'>"+frag+"</option>");
}
}
b=b+1;
}
dir.close();
b=1;
c=1;
totalResults=0;
//print the bottom half of the html page
System.out.print("</select></td>\r\n" +
" </tr>\r\n" +
" </table>\r\n" +
" </center>\r\n" +
"</div>\r\n" +
"\r\n" +
"</body>\r\n" +
"\r\n" +
"</html>\r\n" +
"");
}
}
I don't know if this is possible with Lucene v4, but with newer versions it is easily done with a Highlighter or a UnifiedHighlighter.
There are several tutorials in which text highlighting is achieved in different ways (just google it...):
Lucene Search Highlight Example
Lucene UnifiedHighlighter Example
Lucene Highlighter Tutorial with Example
If you are starting a new project, I would strongly suggest using the most recent version, even if your book is based on Lucene v4. The book is good for getting a basic understanding of how Lucene works, but using an old version of the library is instant technical debt which you will have to deal with later on. In addition, newer versions usually provide extra features which may be interesting for you.
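For reference, here is a minimal sketch of what the UnifiedHighlighter route looks like (assuming a recent Lucene version, roughly 6.x or later, reusing the searcher, analyzer and query names from the question, and a stored "Preferred Term" field):

import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.uhighlight.UnifiedHighlighter;

UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, analyzer);
TopDocs topDocs = searcher.search(query, 20);
// one snippet per hit, with the matched terms wrapped in <b>...</b> by default
String[] fragments = highlighter.highlight("Preferred Term", query, topDocs);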
For future readers, here is my plain old Java method (POJM) that prints out offsets.
generatePreviewText( analyzer, searchText, tokens, frags );
public static void generatePreviewText(Analyzer analyzer, String inputText, List<String> tokens, String[] frags) throws IOException
{
String contents[]= {inputText};
String[] foundTerms = frags;
//for(int n=0;n<frags.length;++n) {
//System.out.println("Found terms array= "+foundTerms[n]);
// }
Directory directory = new RAMDirectory();
IndexWriterConfig config =
new IndexWriterConfig(Version.LUCENE_40, analyzer);
IndexWriter indexWriter = new IndexWriter(directory, config);
FieldType textFieldType = new FieldType();
textFieldType.setIndexed(true);
textFieldType.setTokenized(true);
textFieldType.setStored(true);
textFieldType.setStoreTermVectors(true);
textFieldType.setStoreTermVectorPositions(true);
textFieldType.setStoreTermVectorOffsets(true);
Document doc = new Document();
Field textField = new Field("content", "", textFieldType);
for (String content : contents) {
textField.setStringValue(content);
doc.removeField("content");
doc.add(textField);
indexWriter.addDocument(doc);
}
indexWriter.commit();
IndexReader indexReader = DirectoryReader.open(directory);
DocsAndPositionsEnum docsAndPositionsEnum = null;
Terms termsVector = null;
TermsEnum termsEnum = null;
BytesRef term = null;
String val = null;
for (int i = 0; i < indexReader.maxDoc(); i++) {
termsVector = indexReader.getTermVector(i, "content");
termsEnum = termsVector.iterator(termsEnum);
while ( (term = termsEnum.next()) != null ) {
val = term.utf8ToString();
// if(foundTerms.get(i)==val) {
System.out.println(" term: " + val);
System.out.println(" length: " + term.length);
docsAndPositionsEnum = termsEnum.docsAndPositions(null, docsAndPositionsEnum);
if (docsAndPositionsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int freq = docsAndPositionsEnum.freq();
System.out.println(" freq: " + docsAndPositionsEnum.freq());
for (int j = 0; j < freq; j++) {
System.out.println(" [");
System.out.println(" position: " + docsAndPositionsEnum.nextPosition());
System.out.println(" offset start: " + docsAndPositionsEnum.startOffset());
System.out.println(" offset end: " + docsAndPositionsEnum.endOffset());
System.out.println(" ]");
}
}
//}
}
}
indexWriter.close();
}
The highlighter in Lucene.Net (3.0.3) is not working for the code below. If I search for the word "dealing", the highlighter shows it, but if I search for a word with a wildcard, "deal*", then there is no highlighting.
protected void btnIndex_Click(object sender, EventArgs e)
{
string indexPath = @"D:\temp\LuceneIndex1";
Lucene.Net.Store.Directory directory = FSDirectory.Open(indexPath);
Analyzer analyzer = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);
IndexWriter writer = new IndexWriter(directory, analyzer, IndexWriter.MaxFieldLength.UNLIMITED);
IndexReader red = IndexReader.Open(directory, true);
int totDocs = red.MaxDoc;
red.Close();
//Add documents to the index
string text = String.Empty;
text = "One thing that may be of interest, is that if you are dealing with vast quantites of data you may want to create static Field fields and reuse them rather than creating new one each time you rebuild the index. Obviously for this demo the Lucene index is only created once per application run, but in a production application you may build the index every 5 mins or something like that, in which case I would recommend reusing the Field objects by making static fields that get re-used.";
int txts = totDocs;
AddTextToIndex(txts++, text, writer);
writer.Optimize();
writer.Dispose();
//Setup searcher
IndexSearcher searcher = new IndexSearcher(directory);
QueryParser parser = new QueryParser(Lucene.Net.Util.Version.LUCENE_30, "postBody", analyzer);
text = txtSearchData.Text;
Label1.Text = Search(text, searcher, parser, analyzer);
//Clean up everything
searcher.Close();
directory.Close();
}
private static void AddTextToIndex(int txts, string text, IndexWriter writer)
{
Document doc = new Document();
doc.Add(new Field("id", txts.ToString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.Add(new Field("postBody", text, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.AddDocument(doc);
}
private string Search(string text, IndexSearcher searcher, QueryParser parser, Analyzer analyzer)
{
string indexPath = @"D:\temp\LuceneIndex1";
Lucene.Net.Store.Directory directory = FSDirectory.Open(indexPath);
string result = "";
string snip = "";
var booleanQuery = new BooleanQuery();
var fuzzyQuery = new FuzzyQuery(new Term("postBody", text), 0.7f, 3);
booleanQuery.Add(new BooleanClause(fuzzyQuery, Occur.SHOULD));
//Supply conditions
Query query = parser.Parse(text);
FastVectorHighlighter highlighter = getHighlighter();
parser.AllowLeadingWildcard = true;
query = parser.Parse(text);
BooleanQuery.MaxClauseCount = 10;
query = query.Rewrite(IndexReader.Open(directory, true));
FieldQuery fieldQuery = highlighter.GetFieldQuery(booleanQuery);
TopScoreDocCollector collector = TopScoreDocCollector.Create(100, true);
searcher.Search(query, collector);
ScoreDoc[] hits = collector.TopDocs().ScoreDocs;
int results = hits.Length;
Console.WriteLine("Found {0} results", results);
for (int i = 0; i < hits.Length; i++)
{
int docId = hits[i].Doc;
float score = hits[i].Score;
Lucene.Net.Documents.Document doc = searcher.Doc(docId);
result = "Score: " + score.ToString() +
" Field: " + doc.Get("id") +
" Field2: " + doc.Get("postBody");
string text1 = doc.Get("postBody");
string[] hight = getFragmentsWithHighlightedTerms(analyzer, query, "postBody", text1, 5, 100, directory);
}
return result + " :::: " + snip;
}
private FastVectorHighlighter getHighlighter()
{
FragListBuilder fragListBuilder = new SimpleFragListBuilder();
FragmentsBuilder fragmentsBuilder = new ScoreOrderFragmentsBuilder(
BaseFragmentsBuilder.COLORED_PRE_TAGS,
BaseFragmentsBuilder.COLORED_POST_TAGS);
return new FastVectorHighlighter(true, true, fragListBuilder,
fragmentsBuilder);
}
private static String[] getFragmentsWithHighlightedTerms(Analyzer analyzer, Query query, string fieldName, string fieldContents, int fragmentSize, int maxsize, Lucene.Net.Store.Directory directory)
{
TokenStream stream = TokenSources.GetTokenStream(fieldName, fieldContents, analyzer);
// SpanScorer scorer = new SpanScorer();//(query, fieldName, new CachingTokenFilter(stream));
query = query.Rewrite(IndexReader.Open(directory, true));
QueryScorer scorer = new QueryScorer(query, fieldName);
scorer.IsExpandMultiTermQuery = true;// (true);
SimpleSpanFragmenter fragmenter = new SimpleSpanFragmenter(scorer, fragmentSize);
Highlighter highlighter = new Highlighter(scorer);
highlighter.TextFragmenter = fragmenter;
highlighter.MaxDocCharsToAnalyze = maxsize;
String[] fragments = highlighter.GetBestFragments(stream, fieldContents, 10);
return fragments;
}
The search method of IndexSearcher in Lucene is not returning any output: the number of documents returned by the query is always 0. I built the index using the following code:
void buildIndex(File indexDir, File trainDir, HashMap<String,Integer> dictionary)
throws IOException, FileNotFoundException {
Directory fsDir = FSDirectory.open(indexDir);
IndexWriterConfig iwConf
= new IndexWriterConfig(VERSION,mAnalyzer);
iwConf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
IndexWriter indexWriter
= new IndexWriter(fsDir,iwConf);
File file = trainDir;
String csvFilename = "/home/serene/Downloads/IndustryClassification/Train/Training.csv";
CSVReader csvReader = new CSVReader(new FileReader(csvFilename),'\t');
String[] row = null;
while((row = csvReader.readNext()) != null) {
Document d = new Document();
String companyname = row[1];
String NAICSID = row[2];
//System.out.println(NAICSID);
String description = row[4];
d.add(new TextField("company",companyname,Store.YES));
d.add(new StringField("category",NAICSID,Store.YES));
dictionary.put(NAICSID, 1);
d.add(new TextField("description", description, Store.NO));
//System.out.println(d.toString());
indexWriter.addDocument(d);
}
csvReader.close();
int numDocs = indexWriter.numDocs();
indexWriter.forceMerge(1);
indexWriter.commit();
indexWriter.close();
System.out.println("index=" + indexDir.getName());
System.out.println("num docs=" + numDocs);
}
When trying to get the output for a test query using the following code, I am not getting any output for the categories as scoreDocs.length is always 0 and the code within the for loop isn't executed.
void testIndex(File indexDir, File testDir, Set<String>NEWSGROUPS)
throws IOException, FileNotFoundException, ParseException {
Directory fsDir = FSDirectory.open(indexDir);
DirectoryReader reader = DirectoryReader.open(fsDir);
IndexSearcher searcher = new IndexSearcher(reader);
Analyzer analyzer = new StandardAnalyzer(VERSION);
System.out.print("inside testIndex");
int[][] confusionMatrix
= new int[NEWSGROUPS.size()][NEWSGROUPS.size()];
String csvFilename = "/home/serene/Downloads/IndustryClassification/Test/Test.csv";
CSVReader csvReader = new CSVReader(new FileReader(csvFilename), '\t');
String[] row = null;
while((row = csvReader.readNext()) != null) {
String companyname = row[1];
String NAICSID = row[2];
String description = row[4];
Query query = new QueryParser(Version.LUCENE_44,"contents",analyzer).parse(QueryParser.escape(description));
System.out.print(query +"\n");
TopDocs hits = searcher.search(query,3);
ScoreDoc[] scoreDocs = hits.scoreDocs;
System.out.println(hits.totalHits);
for (int n = 0; n < scoreDocs.length; ++n) {
ScoreDoc sd = scoreDocs[n];
int docId = sd.doc;
Document d = searcher.doc(docId);
String category = d.get("category");
System.out.println(category);
}
}
csvReader.close();
}
Replace "contents" with with any of the field (company .. ) that you indexed.
I am trying to build an index with some facets, following the User Guide.
However, I run into a problem; the following line from the User Guide gives errors.
DocumentBuilder categoryDocBuilder = new CategoryDocumentBuilder(taxo);
Neither DocumentBuilder nor CategoryDocumentBuilder exists in lucene-facet.
I cannot find the API changes in the Jira issues. Does anyone have this working and care to share how it should be done?
I figured it out using the benchmark code as inspiration.
Indexing
Directory dir = FSDirectory.open( new File("index" ));
Directory dir_taxo = FSDirectory.open( new File("index-taxo" ));
IndexWriter writer = new IndexWriter(dir, config); //config: your IndexWriterConfig
TaxonomyWriter taxo = new DirectoryTaxonomyWriter(dir_taxo, OpenMode.CREATE);
FacetFields ff= new FacetFields(taxo);
//for all documents:
Document d = new Document();
List<CategoryPath> categories = new ArrayList<CategoryPath>();
for (all fields in doc)
{
d.addField( ....)
}
for (all categories in doc)
{
CategoryPath cp = new CategoryPath(field, value);
categories.add( cp);
taxo.addCategory(cp); //not sure if necessary
}
ff.addFields(d, categories);
writer.addDocument( d );
Searching:
Directory dir = FSDirectory.open( new File("index" ));
Directory dir_taxo = FSDirectory.open( new File("index-taxo" ));
final DirectoryReader indexReader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(indexReader);
TaxonomyReader taxo = new DirectoryTaxonomyReader(dir_taxo);
Query q = new TermQuery(new Term(...));
TopScoreDocCollector tdc = TopScoreDocCollector.create(1, true);
FacetSearchParams facetSearchParams = new FacetSearchParams(new CountFacetRequest(new CategoryPath("mycategory"),10));
FacetsCollector facetsCollector = new FacetsCollector(facetSearchParams, indexReader, taxo);
long ts= System.currentTimeMillis();
searcher.search(q, MultiCollector.wrap(tdc, facetsCollector));
List<FacetResult> res = facetsCollector.getFacetResults();
long te= System.currentTimeMillis();
for (FacetResult fr:res)
{
for ( FacetResultNode sr : fr.getFacetResultNode().getSubResults())
{
System.out.println(String.format( "%s : %f", sr.getLabel(), sr.getValue()));
}
}
System.out.println(String.format("Search took: %d millis", (te-ts)));
I'm not familiar with Lucene 4.1, only version 2.9, but when I create facets for my results I normally use Lucene.Net.Search.SimpleFacetedSearch.dll. Below is some sample code from my project.
Wouter
Dictionary<String, long> facetedResults = new Dictionary<String, long>();
try
{
SimpleFacetedSearch.MAX_FACETS = int.MaxValue;
SimpleFacetedSearch sfs = new SimpleFacetedSearch(indexReader, field);
SimpleFacetedSearch.Hits facetedHits = sfs.Search(query);
long totalHits = facetedHits.TotalHitCount;
for (int ihitsPerFacet = 0; ihitsPerFacet < facetedHits.HitsPerFacet.Count(); ihitsPerFacet++)
{
long hitCountPerGroup = facetedHits.HitsPerFacet[ihitsPerFacet].HitCount;
SimpleFacetedSearch.FacetName facetName = facetedHits.HitsPerFacet[ihitsPerFacet].Name;
if (hitCountPerGroup > 0)
facetedResults.Add(facetName.ToString(), hitCountPerGroup);
}
}
catch (Exception ex)
{
facetedResults.Add(ex.Message, -1);
}
At index time I am boosting certain documents in this way:
if (myCondition)
{
document.SetBoost(1.2f);
}
But at search time, documents with all the exact same qualities, some passing and some failing myCondition, all end up having the same score.
And here is the search code:
BooleanQuery booleanQuery = new BooleanQuery();
booleanQuery.Add(new TermQuery(new Term(FieldNames.HAS_PHOTO, "y")), BooleanClause.Occur.MUST);
booleanQuery.Add(new TermQuery(new Term(FieldNames.AUTHOR_TYPE, AuthorTypes.BLOGGER)), BooleanClause.Occur.MUST_NOT);
indexSearcher.Search(booleanQuery, 10);
Can you tell me what I need to do to get the documents that were boosted to get a higher score?
Many Thanks!
Lucene encodes boosts on a single byte (although a float is generally encoded on four bytes) using the SmallFloat#floatToByte315 method. As a consequence, there can be a big loss of precision when converting the byte back to a float.
In your case, SmallFloat.byte315ToFloat(SmallFloat.floatToByte315(1.2f)) returns 1f because 1f and 1.2f are too close to each other. Try using a bigger boost so that your documents get different scores. (For example, with 1.25, SmallFloat.byte315ToFloat(SmallFloat.floatToByte315(1.25f)) gives 1.25f.)
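To see the rounding for yourself, here is a small sketch (assuming a Lucene 3.x/4.x version, where org.apache.lucene.util.SmallFloat still exposes these methods):

import org.apache.lucene.util.SmallFloat;

public class BoostPrecision {
    public static void main(String[] args) {
        for (float boost : new float[] { 1.0f, 1.1f, 1.2f, 1.25f, 1.5f }) {
            byte encoded = SmallFloat.floatToByte315(boost);   // single-byte norm encoding
            float decoded = SmallFloat.byte315ToFloat(encoded);
            // 1.0f, 1.1f and 1.2f all decode back to 1.0f; 1.25f and 1.5f survive
            System.out.println(boost + " -> " + decoded);
        }
    }
}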
Here is the requested test program that was too long to post in a comment.
class Program
{
static void Main(string[] args)
{
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
const string FIELD = "name";
for (int i = 0; i < 10; i++)
{
StringBuilder notes = new StringBuilder();
notes.AppendLine("This is a note 123 - " + i);
string text = notes.ToString();
Document doc = new Document();
var field = new Field(FIELD, text, Field.Store.YES, Field.Index.NOT_ANALYZED);
if (i % 2 == 0)
{
field.SetBoost(1.5f);
doc.SetBoost(1.5f);
}
else
{
field.SetBoost(0.1f);
doc.SetBoost(0.1f);
}
doc.Add(field);
writer.AddDocument(doc);
}
writer.Commit();
//string TERM = QueryParser.Escape("*+*");
string TERM = "T";
IndexSearcher searcher = new IndexSearcher(dir);
Query query = new PrefixQuery(new Term(FIELD, TERM));
var hits = searcher.Search(query);
int count = hits.Length();
Console.WriteLine("Hits - {0}", count);
for (int i = 0; i < count; i++)
{
var doc = hits.Doc(i);
Console.WriteLine(doc.ToString());
var explain = searcher.Explain(query, hits.Id(i)); //explain by document id, not hit index
Console.WriteLine(explain.ToString());
}
}
}