Example usage for org.apache.lucene.search IndexSearcher getIndexReader

Introduction

This page lists usage examples for org.apache.lucene.search.IndexSearcher#getIndexReader.

Prototype

public IndexReader getIndexReader() 

Document

Returns the IndexReader that this searcher searches over.
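
Before the project-sourced examples below, here is a minimal, hypothetical sketch of the call in isolation (the index path and surrounding code are assumptions for illustration, not taken from any of the examples):

// Hypothetical sketch: obtain the reader behind a searcher and inspect it.
Directory dir = FSDirectory.open(new File("/tmp/example-index")); // assumed path
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
IndexReader reader = searcher.getIndexReader();
System.out.println("Documents in index: " + reader.numDocs());
reader.close(); // closing the reader releases the index; the searcher has no close() of its own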

Usage

From source file:com.leavesfly.lia.tool.HighlightTest.java

License:Apache License

public void testHits() throws Exception {
    IndexSearcher searcher = new IndexSearcher(TestUtil.getBookIndexDirectory());
    TermQuery query = new TermQuery(new Term("title", "action"));
    TopDocs hits = searcher.search(query, 10);

    QueryScorer scorer = new QueryScorer(query, "title");
    Highlighter highlighter = new Highlighter(scorer);
    highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer));

    Analyzer analyzer = new SimpleAnalyzer();

    for (ScoreDoc sd : hits.scoreDocs) {
        Document doc = searcher.doc(sd.doc);
        String title = doc.get("title");

        TokenStream stream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), sd.doc, "title", doc,
                analyzer);
        String fragment = highlighter.getBestFragment(stream, title);

        System.out.println(fragment);
    }
}

From source file:com.m3958.apps.pcms.lucene.facet.MultiCategoryListsFacetsExample.java

License:Apache License

/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

    // Count both "Publish Date" and "Author" dimensions
    FacetSearchParams fsp = new FacetSearchParams(indexingParams,
            new CountFacetRequest(new CategoryPath("Publish Date"), 10),
            new CountFacetRequest(new CategoryPath("Author"), 10));

    // Aggregates the facet counts
    FacetsCollector fc = FacetsCollector.create(fsp, searcher.getIndexReader(), taxoReader);

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query, and use MultiCollector to
    // wrap collecting the "normal" hits and also facets:
    searcher.search(new MatchAllDocsQuery(), fc);

    // Retrieve results
    List<FacetResult> facetResults = fc.getFacetResults();

    indexReader.close();
    taxoReader.close();

    return facetResults;
}
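
The comment in the search() method above points out that MatchAllDocsQuery is only used for "browsing", and that a normal query would usually be combined with facet collection through MultiCollector. A hedged sketch of that variant, reusing fsp, searcher, and taxoReader from the method above (the query itself is illustrative), might look like:

// Sketch only: collect top hits and facet counts in a single search pass.
TopScoreDocCollector hitCollector = TopScoreDocCollector.create(10, true);
FacetsCollector facetsCollector = FacetsCollector.create(fsp, searcher.getIndexReader(), taxoReader);
searcher.search(new TermQuery(new Term("contents", "lucene")), // illustrative query
        MultiCollector.wrap(hitCollector, facetsCollector));
TopDocs topDocs = hitCollector.topDocs();
List<FacetResult> facetResults = facetsCollector.getFacetResults();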

From source file:com.m3958.apps.pcms.lucene.facet.SimpleFacetsExample.java

License:Apache License

/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

    // Count both "Publish Date" and "Author" dimensions
    FacetSearchParams fsp = new FacetSearchParams(new CountFacetRequest(new CategoryPath("Publish Date"), 10),
            new CountFacetRequest(new CategoryPath("Author"), 10));

    // Aggregates the facet counts
    FacetsCollector fc = FacetsCollector.create(fsp, searcher.getIndexReader(), taxoReader);

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query, and use MultiCollector to
    // wrap collecting the "normal" hits and also facets:
    searcher.search(new MatchAllDocsQuery(), fc);

    // Retrieve results
    List<FacetResult> facetResults = fc.getFacetResults();

    indexReader.close();
    taxoReader.close();

    return facetResults;
}

From source file:com.m3958.apps.pcms.lucene.facet.SimpleFacetsExample.java

License:Apache License

/** User drills down on 'Publish Date/2010'. */
private List<FacetResult> drillDown() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

    // Now user drills down on Publish Date/2010:
    FacetSearchParams fsp = new FacetSearchParams(new CountFacetRequest(new CategoryPath("Author"), 10));

    // Passing no baseQuery means we drill down on all
    // documents ("browse only"):
    DrillDownQuery q = new DrillDownQuery(fsp.indexingParams);
    q.add(new CategoryPath("Publish Date/2010", '/'));
    FacetsCollector fc = FacetsCollector.create(fsp, searcher.getIndexReader(), taxoReader);
    searcher.search(q, fc);

    // Retrieve results
    List<FacetResult> facetResults = fc.getFacetResults();

    indexReader.close();
    taxoReader.close();

    return facetResults;
}
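
The comment in drillDown() notes that passing no baseQuery means the drill-down applies to all documents. For reference, a hedged sketch of drilling down within the results of a normal query (assuming the Lucene 4.x DrillDownQuery constructor that accepts a base query; the term query is illustrative) could be:

// Sketch only: restrict the drill-down to documents matching a base query.
Query baseQuery = new TermQuery(new Term("contents", "lucene")); // illustrative
DrillDownQuery q = new DrillDownQuery(fsp.indexingParams, baseQuery);
q.add(new CategoryPath("Publish Date/2010", '/'));
searcher.search(q, fc);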

From source file:com.mathworks.xzheng.indexing.IndexingTest.java

License:Apache License

protected int getHitCount(String fieldName, String searchString) throws IOException {
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory)); //4
    Term t = new Term(fieldName, searchString);
    Query query = new TermQuery(t); //5
    int hitCount = TestUtil.hitCount(searcher, query); //6
    searcher.getIndexReader().close();
    return hitCount;
}

From source file:com.mathworks.xzheng.meetlucene.Searcher.java

License:Apache License

public static void search(String indexDir, String q) throws IOException, ParseException {

    Directory dir = FSDirectory.open(new File(indexDir)); //3
    IndexSearcher is = new IndexSearcher(DirectoryReader.open(dir)); //3

    QueryParser parser = new QueryParser(Version.LUCENE_46, // 4
            "contents", //4
            new StandardAnalyzer( //4
                    Version.LUCENE_46)); //4
    Query query = parser.parse(q); //4
    long start = System.currentTimeMillis();
    TopDocs hits = is.search(query, 10); //5
    long end = System.currentTimeMillis();

    System.err.println("Found " + hits.totalHits + //6
            " document(s) (in " + (end - start) + // 6
            " milliseconds) that matched query '" + // 6
            q + "':"); // 6

    for (ScoreDoc scoreDoc : hits.scoreDocs) {
        Document doc = is.doc(scoreDoc.doc); //7
        System.out.println(doc.get("fullpath")); //8
    }

    is.getIndexReader().close(); //9
}

From source file:com.mathworks.xzheng.tools.FastVectorHighlighterSample.java

License:Apache License

static void searchIndex(String filename) throws Exception {
    QueryParser parser = new QueryParser(Version.LUCENE_46, F, analyzer);
    Query query = parser.parse(QUERY);
    FastVectorHighlighter highlighter = getHighlighter(); // #C
    FieldQuery fieldQuery = highlighter.getFieldQuery(query); // #D
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
    TopDocs docs = searcher.search(query, 10);

    FileWriter writer = new FileWriter(filename);
    writer.write("<html>");
    writer.write("<body>");
    writer.write("<p>QUERY : " + QUERY + "</p>");
    for (ScoreDoc scoreDoc : docs.scoreDocs) {
        String snippet = highlighter.getBestFragment( // #E
                fieldQuery, searcher.getIndexReader(), // #E
                scoreDoc.doc, F, 100); // #E
        if (snippet != null) {
            writer.write(scoreDoc.doc + " : " + snippet + "<br/>");
        }
    }
    writer.write("</body></html>");
    writer.close();

}

From source file:com.mathworks.xzheng.tools.HighlightTest.java

License:Apache License

public void testHits() throws Exception {
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(TestUtil.getBookIndexDirectory()));
    TermQuery query = new TermQuery(new Term("title", "action"));
    TopDocs hits = searcher.search(query, 10);

    QueryScorer scorer = new QueryScorer(query, "title");
    Highlighter highlighter = new Highlighter(scorer);
    highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer));

    Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_46);

    for (ScoreDoc sd : hits.scoreDocs) {
        Document doc = searcher.doc(sd.doc);
        String title = doc.get("title");

        TokenStream stream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), sd.doc, "title", doc,
                analyzer);
        String fragment = highlighter.getBestFragment(stream, title);

        System.out.println(fragment);
    }
}

From source file:com.nearinfinity.blur.manager.writer.nrt.SearcherManager.java

License:Apache License

/**
 * Returns <code>true</code> if no changes have occurred since this searcher
 * (i.e. its reader) was opened, otherwise <code>false</code>.
 *
 * @see IndexReader#isCurrent()
 */
public boolean isSearcherCurrent() throws CorruptIndexException, IOException {
    final IndexSearcher searcher = acquire();
    try {
        return searcher.getIndexReader().isCurrent();
    } finally {
        release(searcher);
    }
}

From source file:com.nearinfinity.blur.manager.writer.nrt.SearcherManager.java

License:Apache License

/**
 * Release the searcher previously obtained with {@link #acquire}.
 *
 * <p>
 * <b>NOTE</b>: it's safe to call this after {@link #close}.
 */
public void release(IndexSearcher searcher) throws IOException {
    assert searcher != null;
    searcher.getIndexReader().decRef();
}