Example usage for org.apache.lucene.index DirectoryReader open

List of usage examples for org.apache.lucene.index DirectoryReader open

Introduction

In this page you can find the example usage for org.apache.lucene.index DirectoryReader open.

Prototype

public static DirectoryReader open(final IndexCommit commit) throws IOException 

Source Link

Document

Expert: returns an IndexReader reading the index in the given IndexCommit.

Usage

From source file:com.orientechnologies.lucene.test.LuceneNativeFacet.java

License:Apache License

/**
 * User runs a query and counts facets.
 *
 * <p>Searches the index with a {@link MatchAllDocsQuery} while collecting facet
 * counts, then returns the top children of the "Author" and "Publish Date"
 * dimensions.
 *
 * @return top-10 facet results for the "Author" and "Publish Date" dimensions
 * @throws IOException if the index or taxonomy cannot be read
 */
private List<FacetResult> facetsWithSearch() throws IOException {
    // try-with-resources guarantees both readers are closed even when a facet
    // lookup throws (the original leaked them on exception).
    try (DirectoryReader indexReader = DirectoryReader.open(indexDir);
            TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir)) {
        IndexSearcher searcher = new IndexSearcher(indexReader);

        FacetsCollector fc = new FacetsCollector();

        // MatchAllDocsQuery is for "browsing" (counts facets
        // for all non-deleted docs in the index); normally
        // you'd use a "normal" query:
        FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

        // Count both "Publish Date" and "Author" dimensions
        Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, fc);

        List<FacetResult> results = new ArrayList<FacetResult>();
        results.add(facets.getTopChildren(10, "Author"));
        results.add(facets.getTopChildren(10, "Publish Date"));
        return results;
    }
}

From source file:com.orientechnologies.lucene.test.LuceneNativeFacet.java

License:Apache License

/**
 * User runs a query and counts facets only, without collecting the matching
 * documents.
 *
 * @return top-10 facet results for the "Author" and "Publish Date" dimensions
 * @throws IOException if the index or taxonomy cannot be read
 */
private List<FacetResult> facetsOnly() throws IOException {
    // try-with-resources guarantees both readers are closed even when a facet
    // lookup throws (the original leaked them on exception).
    try (DirectoryReader indexReader = DirectoryReader.open(indexDir);
            TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir)) {
        IndexSearcher searcher = new IndexSearcher(indexReader);

        FacetsCollector fc = new FacetsCollector();

        // MatchAllDocsQuery is for "browsing" (counts facets
        // for all non-deleted docs in the index); normally
        // you'd use a "normal" query:
        searcher.search(new MatchAllDocsQuery(), null /*Filter */, fc);

        // Count both "Publish Date" and "Author" dimensions
        Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, fc);

        List<FacetResult> results = new ArrayList<FacetResult>();
        results.add(facets.getTopChildren(10, "Author"));
        results.add(facets.getTopChildren(10, "Publish Date"));
        return results;
    }
}

From source file:com.orientechnologies.lucene.test.LuceneNativeFacet.java

License:Apache License

/**
 * User drills down on 'Publish Date/2010', and we return facets for both
 * 'Publish Date' and 'Author', using DrillSideways.
 *
 * @return facet results for all dimensions after the drill-down
 * @throws IOException if the index or taxonomy cannot be read
 */
private List<FacetResult> drillSideways() throws IOException {
    // try-with-resources guarantees both readers are closed even when the
    // search throws (the original leaked them on exception).
    try (DirectoryReader indexReader = DirectoryReader.open(indexDir);
            TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir)) {
        IndexSearcher searcher = new IndexSearcher(indexReader);

        // Passing no baseQuery means we drill down on all
        // documents ("browse only"):
        DrillDownQuery q = new DrillDownQuery(config);

        // Now user drills down on Publish Date/2010:
        q.add("Publish Date", "2010");

        DrillSideways ds = new DrillSideways(searcher, config, taxoReader);
        DrillSidewaysResult result = ds.search(q, 10);

        return result.facets.getAllDims(10);
    }
}

From source file:com.orientechnologies.lucene.test.LuceneVsLuceneTest.java

License:Apache License

@Test
public void testLuceneVsLucene() throws IOException, ParseException {
    // Load the fixture SQL into the database; try-with-resources closes the
    // stream (the original leaked it).
    try (InputStream stream = ClassLoader.getSystemResourceAsStream("testLuceneIndex.sql")) {
        databaseDocumentTx.command(new OCommandScript("sql", getScriptFromStream(stream))).execute();
    }

    // Mirror every Song title into a plain Lucene index for comparison.
    for (ODocument oDocument : databaseDocumentTx.browseClass("Song")) {
        String title = oDocument.field("title");
        if (title != null) {
            Document d = new Document();
            d.add(new Field("title", title, Field.Store.NO, Field.Index.ANALYZED));
            indexWriter.addDocument(d);
        }
    }
    indexWriter.close();

    // try-with-resources closes the reader even if an assertion fails
    // (the original leaked it on any exception before reader.close()).
    try (IndexReader reader = DirectoryReader.open(getDirectory())) {
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = new MultiFieldQueryParser(OLuceneIndexManagerAbstract.LUCENE_VERSION,
                new String[] { "title" }, new StandardAnalyzer(OLuceneIndexManagerAbstract.LUCENE_VERSION))
                        .parse("down the");
        final TopDocs docs = searcher.search(query, Integer.MAX_VALUE);
        ScoreDoc[] hits = docs.scoreDocs;

        // The OrientDB LUCENE operator must agree with raw Lucene on both
        // the hit count and the per-document scores.
        List<ODocument> oDocs = databaseDocumentTx
                .query(new OSQLSynchQuery<ODocument>("select *,$score from Song where title LUCENE \"down the\""));
        Assert.assertEquals(oDocs.size(), hits.length);

        int i = 0;
        for (ScoreDoc hit : hits) {
            Assert.assertEquals(oDocs.get(i).field("$score"), hit.score);
            i++;
        }
    }
}

From source file:com.orientechnologies.spatial.sandbox.LuceneGeoTest.java

License:Apache License

@Test
public void geoIntersectTest() throws IOException, ParseException {

    RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(
            new GeohashPrefixTree(JtsSpatialContext.GEO, 11), "location");

    // distErrPct 0 = exact (non-approximated) shape indexing.
    strategy.setDistErrPct(0);

    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());

    // try-with-resources closes writer/directory even when an assertion
    // throws (the original leaked the writer on exception and never closed
    // the directory).
    try (final RAMDirectory directory = new RAMDirectory();
            final IndexWriter writer = new IndexWriter(directory, conf)) {

        Shape point = JtsSpatialContext.GEO.getWktShapeParser().parse("POINT (9.4714708 47.6819432)");

        Shape polygon = JtsSpatialContext.GEO.getWktShapeParser().parse(
                "POLYGON((9.481201171875 47.64885294675266,9.471416473388672 47.65128140482982,9.462661743164062 47.64781214443791,9.449443817138672 47.656947367880335,9.445838928222656 47.66110972448931,9.455795288085938 47.667352637215,9.469013214111328 47.67255449415724,9.477081298828125 47.679142768657066,9.490299224853516 47.678680460743834,9.506263732910156 47.679258344995326,9.51364517211914 47.68191653011071,9.518795013427734 47.677177931734406,9.526691436767578 47.679489496903706,9.53390121459961 47.67139857075435,9.50918197631836 47.66180341832901,9.50815200805664 47.6529003141482,9.51192855834961 47.64654002455372,9.504375457763672 47.64237650648966,9.49270248413086 47.649662445325035,9.48617935180664 47.65151268066222,9.481201171875 47.64885294675266))");

        Document doc = new Document();

        // Sanity check: the point really is outside the polygon.
        Assert.assertNotEquals(point.relate(polygon), SpatialRelation.INTERSECTS);
        for (IndexableField f : strategy.createIndexableFields(point)) {
            doc.add(f);
        }

        writer.addDocument(doc);
        writer.commit();

        // Intersect against the polygon's bounding box only.
        SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, polygon.getBoundingBox());
        Filter filter = strategy.makeFilter(args);
        try (IndexReader reader = DirectoryReader.open(directory)) {
            IndexSearcher searcher = new IndexSearcher(reader);

            TopDocs search = searcher.search(new MatchAllDocsQuery(), filter, 1000);
            Assert.assertEquals(search.totalHits, 0);
        }
    }
}

From source file:com.ostrichemulators.semtool.search.GraphTextSearch.java

License:Open Source License

/**
 * Replaces the current index with the data from the given graph.
 *
 * <p>Pre-fetches instance labels for every property key in the graph, feeds
 * each vertex and edge through a {@code RepositoryIndexer}, then (re)opens the
 * {@code DirectoryReader}/{@code IndexSearcher} pair over the refreshed index.
 *
 * @param graph  the graph whose vertices and edges should be indexed
 * @param engine the engine used to resolve instance labels
 * @throws java.io.IOException if the refreshed index cannot be read
 */
public void index(Graph<SEMOSSVertex, SEMOSSEdge> graph, IEngine engine) throws IOException {
    log.trace("asking to update search index");

    // NOTE(review): this guard only logs and then falls through — it does not
    // prevent concurrent re-indexing. Confirm whether an early return (or a
    // lock) was intended.
    if (indexing) {
        log.debug("already indexing");
    }

    log.debug("indexing graph for searchbar");
    indexing = true;
    Date lastIndexed = new Date();
    vertStore.clear();

    // pre-fetch the stuff we know we're going to need
    RetrievingLabelCache rlc = new RetrievingLabelCache(engine);
    Set<IRI> needLabels = new HashSet<>();
    for (SEMOSSEdge e : graph.getEdges()) {
        needLabels.addAll(e.getPropertyKeys());
    }
    for (SEMOSSVertex e : graph.getVertices()) {
        needLabels.addAll(e.getPropertyKeys());
    }
    rlc.putAll(Utility.getInstanceLabels(needLabels, engine));

    // now we can run the indexer
    RepositoryIndexer ri = new RepositoryIndexer(rlc);
    for (SEMOSSEdge e : graph.getEdges()) {
        vertStore.put(e.getIRI(), e);
        ri.handleProperties(e.getIRI(), e.getValues());
    }
    for (SEMOSSVertex v : graph.getVertices()) {
        vertStore.put(v.getIRI(), v);
        ri.handleProperties(v.getIRI(), v.getValues());
    }

    ri.finish();

    try {
        if (null == reader) {
            reader = DirectoryReader.open(ramdir);
        } else {
            DirectoryReader rdr = DirectoryReader.openIfChanged(reader);
            if (null != rdr) {
                // openIfChanged does NOT close the old reader — the caller
                // must; the original leaked one reader per refresh.
                reader.close();
                reader = rdr;
            }
        }

        searcher = new IndexSearcher(reader);
    } catch (IOException ioe) {
        throw new IOException("cannot read newly-created search index", ioe);
    } finally {
        indexing = false;
        log.debug("done indexing graph: " + Utility.getDuration(lastIndexed, new Date()));
    }
}

From source file:com.parallax.server.blocklyprop.servlets.HelpSearchServlet.java

/**
 * Lazily opens the Lucene help index under the user's home directory and
 * caches an {@code IndexSearcher} over it.
 *
 * @return the cached searcher, or {@code null} when the index cannot be opened
 */
private IndexSearcher initialize() {
    if (indexSearcher == null) {
        try {
            File home = new File(System.getProperty("user.home"));
            File luceneIndexLocation = new File(home,
                    configuration.getString("help.lucene", DEFAULT_LUCENE_DIRECTORY));
            directory = FSDirectory.open(luceneIndexLocation.toPath());
            indexReader = DirectoryReader.open(directory);
            indexSearcher = new IndexSearcher(indexReader);
        } catch (IOException ex) {
            // NOTE(review): the exception is swallowed and null returned; if
            // FSDirectory.open succeeded but DirectoryReader.open failed,
            // 'directory' is left open — consider closing it here.
            return null;
        }
    }
    return indexSearcher;
}

From source file:com.ponysdk.sample.client.page.addon.SelectizeAddon.java

License:Apache License

/**
 * Builds a small in-memory Lucene index at construction time and runs a demo
 * fuzzy query against it, caching the resulting {@code IndexSearcher} in
 * {@code isearcher}.
 */
public SelectizeAddon() {
    super(Element.newInput());
    setTerminalHandler(this);

    final Analyzer analyzer = new StandardAnalyzer();
    final Directory directory = new RAMDirectory();
    final IndexWriterConfig config = new IndexWriterConfig(analyzer);

    // try-with-resources closes the writer even when addDocument throws
    // (the original leaked it on exception).
    try (IndexWriter writer = new IndexWriter(directory, config)) {
        final Document doc = new Document();
        final String text = "Test de ouf";

        // "id" is stored verbatim but neither indexed nor tokenized.
        final FieldType fieldType = new FieldType();
        fieldType.setIndexOptions(IndexOptions.NONE);
        fieldType.setStored(true);
        fieldType.setTokenized(false);
        doc.add(new Field("id", "12", fieldType));
        doc.add(new Field("fieldname", text, TextField.TYPE_STORED));

        writer.addDocument(doc);

        addAssetsType(writer);
        addTenor(writer);
        addClients(writer);
        addSide(writer);
    } catch (final IOException e1) {
        // No logger is available here; keep the stack trace so index-build
        // failures are at least visible on stderr.
        e1.printStackTrace();
    }

    try {
        // Now search the index with a fuzzy query:
        final DirectoryReader ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        final Term term = new Term("fieldname", "indesfed");
        final Query query = new FuzzyQuery(term);

        final ScoreDoc[] hits = isearcher.search(query, 1000).scoreDocs;
        // Iterate through the results:
        for (final ScoreDoc hit : hits) {
            System.err.println("Score : " + hit.score);
            final Document hitDoc = isearcher.doc(hit.doc);
            System.err.println("Found document" + hitDoc.getField("fieldname").stringValue());
        }
        // NOTE(review): ireader/directory are deliberately left open —
        // 'isearcher' keeps using the reader after construction.
    } catch (final Exception exception) {
        exception.printStackTrace();
    }
}

From source file:com.querydsl.lucene5.LuceneQueryTest.java

License:Apache License

@Before
public void setUp() throws Exception {
    // Query paths for the fields exercised by the tests.
    final QDocument entityPath = new QDocument("doc");
    title = entityPath.title;
    year = entityPath.year;
    gross = entityPath.gross;

    idx = new RAMDirectory();
    writer = createWriter(idx);

    // Index four well-known works (title, author, text, year, gross).
    writer.addDocument(createDocument("Jurassic Park", "Michael Crichton",
            "It's a UNIX system! I know this!", 1990, 90.00));
    writer.addDocument(createDocument("Nummisuutarit", "Aleksis Kivi",
            "ESKO. Ja iloitset ja riemuitset?", 1864, 10.00));
    writer.addDocument(createDocument("The Lord of the Rings", "John R. R. Tolkien",
            "One Ring to rule them all, One Ring to find them, One Ring to bring them all and in the darkness bind them",
            1954, 89.00));
    writer.addDocument(createDocument("Introduction to Algorithms",
            "Thomas H. Cormen, Charles E. Leiserson, Ronald L. Rivest, and Clifford Stein", "Bubble sort", 1990,
            30.50));
    writer.close();

    // Open a searcher over the freshly written index and wrap it in a query.
    IndexReader indexReader = DirectoryReader.open(idx);
    searcher = new IndexSearcher(indexReader);
    query = new LuceneQuery(new LuceneSerializer(true, true), searcher);
}

From source file:com.querydsl.lucene5.LuceneQueryTest.java

License:Apache License

@Test
public void Empty_Index_Should_Return_Empty_List() throws Exception {
    // A freshly created index with no documents must yield an empty fetch.
    idx = new RAMDirectory();
    writer = createWriter(idx);
    writer.close();

    final IndexReader emptyReader = DirectoryReader.open(idx);
    searcher = new IndexSearcher(emptyReader);
    query = new LuceneQuery(new LuceneSerializer(true, true), searcher);

    assertTrue(query.fetch().isEmpty());
}