Example usage for org.apache.lucene.index DirectoryReader open

Introduction

This page collects example usages of org.apache.lucene.index.DirectoryReader.open.

Prototype

public static DirectoryReader open(final IndexCommit commit) throws IOException 

Document

Expert: returns an IndexReader reading the index in the given IndexCommit.
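
None of the usage examples below call this IndexCommit overload directly, so here is a minimal sketch of it. This is an illustration under stated assumptions, not code from any of the projects below: it assumes a Lucene 5+ index in a hypothetical "index" directory whose deletion policy retains old commits (e.g. NoDeletionPolicy). DirectoryReader.listCommits returns the surviving commits sorted oldest-first, and open(IndexCommit) pins a point-in-time reader to one of them:

import java.nio.file.Paths;
import java.util.List;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class OpenCommitExample {
    public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(Paths.get("index")); // hypothetical index location
        // List every commit still present in the directory, oldest first.
        List<IndexCommit> commits = DirectoryReader.listCommits(dir);
        // Open a point-in-time reader on the oldest surviving commit.
        DirectoryReader reader = DirectoryReader.open(commits.get(0));
        System.out.println("Docs in oldest commit: " + reader.numDocs());
        reader.close();
        dir.close();
    }
}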

Usage

From source file:com.querydsl.lucene5.LuceneSerializerNotTokenizedTest.java

License:Apache License

@Before
public void setUp() throws Exception {
    serializer = new LuceneSerializer(false, false);
    idx = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    writer = new IndexWriter(idx, config);

    writer.addDocument(createDocument(clooney));
    writer.addDocument(createDocument(pitt));

    Document document = new Document();
    for (String movie : Arrays.asList("Interview with the Vampire", "Up in the Air")) {
        document.add(new Field("movie", movie, Store.YES, Index.NOT_ANALYZED));
    }
    writer.addDocument(document);

    writer.close();

    IndexReader reader = DirectoryReader.open(idx);
    searcher = new IndexSearcher(reader);
}

From source file:com.querydsl.lucene5.LuceneSerializerTest.java

License:Apache License

@Before
public void setUp() throws Exception {
    serializer = new LuceneSerializer(true, true);
    entityPath = new PathBuilder<Object>(Object.class, "obj");
    title = entityPath.getString("title");
    author = entityPath.getString("author");
    text = entityPath.getString("text");
    publisher = entityPath.getString("publisher");
    year = entityPath.getNumber("year", Integer.class);
    rating = entityPath.getString("rating");
    gross = entityPath.getNumber("gross", Double.class);
    titles = entityPath.getCollection("title", String.class, StringPath.class);

    longField = entityPath.getNumber("longField", Long.class);
    shortField = entityPath.getNumber("shortField", Short.class);
    byteField = entityPath.getNumber("byteField", Byte.class);
    floatField = entityPath.getNumber("floatField", Float.class);

    idx = new RAMDirectory();
    config = new IndexWriterConfig(new StandardAnalyzer()).setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    writer = new IndexWriter(idx, config);

    writer.addDocument(createDocument());

    writer.close();

    IndexReader reader = DirectoryReader.open(idx);
    searcher = new IndexSearcher(reader);
}

From source file:com.radialpoint.word2vec.lucene.SearchFiles.java

License:Open Source License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava com.radialpoint.word2vec.lucene.SearchFiles [-index dir] [-vectors v] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    String vectors = "vectors";
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-vectors".equals(args[i])) {
            vectors = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    // Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
    final File vectorsFile = new File(vectors);
    Analyzer analyzer = new Analyzer() {

        @SuppressWarnings("deprecation")
        @Override
        protected TokenStreamComponents createComponents(final String fieldName, final java.io.Reader reader) {
            final StandardTokenizer src = new StandardTokenizer(Version.LUCENE_40, reader);
            src.setMaxTokenLength(15);
            TokenStream tok = new StandardFilter(Version.LUCENE_40, src);
            tok = new LowerCaseFilter(Version.LUCENE_40, tok);
            tok = new StopFilter(Version.LUCENE_40, tok, StandardAnalyzer.STOP_WORDS_SET);
            TokenStream baseTok = tok;
            if (vectorsFile.exists()) {
                try {
                    tok = new Word2VecFilter(tok,
                            new QueryExpander(new Vectors(new FileInputStream(vectorsFile)), true,
                                    TermSelection.CUT_75_ABS),
                            3, false);
                } catch (IOException e) {
                    e.printStackTrace();
                    tok = baseTok;
                }
            }
            return new TokenStreamComponents(src, tok) {
                @Override
                protected void setReader(final java.io.Reader reader) throws IOException {
                    src.setMaxTokenLength(15);
                    super.setReader(reader);
                }
            };
        }
    };

    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    @SuppressWarnings("deprecation")
    QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file:com.recetario.search.RecetarioSearcher.java

public void Open() throws IOException {
    this.directory = FSDirectory.open(new File(this.IndexDirectory));
    this.directoryReader = DirectoryReader.open(directory);
    this.searcher = new IndexSearcher(this.directoryReader);
    this.analyzer = new StandardAnalyzer();
    // Keep the directory open: the reader continues to read from it,
    // so it must not be closed until the reader itself is closed.
}

From source file:com.ricky.codelab.lucene.LuceneIndexAndSearchDemo.java

License:Apache License

/**
 * Indexing and search demo using the IK Analyzer.
 * @param args
 */
public static void main(String[] args) {
    // name of the Lucene Document field to index and search
    String fieldName = "text";
    // sample text to index
    String text = "IK Analyzer???????";

    // instantiate the IK Analyzer in smart-segmentation mode
    Analyzer analyzer = new IKAnalyzer(true);

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        // use an in-memory index
        directory = new RAMDirectory();

        // configure the IndexWriter
        IndexWriterConfig iwConfig = new IndexWriterConfig(analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        // build and index a document
        Document doc = new Document();
        doc.add(new StringField("ID", "10000", Field.Store.YES));
        doc.add(new TextField(fieldName, text, Field.Store.YES));
        iwriter.addDocument(doc);
        iwriter.close();

        // ********************* search *********************
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        // build a Query with QueryParser
        QueryParser qp = new QueryParser(fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        // fetch the top 5 hits
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        // print the matching documents; iterate scoreDocs.length, not
        // totalHits, which may exceed the number of returned hits
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < scoreDocs.length; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("Document: " + targetDoc.toString());
        }
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.rocana.lucene.codec.v1.TestBlockPostingsFormat.java

License:Apache License

/** Make sure the final sub-block(s) are not skipped. */
public void testFinalBlock() throws Exception {
    Directory d = newDirectory();
    IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
    for (int i = 0; i < 25; i++) {
        Document doc = new Document();
        doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO));
        doc.add(newStringField("field", "z" + Character.toString((char) (97 + i)), Field.Store.NO));
        w.addDocument(doc);
    }
    w.forceMerge(1);

    DirectoryReader r = DirectoryReader.open(w);
    assertEquals(1, r.leaves().size());
    RocanaFieldReader field = (RocanaFieldReader) r.leaves().get(0).reader().fields().terms("field");
    // We should see exactly two blocks: one root block (prefix empty string) and one block for z* terms (prefix z):
    RocanaStats stats = field.getStats();
    assertEquals(0, stats.floorBlockCount);
    assertEquals(2, stats.nonFloorBlockCount);
    r.close();
    w.close();
    d.close();
}
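
testFinalBlock above opens its reader with the DirectoryReader.open(IndexWriter) overload, which returns a near-real-time reader that also sees the writer's not-yet-committed changes. Below is a minimal sketch of the usual open/refresh pattern, assuming a Lucene version where the single-argument open(IndexWriter) exists (as it does for the code above); the class name is illustrative:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class NrtReaderExample {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        writer.addDocument(new Document());

        // Near-real-time reader: sees the uncommitted document added above.
        DirectoryReader reader = DirectoryReader.open(writer);
        System.out.println("numDocs = " + reader.numDocs()); // prints 1

        writer.addDocument(new Document());
        // Cheap refresh; openIfChanged returns null when nothing has changed.
        DirectoryReader newer = DirectoryReader.openIfChanged(reader);
        if (newer != null) {
            reader.close();
            reader = newer;
        }
        System.out.println("numDocs = " + reader.numDocs()); // prints 2

        reader.close();
        writer.close();
        dir.close();
    }
}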

From source file:com.rocana.lucene.codec.v1.TestBlockPostingsFormat3.java

License:Apache License

private void verify(Directory dir) throws Exception {
    DirectoryReader ir = DirectoryReader.open(dir);
    for (LeafReaderContext leaf : ir.leaves()) {
        LeafReader leafReader = leaf.reader();
        assertTerms(leafReader.terms("field1docs"), leafReader.terms("field2freqs"), true);
        assertTerms(leafReader.terms("field3positions"), leafReader.terms("field4offsets"), true);
        assertTerms(leafReader.terms("field4offsets"), leafReader.terms("field5payloadsFixed"), true);
        assertTerms(leafReader.terms("field5payloadsFixed"), leafReader.terms("field6payloadsVariable"), true);
        assertTerms(leafReader.terms("field6payloadsVariable"), leafReader.terms("field7payloadsFixedOffsets"),
                true);
        assertTerms(leafReader.terms("field7payloadsFixedOffsets"),
                leafReader.terms("field8payloadsVariableOffsets"), true);
    }
    ir.close();
}

From source file:com.rocana.lucene.codec.v1.TestRocanaPerFieldPostingsFormat2.java

License:Apache License

public void assertQuery(Term t, Directory dir, int num) throws IOException {
    if (VERBOSE) {
        System.out.println("\nTEST: assertQuery " + t);
    }
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = newSearcher(reader);
    TopDocs search = searcher.search(new TermQuery(t), num + 10);
    assertEquals(num, search.totalHits);
    reader.close();

}

From source file:com.rondhuit.w2v.lucene.LuceneIndexCorpus.java

License:Apache License

public LuceneIndexCorpus(Config config) throws IOException {
    super(config);

    LuceneIndexConfig liConfig = (LuceneIndexConfig) config;
    field = liConfig.getField();
    analyzer = loadAnalyzer(liConfig.getAnalyzer());
    Directory dir = FSDirectory.open(new File(liConfig.getIndexDir()));
    reader = DirectoryReader.open(dir);
}

From source file:com.search.lucene.demo.facet.ExpressionAggregationFacetsExample.java

License:Apache License

/** User runs a query and aggregates facets. */
private FacetResult search() throws IOException, ParseException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

    // Aggregate categories by an expression that combines the document's score
    // and its popularity field
    Expression expr = JavascriptCompiler.compile("_score * sqrt(popularity)");
    SimpleBindings bindings = new SimpleBindings();
    bindings.add(new SortField("_score", SortField.Type.SCORE)); // the score of the document
    bindings.add(new SortField("popularity", SortField.Type.LONG)); // the value of the 'popularity' field

    // Aggregates the facet values
    FacetsCollector fc = new FacetsCollector(true);

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query:
    FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

    // Retrieve results
    Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, expr.getValueSource(bindings));
    FacetResult result = facets.getTopChildren(10, "A");

    indexReader.close();
    taxoReader.close();

    return result;
}