Example usage for org.apache.lucene.index DirectoryReader open

List of usage examples for org.apache.lucene.index DirectoryReader open

Introduction

On this page you can find example usage for org.apache.lucene.index DirectoryReader open.

Prototype

public static DirectoryReader open(final IndexCommit commit) throws IOException 

Source Link

Document

Expert: returns an IndexReader reading the index in the given IndexCommit .

Usage

From source file:demo.SearchFiles.java

License:Apache License

/**
 * Simple command-line based search demo.
 *
 * Flags: -index dir, -field f, -repeat n, -queries file, -query string,
 * -raw, -paging hitsPerPage. Queries come from -query, -queries file, or stdin.
 */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    // Defaults, overridable via the command-line flags below.
    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    try {
        IndexSearcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer();

        BufferedReader in;
        if (queries != null) {
            in = Files.newBufferedReader(Paths.get(queries), StandardCharsets.UTF_8);
        } else {
            in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
        }
        QueryParser parser = new QueryParser(field, analyzer);

        while (true) {
            if (queries == null && queryString == null) { // prompt the user
                System.out.println("Enter query: ");
            }

            String line = queryString != null ? queryString : in.readLine();

            // BUG FIX: the original also tested line.length() == -1, which can
            // never be true (String.length() is non-negative); end-of-stream is
            // signalled only by a null line.
            if (line == null) {
                break;
            }

            line = line.trim();
            if (line.length() == 0) {
                break;
            }

            Query query = parser.parse(line);
            System.out.println("Searching for: " + query.toString(field));

            if (repeat > 0) { // repeat & time as benchmark
                // Uses currentTimeMillis instead of allocating java.util.Date
                // objects just to read a timestamp; output format is unchanged.
                long start = System.currentTimeMillis();
                for (int i = 0; i < repeat; i++) {
                    searcher.search(query, 100);
                }
                System.out.println("Time: " + (System.currentTimeMillis() - start) + "ms");
            }

            doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

            if (queryString != null) {
                break;
            }
        }
    } finally {
        // BUG FIX: close the reader even when parsing/searching throws
        // (the original only closed it on the normal exit path).
        reader.close();
    }
}

From source file:Demo1.MyServlet.java

/**
 * Handles a search request: parses the "keyword" request parameter with the
 * shared analyzer, searches the shared Lucene index, wraps the top hits as
 * Land beans, and forwards them to /table.jsp under the "Lands" attribute.
 *
 * NOTE(review): any exception is reported only via its message on stdout,
 * preserving the original behavior; a real logger with the stack trace
 * would be more diagnosable.
 */
private void gotoSearch(PrintWriter out, HttpServletRequest request, HttpServletResponse response) {
    try {
        // Text to search
        String querystr = request.getParameter("keyword");

        log.addHistory(querystr);

        // "Searching" is the default field used when the query string does
        // not name a field explicitly.
        Query q = new QueryParser("Searching", analyzer).parse(querystr);

        int hitsPerPage = 10;
        // BUG FIX: try-with-resources guarantees the reader is closed even if
        // searching or forwarding throws; the original leaked it on error.
        try (IndexReader reader = DirectoryReader.open(index)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage);
            searcher.search(q, collector);
            ScoreDoc[] hits = collector.topDocs().scoreDocs;

            // Materialize the hits before the reader is closed.
            courseList = new ArrayList();
            for (ScoreDoc hit : hits) {
                Document d = searcher.doc(hit.doc);
                courseList.add(new Land(d.get("name"), d.get("price"), d.get("area"), d.get("purpose")));
            }
            request.setAttribute("Lands", courseList);
            RequestDispatcher de = request.getRequestDispatcher("/table.jsp");
            de.forward(request, response);
        }
    } catch (Exception e) {
        System.out.println(e.getMessage());
    }
}

From source file:Demo2.MyServlet.java

/**
 * Handles a search request: parses the "keyword" request parameter with the
 * shared analyzer, searches the shared Lucene index, wraps the top hits as
 * Child beans, and forwards them to /table.jsp under the "course" attribute.
 *
 * NOTE(review): any exception is reported only via its message on stdout,
 * preserving the original behavior; a real logger with the stack trace
 * would be more diagnosable.
 */
private void gotoSearch(PrintWriter out, HttpServletRequest request, HttpServletResponse response) {
    try {
        // Text to search
        String querystr = request.getParameter("keyword");

        log.addHistory(querystr);

        // "Classes" is the default field used when the query string does
        // not name a field explicitly.
        Query q = new QueryParser("Classes", analyzer).parse(querystr);

        int hitsPerPage = 10;
        // BUG FIX: try-with-resources guarantees the reader is closed even if
        // searching or forwarding throws; the original leaked it on error.
        try (IndexReader reader = DirectoryReader.open(index)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage);
            searcher.search(q, collector);
            ScoreDoc[] hits = collector.topDocs().scoreDocs;

            // Materialize the hits before the reader is closed.
            courseList = new ArrayList();
            for (ScoreDoc hit : hits) {
                Document d = searcher.doc(hit.doc);
                courseList.add(new Child(d.get("Number"), d.get("Classes"), d.get("Time"), d.get("Department")));
            }
            request.setAttribute("course", courseList);
            RequestDispatcher de = request.getRequestDispatcher("/table.jsp");
            de.forward(request, response);
        }
    } catch (Exception e) {
        System.out.println(e.getMessage());
    }
}

From source file:di.uniba.it.tee2.search.TemporalEventSearch.java

License:Open Source License

/**
 * Opens the three Lucene sub-indexes (time, doc, repo) under {@code mainDir}
 * and builds an IndexSearcher over each.
 *
 * @throws IOException if any of the three indexes cannot be opened; readers
 *         opened before the failure are closed so nothing leaks
 */
public void init() throws IOException {
    DirectoryReader timeReader = DirectoryReader.open(FSDirectory.open(new File(mainDir + "/time")));
    DirectoryReader docReader = null;
    DirectoryReader repoReader;
    try {
        docReader = DirectoryReader.open(FSDirectory.open(new File(mainDir + "/doc")));
        repoReader = DirectoryReader.open(FSDirectory.open(new File(mainDir + "/repo")));
    } catch (IOException ex) {
        // BUG FIX: the original leaked already-opened readers when a later
        // open failed.
        timeReader.close();
        if (docReader != null) {
            docReader.close();
        }
        throw ex;
    }
    doc_searcher = new IndexSearcher(docReader);
    time_searcher = new IndexSearcher(timeReader);
    repo_searcher = new IndexSearcher(repoReader);
}

From source file:dk.dbc.opensearch.fedora.search.PidCollectorTest.java

License:Open Source License

/**
 * Writes the given documents into the shared test index and returns an
 * AtomicReader over the committed result.
 *
 * @param docs documents to index
 * @return an atomic view of the freshly committed index
 * @throws IOException on index write or open failure
 */
private AtomicReader populateIndexAndGetIndexReader(Document... docs) throws IOException {
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_41, new SimpleAnalyzer(Version.LUCENE_41));
    // BUG FIX: try-with-resources closes the writer even when addDocument or
    // commit throws; the original leaked the writer (and its lock) on error.
    try (IndexWriter indexWriter = new IndexWriter(index, config)) {
        for (Document doc : docs) {
            indexWriter.addDocument(doc);
        }
        indexWriter.commit();
    }
    return SlowCompositeReaderWrapper.wrap(DirectoryReader.open(index));
}

From source file:dk.dma.msinm.lucene.AbstractLuceneIndex.java

License:Open Source License

/**
 * Returns the cached index reader, or creates one if none is defined
 * @return the shared index reader/*from   ww  w . j a v a2  s  .  c om*/
 */
public DirectoryReader getIndexReader() throws IOException {
    if (reader == null) {
        Path indexFolder = getIndexFolder();
        try {
            reader = DirectoryReader.open(FSDirectory.open(indexFolder.toFile()));
        } catch (IOException ex) {
            log.error("Failed to open Lucene Index in folder " + indexFolder);
            throw ex;
        }
    }
    return reader;
}

From source file:dk.dma.msinm.lucene.CommitUserDataTest.java

License:Open Source License

/**
 * Verifies that user data attached to a commit via setCommitData survives a
 * close/reopen cycle and is readable through IndexCommit.getUserData().
 */
@Test
public void test() throws IOException {

    File indexFolder = Files.createTempDir();
    Directory directory = FSDirectory.open(indexFolder);

    // Create an index writer
    IndexWriterConfig iwc = new IndexWriterConfig(LuceneUtils.LUCENE_VERSION,
            new StandardAnalyzer(LuceneUtils.LUCENE_VERSION));
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter indexWriter = new IndexWriter(directory, iwc);

    // Write a single document
    Document doc = new Document();
    doc.add(new IntField("id", 100, Field.Store.YES));
    indexWriter.addDocument(doc);

    // Attach user data to the commit; closing the writer commits it.
    Map<String, String> userData = new HashMap<>();
    userData.put("A", "B");
    indexWriter.setCommitData(userData);
    indexWriter.close();

    // Re-open the index and check that the user data is there.
    DirectoryReader indexReader = DirectoryReader.open(FSDirectory.open(indexFolder));
    try {
        assertEquals("B", indexReader.getIndexCommit().getUserData().get("A"));
    } finally {
        // BUG FIX: the original never closed the reader, leaking file handles
        // on the temp folder.
        indexReader.close();
    }
}

From source file:dk.dma.msinm.lucene.SpatialLuceneTest.java

License:Open Source License

/**
 * Indexes four spatial documents (points and a polygon) with a geohash
 * prefix-tree strategy and verifies two Intersects searches return the
 * expected document ids.
 */
@Test
public void testSpatialSearch() throws IOException, ParseException {

    int maxLevels = 11; // results in sub-meter precision for geohash
    SpatialPrefixTree grid = new GeohashPrefixTree(ctx, maxLevels);

    strategy = new RecursivePrefixTreeStrategy(grid, "myGeoField");
    Directory directory = new RAMDirectory();

    IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_47, null);
    IndexWriter indexWriter = new IndexWriter(directory, iwConfig);
    indexWriter.addDocument(newSampleDocument(2, ctx.makePoint(-80.93, 33.77)));
    indexWriter.addDocument(newSampleDocument(4, ctx.readShapeFromWkt("POINT(60.9289094 -50.7693246)")));
    indexWriter.addDocument(newSampleDocument(20, ctx.makePoint(0.1, 0.1), ctx.makePoint(0, 0)));
    indexWriter.addDocument(newSampleDocument(30,
            JtsSpatialContext.GEO.readShapeFromWkt("POLYGON((0 0, -90 0, -90 40, 0 40, 0 0))")));
    indexWriter.close();

    IndexReader indexReader = DirectoryReader.open(directory);
    try {
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        Sort idSort = new Sort(new SortField("id", SortField.Type.INT));

        // Search 1: 200 km circle around (-80, 33) — matches point 2 and polygon 30
        SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects,
                ctx.makeCircle(-80.0, 33.0, DistanceUtils.dist2Degrees(200, DistanceUtils.EARTH_MEAN_RADIUS_KM)));
        TopDocs docs = indexSearcher.search(new MatchAllDocsQuery(), strategy.makeFilter(args), 10, idSort);
        assertDocMatchedIds(indexSearcher, docs, 2, 30);

        // Search 2: polygon intersecting only document 30
        args = new SpatialArgs(SpatialOperation.Intersects,
                JtsSpatialContext.GEO.readShapeFromWkt("POLYGON((-10 10, -20 0, -20 20, -10 20, -10 10))"));
        docs = indexSearcher.search(new MatchAllDocsQuery(), strategy.makeFilter(args), 10, idSort);
        assertDocMatchedIds(indexSearcher, docs, 30);
    } finally {
        // BUG FIX: the original never closed the reader.
        indexReader.close();
    }
}

From source file:dk.netarkivet.harvester.indexserver.DedupCrawlLogIndexCacheTester.java

License:Open Source License

/**
 * Verifies that DedupCrawlLogIndexCache.combine() merges the crawl logs of
 * jobs #1 and #4 into a zipped Lucene index containing every expected
 * url/origin pair, and that no unzipped index directory is left behind.
 */
@Test
public void testCombine() throws Exception {
    // These are the origins of job #4 and #1
    Map<String, String> origins = new HashMap<String, String>(8);

    // "job" #4
    origins.put("http://www.kb.dk/bevarbogen/images/menu_03.gif",
            "54-8-20050620183552-00016-kb-prod-har-001.kb.dk.arc,92248220");
    origins.put("http://www.kb.dk/bevarbogen/images/menu_06.gif",
            "54-8-20050620183552-00016-kb-prod-har-001.kb.dk.arc,95056820");
    origins.put("http://www.kb.dk/bevarbogen/images/menu_07.gif",
            "54-8-20050620183552-00016-kb-prod-har-001.kb.dk.arc,95468220");
    origins.put("http://www.kb.dk/bevarbogen/images/menutop.gif",
            "54-8-20050620183552-00016-kb-prod-har-002.kb.dk.arc,42");
    origins.put("http://www.kb.dk/bevarbogen/script.js", "check-arc,42");

    // "job" #1
    origins.put("http://www.kb.dk/clear.gif", "54-8-20050620183552-00016-kb-prod-har-001.kb.dk.arc,55983420");
    origins.put("http://www.kb.dk/dither.gif", "54-8-20050620183552-00016-kb-prod-har-001.kb.dk.arc,53985420");
    origins.put("http://www.kb.dk/dither_blaa.gif",
            "54-8-20050620183552-00016-kb-prod-har-001.kb.dk.arc,58593420");

    // Map job id -> crawl-log fixture file for the two jobs being combined.
    Map<Long, File> files = new HashMap<Long, File>();
    files.put(1L, TestInfo.CRAWL_LOG_1);
    files.put(4L, TestInfo.CRAWL_LOG_4);

    Set<Long> requiredSet = new HashSet<Long>();
    requiredSet.add(1L);
    requiredSet.add(4L);

    DedupCrawlLogIndexCache cache = new DedupCrawlLogIndexCache();
    File resultFile = cache.getCacheFile(files.keySet());

    cache.combine(files);

    assertTrue("Result file should have contents after combining", resultFile.length() > 0);
    // The intermediate unzipped index (result path minus its 4-char suffix)
    // must have been cleaned up by combine().
    assertFalse("Should not have left an unzipped lucene index",
            new File(resultFile.getAbsolutePath().substring(0, resultFile.getAbsolutePath().length() - 4))
                    .exists());
    File unzipDir = new File(TestInfo.WORKING_DIR, "luceneindex");
    if (!unzipDir.mkdir()) {
        fail("Unable to create unzipDir '" + unzipDir.getAbsolutePath() + "' for luceneindex: ");
    }
    // Unzip every .gz member of the combined result into unzipDir, dropping
    // the ".gz" suffix, so a plain Lucene index can be opened from it.
    // NOTE(review): this assumes resultFile is a directory of gzipped index
    // files (listFiles() would return null for a regular file) — TODO confirm
    // against DedupCrawlLogIndexCache.getCacheFile().
    File[] resultFiles = resultFile.listFiles();
    for (File f : resultFiles) {
        if (f.getName().endsWith(".gz")) {
            InputStream in = new GZIPInputStream(new FileInputStream(f));
            FileUtils.writeStreamToFile(in,
                    new File(unzipDir, f.getName().substring(0, f.getName().length() - ".gz".length())));
            in.close();
        }
    }

    // NOTE(review): the reader/searcher are never closed in this test;
    // acceptable for a short-lived test JVM but worth tidying.
    Directory luceneDirectory = new MMapDirectory(unzipDir);
    IndexReader reader = DirectoryReader.open(luceneDirectory);

    // System.out.println("doc-count: " + reader.maxDoc());
    IndexSearcher index = new IndexSearcher(reader);
    // QueryParser queryParser = new QueryParser("url",
    // new WhitespaceAnalyzer(dk.netarkivet.common.constants.LUCENE_VERSION));
    // QueryParser queryParser = new QueryParser(dk.netarkivet.common.Constants.LUCENE_VERSION, "url",
    // new WhitespaceAnalyzer(dk.netarkivet.common.Constants.LUCENE_VERSION));
    // Query q = queryParser.parse("http\\://www.kb.dk*");

    // Crawl log 1 has five entries for www.kb.dk, but two are robots
    // and /, which the indexer ignores, leaving 3
    // Crawl log 4 has five entries for www.kb.dk

    // System.out.println("Found hits: " + hits.size());
    // for (ScoreDoc hit : hits) {
    // int docID = hit.doc;
    // Document doc = index.doc(docID);
    //
    // String url = doc.get("url");
    // String origin = doc.get("origin");
    // System.out.println("url,origin = " + url + ", " + origin);
    // }

    // verifySearchResult removes each origin it finds from the map ...
    verifySearchResult(origins, index);

    // ... so an empty map means every expected url/origin pair was indexed.
    assertTrue("Should have found all origins, but have still " + origins.size() + " left: " + origins,
            origins.isEmpty());
}

From source file:dk.statsbiblioteket.netark.dvenabler.DVReaderTest.java

License:Apache License

/**
 * Generates a test index, wraps it in a DVDirectoryReader with docvalues
 * field descriptions, and verifies the expected values can be read back.
 * The generated index is always deleted afterwards.
 */
public void testCreateAndReadWrappedIndex() throws IOException, ParseException {
    // BUG FIX: the original logged "testCreateAndReadPlainIndex started"
    // (copy-paste from the sibling test); log the actual test name.
    log.info("testCreateAndReadWrappedIndex started");
    final File INDEX = generateIndex();

    try {
        Directory directory = MMapDirectory.open(INDEX);
        IndexReader reader = new DVDirectoryReader(DirectoryReader.open(directory),
                createDVFieldDescriptions(INDEX));
        try {
            IndexSearcher searcher = new IndexSearcher(reader);

            assertIndexValues(reader, searcher, true);
        } finally {
            // BUG FIX: close the reader (and its mmap handles) before the
            // index files are deleted; the original never closed it.
            reader.close();
        }
    } finally {
        delete(INDEX);
    }
}