Example usage for org.apache.lucene.index DirectoryReader open

List of usage examples for org.apache.lucene.index DirectoryReader open

Introduction

This page collects usage examples for org.apache.lucene.index.DirectoryReader.open.

Prototype

public static DirectoryReader open(final IndexCommit commit) throws IOException 

Document

Expert: returns an IndexReader reading the index in the given IndexCommit.
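
None of the examples below calls the IndexCommit overload directly; they open readers from a Directory or an index path. A minimal sketch of the prototype above, assuming dir is an existing Directory with at least one retained commit (the helper name is illustrative):

private static DirectoryReader openLatestCommit(Directory dir) throws IOException {
    // DirectoryReader.listCommits returns the index's commits sorted from
    // oldest to newest; open a reader on the most recent one.
    List<IndexCommit> commits = DirectoryReader.listCommits(dir);
    IndexCommit latest = commits.get(commits.size() - 1);
    return DirectoryReader.open(latest);
}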

Usage

From source file:com.zghw.lucene.demo.FormBasedXmlQueryDemo.java

License:Apache License

private void openExampleIndex() throws IOException {
    //Create a RAM-based index from our test data file
    RAMDirectory rd = new RAMDirectory();
    IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer);
    IndexWriter writer = new IndexWriter(rd, iwConfig);
    InputStream dataIn = getServletContext().getResourceAsStream("/WEB-INF/data.tsv");
    BufferedReader br = new BufferedReader(new InputStreamReader(dataIn, StandardCharsets.UTF_8));
    String line = br.readLine();
    final FieldType textNoNorms = new FieldType(TextField.TYPE_STORED);
    textNoNorms.setOmitNorms(true);
    while (line != null) {
        line = line.trim();
        if (line.length() > 0) {
            //parse row and create a document
            StringTokenizer st = new StringTokenizer(line, "\t");
            Document doc = new Document();
            doc.add(new Field("location", st.nextToken(), textNoNorms));
            doc.add(new Field("salary", st.nextToken(), textNoNorms));
            doc.add(new Field("type", st.nextToken(), textNoNorms));
            doc.add(new Field("description", st.nextToken(), textNoNorms));
            writer.addDocument(doc);
        }
        line = br.readLine();
    }
    writer.close();

    //open searcher
    // this example never closes its reader!
    IndexReader reader = DirectoryReader.open(rd);
    searcher = new IndexSearcher(reader);
}
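
As the comment notes, this demo never closes its reader; that is tolerable here because the searcher is kept for the servlet's lifetime, but code that only needs the reader briefly should close it. A hypothetical scoped variant (not part of the demo):

    // Close the reader automatically when the block exits; the searcher must
    // not be used outside this scope.
    try (IndexReader reader = DirectoryReader.open(rd)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        // ... run queries while the reader is open ...
    }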

From source file:com.zghw.lucene.demo.MultiCategoryListsFacetsExample.java

License:Apache License

/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

    FacetsCollector fc = new FacetsCollector();

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query:
    FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

    // Retrieve results
    List<FacetResult> results = new ArrayList<FacetResult>();

    // Count both "Publish Date" and "Author" dimensions
    Facets author = new FastTaxonomyFacetCounts("author", taxoReader, config, fc);
    results.add(author.getTopChildren(10, "Author"));

    Facets pubDate = new FastTaxonomyFacetCounts("pubdate", taxoReader, config, fc);
    results.add(pubDate.getTopChildren(10, "Publish Date"));

    indexReader.close();
    taxoReader.close();

    return results;
}
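
The index field names "author" and "pubdate" passed to FastTaxonomyFacetCounts only line up with the "Author" and "Publish Date" dimensions if the FacetsConfig used at index time mapped them that way. A sketch of that assumed index-time configuration:

    // Map each dimension to its own underlying index field, matching the
    // field names used in search().
    FacetsConfig config = new FacetsConfig();
    config.setIndexFieldName("Author", "author");
    config.setIndexFieldName("Publish Date", "pubdate");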

From source file:com.zghw.lucene.demo.SimpleSortedSetFacetsExample.java

License:Apache License

/** User runs a query and counts facets. */
private List<FacetResult> search() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(indexReader);

    // Aggregates the facet counts
    FacetsCollector fc = new FacetsCollector();

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query:
    FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

    // Retrieve results
    Facets facets = new SortedSetDocValuesFacetCounts(state, fc);

    List<FacetResult> results = new ArrayList<FacetResult>();
    results.add(facets.getTopChildren(10, "Author"));
    results.add(facets.getTopChildren(10, "Publish Year"));
    indexReader.close();

    return results;
}
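
For SortedSetDocValuesFacetCounts to return anything, the index must contain SortedSetDocValuesFacetField values that were run through FacetsConfig.build() at index time. A minimal indexing sketch under that assumption, reusing the indexDir and config fields of this class (analyzer-only IndexWriterConfig constructor, Lucene 5+ style):

    // Hypothetical indexing side: FacetsConfig.build() rewrites the facet
    // fields into the doc values that the search() method counts.
    IndexWriter writer = new IndexWriter(indexDir, new IndexWriterConfig(new WhitespaceAnalyzer()));
    Document doc = new Document();
    doc.add(new SortedSetDocValuesFacetField("Author", "Lisa"));
    doc.add(new SortedSetDocValuesFacetField("Publish Year", "2010"));
    writer.addDocument(config.build(doc));
    writer.close();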

From source file:com.zsq.lucene.chapter1.SearchFiles.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_43);

    BufferedReader in = null;
    if (queries != null) {
        in = Files.newBufferedReader(Paths.get(queries), StandardCharsets.UTF_8);
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_43, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null || line.length() == -1) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file:concurrency.SearchFiles.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "/Users/rene/learn/topic-index"; //"/Users/rene/learn/learn5/lucene/finder/index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    // regular search
    // String queryString = "computer";

    String queryString = "viewresolver spring"; //"fitness";// "ExecutorService";//"EventListener";
    // //"Country"; //"Preconditions";

    // wildcard query
    // String queryString = "te*t";

    // fuzzy query
    // String queryString = "roam~2";

    // phrase query
    // String queryString = "\"apache lucene\"~5";

    // boolean search
    // String queryString = "\"networks\" AND \"protocol\"";

    // boosted search
    // String queryString = "computer^10 crime";

    int hitsPerPage = 100;
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();

    BufferedReader in = null;
    QueryParser parser = new QueryParser(field, analyzer);

    Query query = parser.parse(queryString);

    System.out.println("Searching for: " + query.toString(field));

    searcher.search(query, null, 1000); //hitsPerPage); //100);

    doSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

    reader.close();
}

From source file:control.Search.java

/**
 * Search for previously indexed feeds through 'title' and 'description' fields, according to a query
 *
 * @param query terms to be considered in the search
 * @return a JSON representation of the retrieved feeds
 * @throws ParseException query parsing failure
 * @throws IOException  I/O issue when reading the index
 */
public String queryIndexedFeeds(String query) throws ParseException, IOException {
    //creates IndexReader with analyzers
    IndexReader reader = DirectoryReader.open(index);
    IndexSearcher searcher = new IndexSearcher(reader);
    StandardAnalyzer analyzer = new StandardAnalyzer();
    MultiFieldQueryParser queryParser = new MultiFieldQueryParser(new String[] { "title", "description" },
            analyzer);

    //search for documents
    TopDocs docs = searcher.search(queryParser.parse(query), 25);
    ScoreDoc[] hits = docs.scoreDocs;

    //iterate over results and put on JSON format
    JSONArray jsonArray = new JSONArray();
    for (int i = 0; i < hits.length; i++) {
        int docId = hits[i].doc;
        Document d = searcher.doc(docId);

        //create new json object
        JSONObject json = new JSONObject();
        json.put("id", d.get("id"));
        json.put("link", d.get("link"));
        json.put("title", d.get("title"));
        json.put("description", d.get("description"));

        jsonArray.put(json);
    }

    reader.close();
    String ret = jsonArray.toString();

    return ret;
}
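
A hypothetical caller, assuming a default constructor and that the index field has already been populated by the corresponding indexing code:

    Search search = new Search();
    String feedsJson = search.queryIndexedFeeds("lucene");
    System.out.println(feedsJson);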

From source file:core.Indexer.java

public void open(String fieldToRead, boolean wildcard) throws IndexerException {
    switch (mode) {
    case READ:
        try {
            indexReader = DirectoryReader.open(dir);
            indexSearcher = new IndexSearcher(indexReader);
            analyzer = new KeywordAnalyzer();
            queryParser = new QueryParser(fieldToRead, analyzer);
            queryParser.setAllowLeadingWildcard(wildcard);
        } catch (IOException ex) {
            throw new IndexerException(ex.getMessage());
        }
        break;
    case READWRITE:
        try {
            indexReader = DirectoryReader.open(dir);
            indexSearcher = new IndexSearcher(indexReader);
            indexWriterConfig = new IndexWriterConfig(new StandardAnalyzer());
            indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
            indexWriter = new IndexWriter(dir, indexWriterConfig);
            analyzer = new KeywordAnalyzer();
            queryParser = new QueryParser(fieldToRead, analyzer);
            queryParser.setAllowLeadingWildcard(wildcard);
        } catch (IOException ex) {
            throw new IndexerException(ex.getMessage());
        }
        break;
    case WRITE:
        throw new IndexerException("El ndice est en modo escritura. Utliza el mtodo open()");
    }
}

From source file:coreservlets.consolesearch.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;
    String[] fields = { "title", "description", "keywords", "contents" };
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } /*else if ("-field".equals(args[i])) {
          fields = field.args[i+1];
          i++;
          } */else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }
    System.out.println(System.getenv());
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    //Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);    
    Analyzer analyzer = new ICTCLASAnalyzer();
    MultiFieldQueryParser mp = new MultiFieldQueryParser(Version.LUCENE_44, fields, analyzer);
    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    //    QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer);
    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null || line.length() == -1) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }
        System.out.println(line);
        Query query = mp.parse(line);
        System.out.println("Searching for: " + query.toString());

        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file:csdn.lucene.first.version.Searcher.java

License:Apache License

protected ArrayList<String> search(String indexDir, String q) {
    // Lucene 5.5+: FSDirectory.open() takes a java.nio.file.Path rather than a File
    java.nio.file.Path pathA = Paths.get(indexDir);
    FSDirectory dir;
    IndexSearcher is;
    try {
        dir = FSDirectory.open(pathA);
        DirectoryReader dReader = DirectoryReader.open(dir);
        is = new IndexSearcher(dReader);

        QueryParser parser = new QueryParser("contents", analyzer);
        Query query = parser.parse(q);
        long start = System.currentTimeMillis();
        //is.search():Finds the top n hits for query
        //TopDocs:Represents hits returned by IndexSearcher.search(Query,int).
        TopDocs hits = is.search(query, 10);
        long end = System.currentTimeMillis();

        System.err.println("Found " + hits.totalHits + //6  
                " document(s) (in " + (end - start) + // 6
                " milliseconds) that matched query '" + // 6
                q + "':"); // 6

        //ScoreDocs:The top hits for the query.
        for (ScoreDoc scoreDoc : hits.scoreDocs) {
            Document doc = is.doc(scoreDoc.doc);
            //         System.out.println("From Searcher : filename = " + doc.get("fieldname"));
            //         System.out.println("From Searcher : fullpath = " + doc.get("fullpath"));
            hit_ids.add(doc.get("fieldname"));
            hit_paths.add(doc.get("fullpath"));
        }
        dReader.close();
        dir.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return hit_ids;
}

From source file:dbn.db.FullTextTrigger.java

/**
 * Get the Lucene index access
 *
 * @param   conn                SQL connection
 * @throws  SQLException        Unable to access the Lucene index
 */
private static void getIndexAccess(Connection conn) throws SQLException {
    if (!isActive) {
        throw new SQLException("NRS is no longer active");
    }
    boolean obtainedUpdateLock = false;
    if (!indexLock.writeLock().hasLock()) {
        indexLock.updateLock().lock();
        obtainedUpdateLock = true;
    }
    try {
        if (indexPath == null || indexWriter == null) {
            indexLock.writeLock().lock();
            try {
                if (indexPath == null) {
                    getIndexPath(conn);
                }
                if (directory == null) {
                    directory = FSDirectory.open(indexPath);
                }
                if (indexWriter == null) {
                    IndexWriterConfig config = new IndexWriterConfig(analyzer);
                    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
                    indexWriter = new IndexWriter(directory, config);
                    Document document = new Document();
                    document.add(new StringField("_QUERY", "_CONTROL_DOCUMENT_", Field.Store.YES));
                    indexWriter.updateDocument(new Term("_QUERY", "_CONTROL_DOCUMENT_"), document);
                    indexWriter.commit();
                    indexReader = DirectoryReader.open(directory);
                    indexSearcher = new IndexSearcher(indexReader);
                }
            } finally {
                indexLock.writeLock().unlock();
            }
        }
    } catch (IOException | SQLException exc) {
        Logger.logErrorMessage("Unable to access the Lucene index", exc);
        throw new SQLException("Unable to access the Lucene index", exc);
    } finally {
        if (obtainedUpdateLock) {
            indexLock.updateLock().unlock();
        }
    }
}