Example usage for org.apache.lucene.index IndexReader close

List of usage examples for org.apache.lucene.index IndexReader close

Introduction

On this page you can find example usages of org.apache.lucene.index IndexReader close.

Prototype

@Override
public final synchronized void close() throws IOException 

Source Link

Document

Closes files associated with this index.
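
Because IndexReader implements Closeable, the close can also be made automatic with try-with-resources (Java 7 and later). A minimal sketch, assuming an already-open Directory named directory:

// try-with-resources closes the reader even if a search throws
try (IndexReader reader = DirectoryReader.open(directory)) {
    IndexSearcher searcher = new IndexSearcher(reader);
    // ... run queries against searcher ...
}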

Usage

From source file:io.datalayer.lucene.index.LuceneLifecycleTest.java

License:Apache License

private int getHitCount(String fieldName, String searchString) throws IOException {
    IndexReader reader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(reader);
    Term t = new Term(fieldName, searchString);
    Query query = new TermQuery(t);
    int hitCount = AosLuceneUtil.hitCount(searcher, query);
    reader.close();
    return hitCount;
}
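
Note that if AosLuceneUtil.hitCount throws, the reader above is never closed. A try/finally variant of the same method (a sketch, not the original source) guarantees the close:

private int getHitCount(String fieldName, String searchString) throws IOException {
    IndexReader reader = DirectoryReader.open(directory);
    try {
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = new TermQuery(new Term(fieldName, searchString));
        return AosLuceneUtil.hitCount(searcher, query);
    } finally {
        reader.close(); // runs even when hitCount throws
    }
}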

From source file:io.datalayer.lucene.read.LuceneReaderTest.java

License:Apache License

@Test
public void testReader() throws IOException {
    IndexReader reader = DirectoryReader.open(directory);
    assertEquals(keywords.length, reader.maxDoc());
    assertEquals(keywords.length, reader.numDocs());
    reader.close();
}
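
The two assertions agree only because this index contains no deletions: maxDoc() also counts deleted documents that have not yet been merged away, while numDocs() excludes them.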

From source file:io.datalayer.lucene.search.FileSearcherMain.java

License:Apache License

/**
 * Simple command-line based search demo.
 */
public static void main(String... args) throws Exception {

    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/java/4_0/demo.html for details.";
    if ((args.length > 0) && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        LOGGER.info(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_46);

    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_46, field, analyzer);
    while (true) {
        if ((queries == null) && (queryString == null)) { // prompt the user
            LOGGER.info("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        LOGGER.info("Searching for: " + query.toString(field));

        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            LOGGER.info("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, (queries == null) && (queryString == null));

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file:io.datalayer.lucene.search.SimpleQueryMain.java

License:Apache License

public static void main(String... args) throws IOException {

    Directory dir = FSDirectory.open(new File("/tmp/index"));
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);

    Query q = new TermQuery(new Term("contents", "lucene"));
    TopDocs topDocs = searcher.search(q, 10);
    LOGGER.info("Hits Count=" + topDocs.totalHits);
    reader.close();

}

From source file:io.datalayer.lucene.search.SimpleSearcherMain.java

License:Apache License

public static void search(String indexDir, String q) throws IOException, ParseException {

    Directory dir = FSDirectory.open(new File(indexDir));
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher is = new IndexSearcher(reader);

    QueryParser parser = new QueryParser(Version.LUCENE_46, "contents",
            new StandardAnalyzer(Version.LUCENE_46));
    Query query = parser.parse(q);
    long start = System.currentTimeMillis();
    TopDocs hits = is.search(query, 10);
    long end = System.currentTimeMillis();

    System.err.println("Found " + hits.totalHits + " document(s) (in " + (end - start)
            + " milliseconds) that matched query '" + q + "':");

    for (ScoreDoc scoreDoc : hits.scoreDocs) {
        Document doc = is.doc(scoreDoc.doc);
        LOGGER.info(doc.get("fullpath"));
    }

    reader.close();

}

From source file:io.yucca.lucene.FieldRemoverTestCase.java

License:Apache License

@Test
public void testFieldRemoval() throws IOException {
    File testindex = createTempDir("test");
    File cleanindex = createTempDir("clean");
    newFSIndex(testindex);

    String[] fields = { "dc_content" };
    FieldRemover remover = new FieldRemover();
    remover.removeFields(testindex, cleanindex, fields, Version.LATEST);
    IndexReader reader = IndexHelper.getIndexReader(cleanindex);
    IndexSearcher searcher = newSearcher(reader);
    assertEquals(1, searcher.search(new TermQuery(new Term("dc_title", "apache")), 1).totalHits);
    assertEquals(0, searcher.search(new TermQuery(new Term("dc_content", "apache")), 1).totalHits);
    reader.close();
}

From source file:ir.project.TFIDFMatrix.java

private void createTermMap() {
    try {
        IndexReader reader = DirectoryReader.open(this.index);

        this.termMap = new HashMap<>(); // maps each term to its row position in the matrix
        this.numDocs = reader.maxDoc();
        int count = 0;

        // Setup the termMap
        for (int i = 0; i < numDocs; i++) {

            Terms vector = reader.getTermVector(i, "text");
            if (vector == null) {
                System.err.println("Vector is null!");
                continue;
            }

            TermsEnum it = vector.iterator();
            while (it.next() != null) {
                String termText = it.term().utf8ToString(); // decode the term bytes once
                if (!termMap.containsKey(termText)) {
                    termMap.put(termText, count);
                    count += 1;
                }
            }
        }

        this.numTerms = count;
        reader.close();

    } catch (IOException ex) {
        Logger.getLogger(TFIDFMatrix.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:ir.project.TFIDFMatrix.java

private void createMatrix() {
    try {
        this.matrix = new TFIDFBookVector[numDocs];

        IndexReader reader = DirectoryReader.open(this.index);

        for (int i = 0; i < numDocs; i++) {
            Terms vector = reader.getTermVector(i, "text");

            // fetch the stored fields once per document instead of re-reading it per field
            Document doc = reader.document(i);
            String title = doc.getField("title").stringValue();
            String isbn = doc.getField("isbn").stringValue();
            String author = doc.getField("author").stringValue();

            this.matrix[i] = new TFIDFBookVector(numTerms, title, isbn, author);

            if (vector == null) {
                System.err.println("Vector is null");
                continue;
            }

            TermsEnum it = vector.iterator();

            while (it.next() != null) {
                Term t = new Term("text", it.term().utf8ToString());

                // on a per-document term vector, totalTermFreq() is the term's frequency within this document
                long tf = it.totalTermFreq();
                // inverse of the term's total occurrence count across the whole index
                // (a crude IDF surrogate, not the usual log(N/df) formulation)
                double idf = 1.0 / reader.totalTermFreq(t);

                double tfIdfWeight = tf * idf;

                // put TF-IDF weight in matrix
                int termIndex = this.termMap.get(it.term().utf8ToString());
                this.matrix[i].editValue(termIndex, tfIdfWeight);
            }
        }

        reader.close();

    } catch (IOException ex) {
        Logger.getLogger(TFIDFMatrix.class.getName()).log(Level.SEVERE, null, ex);
    }

}

From source file:irlucene.CFCRetrieval.java

public ScoreDoc[] query(QueryData queryData, float titleBoost) {
    HashMap<String, Float> boosts;
    MultiFieldQueryParser queryParser;
    Query q;
    IndexReader indexReader;
    IndexSearcher indexSearcher;
    TopDocs docs;
    ScoreDoc[] hits = null;
    try {
        boosts = new HashMap<>();
        if (titleBoost != 0) {
            boosts.put("title", titleBoost);
        }
        queryParser = new MultiFieldQueryParser(
                new String[] { "paperNumber", "recordNumber", "acessionNumber", "authors", "title", "source",
                        "majorSubjects", "minorSubjects", "abstractExtract", "references", "citations" },
                analyzer, boosts);
        q = queryParser.parse(queryData.getQuery());
        indexReader = DirectoryReader.open(index);
        indexSearcher = new IndexSearcher(indexReader);
        docs = indexSearcher.search(q, indexReader.numDocs());

        hits = docs.scoreDocs;
        indexReader.close();
    } catch (ParseException | IOException ex) {
        Logger.getLogger(CFCRetrieval.class.getName()).log(Level.SEVERE, null, ex);
    }
    return hits;
}
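
This query method opens and closes a fresh IndexReader on every call. For repeated searches it is usually cheaper to keep one reader alive and refresh it only when the index has changed; a minimal sketch (the refresh helper is not part of the original source):

// Returns a current reader; DirectoryReader.openIfChanged returns null
// when the index is unchanged since the old reader was opened.
static DirectoryReader refresh(DirectoryReader reader) throws IOException {
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    if (newReader == null) {
        return reader; // index unchanged; keep using the old reader
    }
    reader.close(); // release the files held by the stale reader
    return newReader;
}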

From source file:irlucene.MEDRetrieval.java

public ScoreDoc[] query(QueryData queryData) {
    HashMap<String, Float> boosts;
    MultiFieldQueryParser queryParser;
    Query q;
    IndexReader indexReader;
    IndexSearcher indexSearcher;
    TopDocs docs;
    ScoreDoc[] hits = null;
    try {
        boosts = new HashMap<>();
        queryParser = new MultiFieldQueryParser(new String[] { "id", "content" }, analyzer, boosts);
        q = queryParser.parse(queryData.getQuery());
        indexReader = DirectoryReader.open(index);
        indexSearcher = new IndexSearcher(indexReader);
        docs = indexSearcher.search(q, indexReader.numDocs());

        hits = docs.scoreDocs;
        indexReader.close();
    } catch (ParseException | IOException ex) {
        Logger.getLogger(MEDRetrieval.class.getName()).log(Level.SEVERE, null, ex);
    }
    return hits;
}