Example usage for org.apache.lucene.index IndexReader close


Introduction

On this page you can find example usages of org.apache.lucene.index.IndexReader.close().

Prototype

@Override
public final synchronized void close() throws IOException 


Document

Closes files associated with this index.
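
Because IndexReader implements Closeable, a reader can also be released with try-with-resources instead of an explicit close() call. A minimal sketch, assuming a Lucene 5+ API where FSDirectory.open takes a Path; "/path/to/index" is a placeholder:

public void printDocCount() throws IOException {
    try (Directory dir = FSDirectory.open(Paths.get("/path/to/index")); // placeholder path
            IndexReader reader = DirectoryReader.open(dir)) {
        // use the reader; both reader and directory are closed automatically on exit
        System.out.println("numDocs: " + reader.numDocs());
    }
}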

Usage

From source file:com.sxc.lucene.index.IndexingTest.java

License:Apache License

public void testIndexReader() throws IOException {
    IndexReader reader = DirectoryReader.open(directory);
    assertEquals(ids.length, reader.maxDoc()); // 8
    assertEquals(ids.length, reader.numDocs()); // 8
    reader.close();
}

From source file:com.sxc.lucene.searching.BasicSearchingTest.java

License:Apache License

public void testTerm() throws Exception {
    IndexReader reader = DirectoryReader.open(directory); // A
    IndexSearcher searcher = new IndexSearcher(reader); // B

    Term t = new Term("country", "");
    Query query = new TermQuery(t);
    TopDocs docs = searcher.search(query, 1);
    System.out.println(docs.totalHits);

    reader.close();
    directory.close();
}

From source file:com.sxc.lucene.searching.BooleanQueryTest.java

License:Apache License

public void testAnd() throws Exception {
    TermQuery searchingBooks = new TermQuery(new Term("subject", "search")); //#1

    Query books2010 = //#2
            NumericRangeQuery.newIntRange("pubmonth", 201001, //#2
                    201012, //#2
                    true, true); //#2

    BooleanQuery searchingBooks2010 = new BooleanQuery(); //#3
    searchingBooks2010.add(searchingBooks, BooleanClause.Occur.MUST); //#3
    searchingBooks2010.add(books2010, BooleanClause.Occur.MUST); //#3

    Directory dir = TestUtil.getBookIndexDirectory();
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs matches = searcher.search(searchingBooks2010, 10);

    assertTrue(TestUtil.hitsIncludeTitle(searcher, matches, "Lucene in Action, Second Edition"));
    reader.close();
    dir.close();
}

From source file:com.sxc.lucene.searching.BooleanQueryTest.java

License:Apache License

public void testOr() throws Exception {
    TermQuery methodologyBooks = new TermQuery( // #1
            new Term("category", // #1
                    "/technology/computers/programming/methodology")); // #1

    TermQuery easternPhilosophyBooks = new TermQuery( // #2
            new Term("category", // #2
                    "/philosophy/eastern")); // #2

    BooleanQuery enlightenmentBooks = new BooleanQuery(); // #3
    enlightenmentBooks.add(methodologyBooks, // #3
            BooleanClause.Occur.SHOULD); // #3
    enlightenmentBooks.add(easternPhilosophyBooks, // #3
            BooleanClause.Occur.SHOULD); // #3

    Directory dir = TestUtil.getBookIndexDirectory();
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs matches = searcher.search(enlightenmentBooks, 10);
    System.out.println("or = " + enlightenmentBooks);

    assertTrue(TestUtil.hitsIncludeTitle(searcher, matches, "Extreme Programming Explained"));
    assertTrue(TestUtil.hitsIncludeTitle(searcher, matches, "Tao Te Ching \u9053\u5FB7\u7D93"));
    reader.close();
    dir.close();
}

From source file:com.sxc.lucene.searching.TermRangeQueryTest.java

License:Apache License

public void testTermRangeQuery() throws Exception {
    String indexDir = "D:/programming/lucene/indexingTest";
    Directory dir = FSDirectory.open(new File(indexDir));
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);

    TermRangeQuery query = TermRangeQuery.newStringRange("id", "1", "4", true, true);

    TopDocs matches = searcher.search(query, 100);

    assertEquals(2, matches.totalHits);
    reader.close();
    dir.close();
}

From source file:com.taobao.common.tedis.support.lucene.analysis.xanalyzer.TestHighLight.java

License:Open Source License

/**
 * @param args
 */
public static void main(String[] args) {

    Directory ramDir = new RAMDirectory();
    try {
        IndexWriter writer = new IndexWriter(ramDir, /* new StandardAnalyzer() */ XFactory.getWriterAnalyzer());
        Document doc = new Document();
        Field fd = new Field(FIELD_NAME, CONTENT, Field.Store.YES, Field.Index.TOKENIZED,
                Field.TermVector.WITH_POSITIONS_OFFSETS);
        doc.add(fd);
        writer.addDocument(doc);
        writer.optimize();
        writer.close();

        IndexReader reader = IndexReader.open(ramDir);
        String queryString = QUERY;
        QueryParser parser = new QueryParser(FIELD_NAME, /* new StandardAnalyzer() */ XFactory.getWriterAnalyzer());
        Query query = parser.parse(queryString);
        System.out.println(query);
        Searcher searcher = new IndexSearcher(ramDir);
        query = query.rewrite(reader);
        System.out.println(query);
        System.out.println("Searching for: " + query.toString(FIELD_NAME));
        Hits hits = searcher.search(query);

        BoldFormatter formatter = new BoldFormatter();
        Highlighter highlighter = new Highlighter(formatter, new QueryScorer(query));
        highlighter.setTextFragmenter(new SimpleFragmenter(50));
        for (int i = 0; i < hits.length(); i++) {
            String text = hits.doc(i).get(FIELD_NAME);
            int maxNumFragmentsRequired = 5;
            String fragmentSeparator = "...";
            TermPositionVector tpv = (TermPositionVector) reader.getTermFreqVector(hits.id(i), FIELD_NAME);
            TokenStream tokenStream = TokenSources.getTokenStream(tpv);
            /*
             * TokenStream tokenStream2= (new StandardAnalyzer())
             * //XFactory.getWriterAnalyzer() .tokenStream(FIELD_NAME,new
             * StringReader(text));
             *
             * do { Token t = tokenStream2.next(); if(t==null)break;
             * System.out.println("\t" + t.startOffset() + "," +
             * t.endOffset() + "\t" + t.termText()); }while(true);
             */
            String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
                    fragmentSeparator);
            System.out.println("\n" + result);
        }
        reader.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:com.twentyn.patentSearch.Searcher.java

License:Open Source License

@Override
public void close() throws IOException {
    for (IndexReader reader : indexReadersAndSearchers.stream().map(Pair::getLeft)
            .collect(Collectors.toList())) {
        try {
            reader.close();
        } catch (IOException e) {
            LOGGER.error("Unable to close index reader, but continuing to try closing others: %s",
                    e.getMessage());
        }
    }
}
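
Where per-reader error logging is not needed, Lucene's org.apache.lucene.util.IOUtils offers a more compact variant that attempts to close every reader while suppressing IOExceptions. A minimal sketch, assuming the same indexReadersAndSearchers list of reader/searcher pairs as above:

@Override
public void close() throws IOException {
    // closeWhileHandlingException swallows exceptions so each reader still gets a close attempt
    IOUtils.closeWhileHandlingException(
            indexReadersAndSearchers.stream().map(Pair::getLeft).collect(Collectors.toList()));
}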

From source file:com.ua.lucene.SearchFiles.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-raw] [-norms field] [-paging hitsPerPage]";
    usage += "\n\tSpecify 'false' for hitsPerPage to use streaming instead of paging search.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String normsField = null;
    boolean paging = true;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-norms".equals(args[i])) {
            normsField = args[i + 1];
            i++;
        } else if ("-paging".equals(args[i])) {
            if (args[i + 1].equals("false")) {
                paging = false;
            } else {
                hitsPerPage = Integer.parseInt(args[i + 1]);
                if (hitsPerPage == 0) {
                    paging = false;
                }
            }
            i++;
        }
    }

    IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)), true); // only searching, so read-only=true

    if (normsField != null)
        reader = new OneNormsReader(reader, normsField);

    Searcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);

    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new FileReader(queries));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field, analyzer);
    while (true) {
        if (queries == null) // prompt the user
            System.out.println("Enter query: ");

        String line = in.readLine();

        if (line == null || line.length() == -1)
            break;

        line = line.trim();
        if (line.length() == 0)
            break;

        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        if (paging) {
            doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null);
        } else {
            doStreamingSearch(searcher, query);
        }
    }
    reader.close();
}

From source file:com.weasel.lucene.ik.sample.IKAnalyzerDemo.java

License:Apache License

public static void main(String[] args) {
    // Lucene Document field name
    String fieldName = "text";
    // sample text to index
    String text = "IK Analyzer???????";

    // instantiate the IKAnalyzer tokenizer
    Analyzer analyzer = new IKAnalyzer();

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        // create an in-memory directory for the index
        directory = new RAMDirectory();

        // configure the IndexWriter
        IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_34, analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        // build and add a document
        Document doc = new Document();
        doc.add(new Field("ID", "10000", Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(new Field(fieldName, text, Field.Store.YES, Field.Index.ANALYZED));
        iwriter.addDocument(doc);
        iwriter.close();

        // searching **********************************
        // open a reader and searcher on the in-memory index
        ireader = IndexReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        // parse the keyword into a Query with QueryParser
        QueryParser qp = new QueryParser(Version.LUCENE_34, fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);

        // retrieve the top 5 matches
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        // print the matched documents
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < topDocs.totalHits; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.work.SearchFiles.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {

    //  String indexPath = "C:/Users/Harish/Desktop/IR/Data/Data English/masc_500k_texts/written/letters/index/one/";
    //  String queries = "C:/Users/Harish/Desktop/IR/Data/Data English/masc_500k_texts/written/letters/query/query1.txt";
    //   Analyzer analyzer = new StandardAnalyzer();

    //Hindi
    String indexPath = "C:/Users/Harish/Desktop/IR/Data/Hindi Data/hin_corp_unicode/index/one/";
    //  String queries = "C:/Users/Harish/Desktop/IR/Data/Hindi Data/hin_corp_unicode/query/one.txt";
    String queries = null;
    Analyzer analyzer = new HindiAnalyzer();

    //Chinese
    //  Analyzer analyzer = new CJKAnalyzer();

    String index = indexPath;
    String field = "contents";
    String a = "???";

    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);

    BufferedReader in = null;
    if (queries != null) {
        in = Files.newBufferedReader(Paths.get(queries), StandardCharsets.UTF_8);
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    }
    QueryParser parser = new QueryParser(field, analyzer);

    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null || line.length() == -1) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}