Example usage for org.apache.lucene.index IndexReader close

List of usage examples for org.apache.lucene.index IndexReader close

Introduction

On this page you can find example usages for org.apache.lucene.index IndexReader close.

Prototype

@Override
public final synchronized void close() throws IOException 

Document

Closes files associated with this index.
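
Before the usage listings below, here is a minimal sketch of the typical open/use/close pattern the examples follow. It is illustrative only: the index path /tmp/index is hypothetical, and FSDirectory.open(Paths.get(...)) assumes Lucene 5+ (older 4.x code passes a java.io.File instead).

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CloseReaderSketch {
    public static void main(String[] args) throws IOException {
        Directory dir = FSDirectory.open(Paths.get("/tmp/index")); // hypothetical index location
        IndexReader reader = DirectoryReader.open(dir);
        try {
            // use the reader while it is open
            System.out.println("numDocs = " + reader.numDocs());
        } finally {
            reader.close(); // releases the index files held open by this reader
            dir.close();
        }
    }
}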

Usage

From source file:com.liusoft.dlog4j.search.SearchProxy.java

License:Open Source License

/**
 * Removes the index entry for the given document.
 *
 * @param doc the indexed object to remove
 * @return the number of deleted documents, or -1 if doc is null or an error occurs
 */
public static synchronized int remove(SearchEnabled doc) {
    if (doc == null)
        return -1;

    IndexReader reader = null;
    try {
        reader = getReader(doc.name());
        String pvalue = getField(doc, doc.getKeywordField());
        Term keyTerm = new Term(doc.getKeywordField(), pvalue);
        return reader.deleteDocuments(keyTerm);
    } catch (Exception e) {
        log.error("Error while deleting index of " + doc, e);
    } finally {
        if (reader != null)
            try {
                reader.close();
            } catch (Exception e) {
                log.error("Error occurred when closing IndexReader", e);
            } finally {
                reader = null;
            }
    }
    return -1;
}

From source file:com.lorelib.analyzer.sample.LuceneIndexAndSearchDemo.java

License:Apache License

/**
 * Indexing and search demo using IKAnalyzer.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    // Lucene Document field name
    String fieldName = "text";
    // Text to be indexed
    String text = "IK Analyzer???????";

    // Instantiate the IKAnalyzer in smart mode
    Analyzer analyzer = new IKAnalyzer(true);

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        // Build an in-memory index
        directory = new RAMDirectory();

        // Configure the IndexWriterConfig
        IndexWriterConfig iwConfig = new IndexWriterConfig(analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        // Write a document to the index
        Document doc = new Document();
        doc.add(new StringField("ID", "10000", Field.Store.YES));
        doc.add(new TextField(fieldName, text, Field.Store.YES));
        iwriter.addDocument(doc);
        iwriter.close();

        // Search **********************************
        // Instantiate the searcher
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        // Build a Query with QueryParser
        QueryParser qp = new QueryParser(fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        // Retrieve the top 5 hits
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("Hits: " + topDocs.totalHits);
        // Print the results
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < scoreDocs.length; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("Document: " + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.lucene.index.test.IKAnalyzerdemo.java

License:Apache License

/**
 * Multi-index search demo using MultiReader and MultiFieldQueryParser.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    // Lucene Document field name
    String fieldName = "text";
    // Texts to be indexed
    String text1 = "oracle,?";
    String text2 = "?";
    String text3 = "?";

    // Instantiate the IKAnalyzer
    Analyzer analyzer = new IKAnalyzer();

    Directory directory1 = null;
    Directory directory2 = null;
    IndexWriter iwriter1 = null;
    IndexWriter iwriter2 = null;
    IndexReader ireader1 = null;
    IndexReader ireader2 = null;
    IndexSearcher isearcher = null;
    try {
        // Build two in-memory indexes
        directory1 = new RAMDirectory();
        directory2 = new RAMDirectory();

        // Configure an IndexWriterConfig for each writer

        IndexWriterConfig iwConfig1 = new IndexWriterConfig(analyzer);
        iwConfig1.setOpenMode(OpenMode.CREATE);

        IndexWriterConfig iwConfig2 = new IndexWriterConfig(analyzer);
        iwConfig2.setOpenMode(OpenMode.CREATE);
        iwriter1 = new IndexWriter(directory1, iwConfig1);
        iwriter2 = new IndexWriter(directory2, iwConfig2);

        // Write one document to each index
        Document doc1 = new Document();
        doc1.add(new StringField("ID", "10000", Field.Store.YES));
        doc1.add(new TextField("text1", text1, Field.Store.YES));
        iwriter1.addDocument(doc1);

        Document doc2 = new Document();
        doc2.add(new StringField("ID", "10001", Field.Store.YES));
        doc2.add(new TextField("text2", text2, Field.Store.YES));
        iwriter2.addDocument(doc2);

        iwriter1.close();
        iwriter2.close();

        // Search **********************************
        // Open a reader on each index
        ireader1 = DirectoryReader.open(directory1);
        ireader2 = DirectoryReader.open(directory2);

        IndexReader[] mreader = { ireader1, ireader2 };

        MultiReader multiReader = new MultiReader(mreader);

        isearcher = new IndexSearcher(multiReader);

        String keyword = "?";
        // Build a Query across multiple fields
        String[] fields = { "text1", "text2" };

        Map<String, Float> boosts = new HashMap<String, Float>();
        boosts.put("text1", 5.0f);
        boosts.put("text2", 2.0f);
        /** MultiFieldQueryParser searches multiple fields, applying per-field boosts. */
        MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, analyzer, boosts);
        Query query = parser.parse(keyword);

        System.out.println("Query = " + query);

        // Retrieve the top 5 hits
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("Hits: " + topDocs.totalHits);
        // Print the results
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < scoreDocs.length; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("Document: " + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        // Close each reader and directory independently so a failure on one does not leak the other
        if (ireader1 != null) {
            try {
                ireader1.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (ireader2 != null) {
            try {
                ireader2.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory1 != null) {
            try {
                directory1.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory2 != null) {
            try {
                directory2.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.lucene.index.test.IKAnalyzerdemoMutilField.java

License:Apache License

/**
 * Multi-document indexing and search demo using IKAnalyzer.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    // Lucene Document field name
    String fieldName = "text";
    // Texts to be indexed
    String text1 = "oracle?";
    String text2 = "?";
    String text3 = "?";

    // Instantiate the IKAnalyzer
    Analyzer analyzer = new IKAnalyzer();

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        // Build an in-memory index
        directory = new RAMDirectory();

        // Configure the IndexWriterConfig

        IndexWriterConfig iwConfig = new IndexWriterConfig(analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        // Write three documents to the index
        Document doc1 = new Document();
        doc1.add(new StringField("ID", "10000", Field.Store.YES));
        doc1.add(new TextField(fieldName, text1, Field.Store.YES));
        iwriter.addDocument(doc1);

        Document doc2 = new Document();
        doc2.add(new StringField("ID", "10000", Field.Store.YES));
        doc2.add(new TextField(fieldName, text2, Field.Store.YES));
        iwriter.addDocument(doc2);

        Document doc3 = new Document();
        doc3.add(new StringField("ID", "10000", Field.Store.YES));
        doc3.add(new TextField(fieldName, text3, Field.Store.YES));
        iwriter.addDocument(doc3);
        iwriter.close();

        // Search **********************************
        // Instantiate the searcher
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        // Build a Query with QueryParser
        QueryParser qp = new QueryParser(fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        // Retrieve the top 5 hits
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("Hits: " + topDocs.totalHits);
        // Print the results
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < scoreDocs.length; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("Document: " + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.m3958.apps.pcms.lucene.SearchFiles.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "c:/lucene/index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    //    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);

    Analyzer analyzer = new ComplexAnalyzer();

    BufferedReader in = null;
    if (queries != null) {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(queries), "UTF-8"));
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    }
    QueryParser parser = new QueryParser(Version.LUCENE_45, field, analyzer);

    queryString = "cusvalue";
    while (true) {
        if (queries == null && queryString == null) {// prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null || line.length() == -1) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        Query query = parser.parse(line);
        System.out.println("Searching for: " + query.toString(field));

        if (repeat > 0) {// repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, null, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file:com.mathworks.xzheng.advsearching.FunctionQueryTest.java

License:Apache License

public void testRecency() throws Throwable {
    Directory dir = TestUtil.getBookIndexDirectory();
    IndexReader r = IndexReader.open(dir);
    IndexSearcher s = new IndexSearcher(r);
    s.setDefaultFieldSortScoring(true, true);

    QueryParser parser = new QueryParser(Version.LUCENE_46, "contents",
            new StandardAnalyzer(Version.LUCENE_46));
    Query q = parser.parse("java in action"); // #A
    Query q2 = new RecencyBoostingQuery(q, // #B
            2.0, 2 * 365, "pubmonthAsDay");
    Sort sort = new Sort(new SortField[] { SortField.FIELD_SCORE, new SortField("title2", SortField.STRING) });
    TopDocs hits = s.search(q2, null, 5, sort);

    for (int i = 0; i < hits.scoreDocs.length; i++) {
        Document doc = r.document(hits.scoreDocs[i].doc);
        System.out.println((1 + i) + ": " + doc.get("title") + ": pubmonth=" + doc.get("pubmonth") + " score="
                + hits.scoreDocs[i].score);
    }
    s.close();
    r.close();
    dir.close();
}

From source file:com.mathworks.xzheng.extsearch.payloads.PayloadsTest.java

License:Apache License

public void testPayloadTermQuery() throws Throwable {
    addDoc("Hurricane warning",
            "Bulletin: A hurricane warning was issued at " + "6 AM for the outer great banks");
    addDoc("Warning label maker", "The warning label maker is a delightful toy for "
            + "your precocious seven year old's warning needs");
    addDoc("Tornado warning",
            "Bulletin: There is a tornado warning for " + "Worcester county until 6 PM today");

    IndexReader r = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(r);

    searcher.setSimilarity(new BoostingSimilarity());

    Term warning = new Term("contents", "warning");

    Query query1 = new TermQuery(warning);
    System.out.println("\nTermQuery results:");
    TopDocs hits = searcher.search(query1, 10);
    TestUtil.dumpHits(searcher, hits);

    assertEquals("Warning label maker", // #B
            searcher.doc(hits.scoreDocs[0].doc).get("title")); // #B

    Query query2 = new PayloadTermQuery(warning, new AveragePayloadFunction());
    System.out.println("\nPayloadTermQuery results:");
    hits = searcher.search(query2, 10);
    TestUtil.dumpHits(searcher, hits);

    assertEquals("Warning label maker", // #C
            searcher.doc(hits.scoreDocs[2].doc).get("title")); // #C
    r.close();

}

From source file:com.mathworks.xzheng.searching.NearRealTimeTest.java

License:Apache License

public void testNearRealTime() throws Exception {
    Directory dir = new RAMDirectory();

    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_46,
            new StandardAnalyzer(Version.LUCENE_46));
    IndexWriter writer = new IndexWriter(dir, config);
    for (int i = 0; i < 10; i++) {
        Document doc = new Document();
        doc.add(new Field("id", "" + i, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
        doc.add(new Field("text", "aaa", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
    }
    IndexReader reader = DirectoryReader.open(writer, true); // #1 near-real-time reader (sees uncommitted changes)
    IndexSearcher searcher = new IndexSearcher(reader); // #A

    Query query = new TermQuery(new Term("text", "aaa"));
    TopDocs docs = searcher.search(query, 1);
    assertEquals(10, docs.totalHits); // #B

    writer.deleteDocuments(new Term("id", "7")); // #2

    Document doc = new Document(); // #3
    doc.add(new Field("id", // #3
            "11", // #3
            Field.Store.NO, // #3
            Field.Index.NOT_ANALYZED_NO_NORMS)); // #3
    doc.add(new Field("text", // #3
            "bbb", // #3
            Field.Store.NO, // #3
            Field.Index.ANALYZED)); // #3
    writer.addDocument(doc); // #3

    //IndexReader newReader = reader.reopen();                 // #4
    IndexReader newReader = DirectoryReader.open(writer, true); // #4
    assertFalse(reader == newReader); // #5
    reader.close(); // #6
    searcher = new IndexSearcher(newReader);

    TopDocs hits = searcher.search(query, 10); // #7
    assertEquals(9, hits.totalHits); // #7

    query = new TermQuery(new Term("text", "bbb")); // #8
    hits = searcher.search(query, 1); // #8
    assertEquals(1, hits.totalHits); // #8

    newReader.close();
    writer.close();
}

From source file:com.mathworks.xzheng.tools.BooksMoreLikeThis.java

License:Apache License

public static void main(String[] args) throws Throwable {

    String indexDir = System.getProperty("index.dir");
    FSDirectory directory = FSDirectory.open(new File(indexDir));
    IndexReader reader = IndexReader.open(directory);

    IndexSearcher searcher = new IndexSearcher(reader);

    int numDocs = reader.maxDoc();

    MoreLikeThis mlt = new MoreLikeThis(reader); // #A
    mlt.setFieldNames(new String[] { "title", "author" });
    mlt.setMinTermFreq(1); // #B
    mlt.setMinDocFreq(1);

    for (int docID = 0; docID < numDocs; docID++) { // #C
        System.out.println();
        Document doc = reader.document(docID);
        System.out.println(doc.get("title"));

        Query query = mlt.like(docID); // #D
        System.out.println("  query=" + query);

        TopDocs similarDocs = searcher.search(query, 10);
        if (similarDocs.totalHits == 0)
            System.out.println("  None like this");
        for (int i = 0; i < similarDocs.scoreDocs.length; i++) {
            if (similarDocs.scoreDocs[i].doc != docID) { // #E
                doc = reader.document(similarDocs.scoreDocs[i].doc);
                System.out.println("  -> " + doc.getField("title").stringValue());
            }
        }
    }

    reader.close();
    directory.close();
}

From source file:com.meizu.nlp.classification.utils.DataSplitterTest.java

License:Apache License

private static void closeQuietly(IndexReader reader) throws IOException {
    try {
        if (reader != null)
            reader.close();
    } catch (Exception e) {
        // do nothing
    }
}
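
Since IndexReader implements java.io.Closeable, code targeting Java 7+ can usually replace helpers like closeQuietly above with try-with-resources. A minimal sketch, assuming an already-opened Directory named dir:

    try (IndexReader reader = DirectoryReader.open(dir)) {
        // the reader is closed automatically when the block exits, even if an exception is thrown
        System.out.println("maxDoc = " + reader.maxDoc());
    }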