Example usage for org.apache.lucene.index IndexReader close

Introduction

This page collects example usages of org.apache.lucene.index.IndexReader.close().

Prototype

@Override
public final synchronized void close() throws IOException 

Document

Closes files associated with this index.
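
For orientation before the examples, a minimal sketch of the pattern they all follow: every reader that is opened must be closed so the files backing the index are released. IndexReader implements Closeable, so on Lucene 5+ (where FSDirectory.open takes a java.nio.file.Path; the path below is a hypothetical placeholder) a try-with-resources block closes the reader even if the work in between throws:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.FSDirectory;

public class IndexReaderCloseExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources calls reader.close() automatically,
        // releasing the files associated with the index.
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/tmp/index")))) {
            System.out.println("numDocs = " + reader.numDocs());
        }
    }
}

On Lucene 3.x, where several of the examples below were written, the same guarantee is expressed with an explicit try/finally around IndexReader.open(...) and reader.close().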

Usage

From source file:net.java.ao.SearchableEntityManager.java

License:Apache License

@Override
public void delete(RawEntity<?>... entities) throws SQLException {
    super.delete(entities);

    IndexReader reader = null;
    try {
        reader = IndexReader.open(indexDir);
        for (RawEntity<?> entity : entities) {
            removeFromIndexImpl(entity, reader);
        }
    } catch (IOException e) {
        throw (SQLException) new SQLException().initCause(e);
    } finally {
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
                // ignore: a failure to close should not mask the delete result
            }
        }
    }
}

From source file:net.java.ao.SearchableEntityManager.java

License:Apache License

/**
 * Removes the specified entity from the Lucene index.  This performs a lookup
 * in the index based on the value of the entity primary key and removes the
 * appropriate {@link Document}.
 * 
 * @param entity   The entity to remove from the index.
 * @throws IOException      If Lucene was unable to open the index.
 */
public void removeFromIndex(RawEntity<?> entity) throws IOException {
    IndexReader reader = null;
    try {
        reader = IndexReader.open(indexDir);
        removeFromIndexImpl(entity, reader);
    } finally {
        if (reader != null) {
            reader.close();
        }
    }
}

From source file:net.java.ao.SearchTest.java

License:Apache License

@Test
public void testOptimize() throws CorruptIndexException, IOException {
    IndexReader reader = IndexReader.open(manager.getIndexDir());
    assertFalse(reader.isOptimized());
    reader.close();

    manager.optimize();

    reader = IndexReader.open(manager.getIndexDir());
    assertTrue(reader.isOptimized());
    reader.close();
}
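
As an aside, isOptimized() and optimize() exist only in Lucene 3.x: optimize() was superseded by IndexWriter.forceMerge, and the reader-side check was removed in 4.0. A hedged sketch of a rough modern equivalent, assuming a fully merged index is one with a single segment:

IndexReader reader = DirectoryReader.open(manager.getIndexDir());
// a single leaf (segment) is the Lucene 4+ analogue of isOptimized() == true
boolean fullyMerged = reader.leaves().size() == 1;
reader.close();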

From source file:net.mumie.cocoon.search.IndexerImpl.java

License:Open Source License

public void updateDocument(int type, int id, int previousId) {
    final String METHOD_NAME = "updateDocument";
    if (this.existsIndex) {
        try {
            if (this.indexDir == null)
                this.indexDir = FSDirectory.getDirectory(this.indexDirPath, false);
            this.rdfUtils.getModelFromFile();
            IndexReader reader = IndexReader.open(this.indexDir);
            deleteDocument(type, previousId, reader);
            reader.close();
            addDocument(type, id);
            this.rdfUtils.writeModelToFile();
        } catch (IOException i) {
            this.getLogger().error(METHOD_NAME + " exception: " + i);
        }
    } else
        this.getLogger().warn(METHOD_NAME + " Could not find index to change.");
}

From source file:net.mumie.cocoon.search.IndexerImpl.java

License:Open Source License

/**
 * Updates a set of meta-infos of the same type as Lucene <code>Document</code> objects in the index.
 * The update covers documents with the same JAPS <code>type</code> and the same JAPS <code>Id</code>.
 * @throws IOException
 * @throws SQLException
 * @throws SAXException
 */
private void updateDocuments(ResultSet resultSet, int type) throws IOException, SAXException, SQLException {
    if (this.indexDir == null)
        this.indexDir = FSDirectory.getDirectory(this.indexDirPath, false);
    if (resultSet != null) {
        this.rdfUtils.getModelFromFile();
        IndexReader reader = IndexReader.open(this.indexDir);
        resultSet.beforeFirst();
        while (resultSet.next())
            deleteDocument(type, resultSet.getInt(DbColumn.ID), reader);
        reader.close();
        IndexWriter writer = new IndexWriter(this.indexDir, this.analyzer, false);
        addDocuments(resultSet, type, writer);
        writer.optimize();
        writer.close();
        this.rdfUtils.writeModelToFile();
    }
}

From source file:net.mumie.cocoon.search.IndexerImpl.java

License:Open Source License

public void deleteDocument(int type, int id) {
    final String METHOD_NAME = "deleteDocument";
    try {
        if (this.indexDir == null)
            this.indexDir = FSDirectory.getDirectory(this.indexDirPath, false);
        IndexReader reader = IndexReader.open(this.indexDir);
        deleteDocument(type, id, reader);
        reader.close();
    } catch (IOException i) {
        this.getLogger().error(METHOD_NAME + " exception: " + i);
    }
}

From source file:net.riezebos.thoth.content.search.Searcher.java

License:Apache License

public PagedList<SearchResult> search(Identity identity, String queryExpression, int pageNumber, int pageSize)
        throws SearchException {
    try {
        IndexReader reader = getIndexReader(contentManager);
        IndexSearcher searcher = getIndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer();

        // We might need to restrict the results to books if the user does not have access to fragments:
        AccessManager accessManager = contentManager.getAccessManager();
        boolean booksOnly = !accessManager.hasPermission(identity, "", Permission.READ_FRAGMENTS);
        if (booksOnly) {
            queryExpression = Indexer.INDEX_TYPE + ":" + Indexer.TYPE_DOCUMENT + " AND (" + queryExpression
                    + ")";
        }

        QueryParser parser = new QueryParser(Indexer.INDEX_CONTENTS, analyzer);
        Query query = parser.parse(queryExpression);

        // We add 1 to determine if there is more to be found after the current page
        int maxResults = pageSize * pageNumber + 1;
        TopDocs results = searcher.search(query, maxResults, Sort.RELEVANCE);
        ScoreDoc[] hits = results.scoreDocs;

        boolean hadMore = (hits.length == maxResults);

        List<SearchResult> searchResults = new ArrayList<>();
        int idx = 0;
        for (ScoreDoc scoreDoc : hits) {
            if (searchResults.size() == pageSize)
                break;
            idx++;
            if (idx >= (pageNumber - 1) * pageSize) {
                Document document = searcher.doc(scoreDoc.doc);
                IndexableField field = document.getField(Indexer.INDEX_PATH);
                String documentPath = field.stringValue();
                SearchResult searchResult = new SearchResult();
                searchResult.setIndexNumber((pageNumber - 1) * pageSize + idx);
                searchResult.setDocument(documentPath);

                String type = document.get(Indexer.INDEX_TYPE);
                if (Indexer.TYPE_DOCUMENT.equals(type) || Indexer.TYPE_FRAGMENT.equals(type)) {
                    searchResult.setResource(false);

                    try {
                        MarkDownDocument markDownDocument = contentManager.getMarkDownDocument(documentPath,
                                true, CriticProcessingMode.DO_NOTHING);
                        String contents = markDownDocument.getMarkdown();

                        SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter();
                        Highlighter highlighter = new Highlighter(htmlFormatter,
                                new QueryScorer(query, Indexer.INDEX_CONTENTS));
                        highlighter.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);

                        TokenStream tokenStream = analyzer.tokenStream(Indexer.INDEX_CONTENTS, contents);

                        TextFragment[] frags = highlighter.getBestTextFragments(tokenStream, contents, false,
                                99999);
                        for (TextFragment frag : frags) {
                            if ((frag != null) && (frag.getScore() > 0)) {
                                String fragmentText = frag.toString();
                                searchResult.addFragment(
                                        new Fragment(ThothCoreUtil.escapeHtmlExcept("B", fragmentText)));
                            }
                        }
                    } catch (FileNotFoundException e) {
                        LOG.warn(
                                "Index contains an invalid file reference); probably need to reindex to get rid of this. File: "
                                        + e.getMessage());
                    }
                } else {
                    searchResult.setResource(true);
                    String extension = ThothUtil.getExtension(documentPath);
                    searchResult.setImage(getConfiguration().isImageExtension(extension));

                    searchResult.addFragment(new Fragment(document.get(Indexer.INDEX_TITLE)));
                }
                searchResults.add(searchResult);
            }
        }
        reader.close();
        linkBooks(searchResults);
        PagedList<SearchResult> pagedList = new PagedList<>(searchResults, hadMore);
        return pagedList;
    } catch (Exception e) {
        throw new SearchException(e);
    }
}

From source file:net.semanticmetadata.lire.benchmarking.TestNister.java

License:Open Source License

public void createVocabulary(String pathName) throws IOException {
    // first: copy index to a new location.
    FileUtils.copyDirectory(new File("nisterindex"), new File(pathName));
    System.out.println("Index copied to " + pathName + ".");
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(pathName)));
    //        SiftFeatureHistogramBuilder sfh = new SiftFeatureHistogramBuilder(reader, 1000, numVisualWords);
    BOVWBuilder sfh = new BOVWBuilder(reader, new SurfFeature(), 2000, numVisualWords);
    sfh.index();
    reader.close();
}

From source file:net.semanticmetadata.lire.indexing.IndexVisualWordsTest.java

License:Open Source License

public void testIndexMissingFiles() throws IOException {
    // first delete some of the existing ones ...
    System.out.println("Deleting visual words from docs ...");
    IndexReader ir = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexWriter iw = LuceneUtils.createIndexWriter(index, false);
    int maxDocs = ir.maxDoc();
    for (int i = 0; i < maxDocs / 10; i++) {
        Document d = ir.document(i);
        //            d.removeFields(DocumentBuilder.FIELD_NAME_SURF + DocumentBuilder.FIELD_NAME_BOVW);
        d.removeFields(DocumentBuilder.FIELD_NAME_SURF + DocumentBuilder.FIELD_NAME_BOVW);
        //            d.removeFields(DocumentBuilder.FIELD_NAME_SURF_LOCAL_FEATURE_HISTOGRAM);
        d.removeFields(DocumentBuilder.FIELD_NAME_SURF + DocumentBuilder.FIELD_NAME_BOVW_VECTOR);
        //            d.removeFields(DocumentBuilder.FIELD_NAME_SURF);
        iw.updateDocument(new Term(DocumentBuilder.FIELD_NAME_IDENTIFIER,
                d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]), d);
    }
    System.out.println("# of deleted docs:  " + maxDocs / 10);
    System.out.println("Optimizing and closing ...");
    iw.close();
    ir.close();
    System.out.println("Creating new visual words ...");
    BOVWBuilder surfFeatureHistogramBuilder = new BOVWBuilder(
            DirectoryReader.open(FSDirectory.open(new File(index))), new SurfFeature(), numSamples, clusters);
    //        surfFeatureHistogramBuilder.indexMissing();
    //        System.out.println("Finished.");
}

From source file:net.sf.jclal.util.dataset.LuceneIndexToWekaDataSet.java

License:Open Source License

/**
 * Closes the given searcher and reader.
 *
 * @param searcher The searcher to close.
 * @param reader The reader to close.
 * @throws IOException If closing either resource fails.
 */
public static void closeReaders(IndexSearcher searcher, IndexReader reader) throws IOException {
    searcher.close();
    reader.close();
}
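
Note that IndexSearcher.close() exists only in Lucene 3.x; from 4.0 onward the searcher holds no closeable resources of its own. A minimal sketch under that assumption, where only the underlying reader needs releasing:

public static void closeReader(IndexReader reader) throws IOException {
    // Lucene 4+: IndexSearcher has no close() method, so closing the
    // reader is all that is needed to release the index files.
    reader.close();
}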