Example usage for org.apache.lucene.index IndexReader close


Introduction

On this page you can find usage examples for org.apache.lucene.index IndexReader close.

Prototype

@Override
public final synchronized void close() throws IOException 

Document

Closes files associated with this index.
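
Most of the examples below follow the same pattern: obtain an IndexReader, use it, and call close() in a finally block so the underlying index files are released even when an exception is thrown. The following is a minimal sketch of that pattern, not taken from any of the listed projects; it assumes a Lucene version in which FSDirectory.open accepts a java.nio.file.Path (5.x or later), and the index location is a placeholder.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CloseReaderSketch {

    /**
     * Counts the live documents in an index and always closes the reader.
     *
     * @param indexPath path to an existing index directory (placeholder value).
     */
    public static int countDocs(String indexPath) throws IOException {
        Directory dir = FSDirectory.open(Paths.get(indexPath));
        IndexReader reader = DirectoryReader.open(dir);
        try {
            return reader.numDocs();
        } finally {
            // Closes files associated with this index.
            reader.close();
            dir.close();
        }
    }
}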

Usage

From source file:org.apache.jackrabbit.core.query.lucene.SearchIndex.java

License:Apache License

/**
 * Retrieves the root of the indexing aggregate for <code>removedNodeIds</code>
 * and puts it into <code>map</code>.
 *
 * @param removedNodeIds the ids of removed nodes.
 * @param map            aggregate roots are collected in this map.
 *                       Key=NodeId, value=NodeState.
 */
protected void retrieveAggregateRoot(Set removedNodeIds, Map map) {
    if (indexingConfig != null) {
        AggregateRule aggregateRules[] = indexingConfig.getAggregateRules();
        if (aggregateRules == null) {
            return;
        }
        int found = 0;
        long time = System.currentTimeMillis();
        try {
            IndexReader reader = index.getIndexReader();
            try {
                Term aggregateUUIDs = new Term(FieldNames.AGGREGATED_NODE_UUID, "");
                TermDocs tDocs = reader.termDocs();
                try {
                    ItemStateManager ism = getContext().getItemStateManager();
                    for (Iterator it = removedNodeIds.iterator(); it.hasNext();) {
                        NodeId id = (NodeId) it.next();
                        aggregateUUIDs = aggregateUUIDs.createTerm(id.getUUID().toString());
                        tDocs.seek(aggregateUUIDs);
                        while (tDocs.next()) {
                            Document doc = reader.document(tDocs.doc());
                            String uuid = doc.get(FieldNames.UUID);
                            NodeId nId = new NodeId(UUID.fromString(uuid));
                            map.put(nId, ism.getItemState(nId));
                            found++;
                        }
                    }
                } finally {
                    tDocs.close();
                }
            } finally {
                reader.close();
            }
        } catch (Exception e) {
            log.warn("Exception while retrieving aggregate roots", e);
        }
        time = System.currentTimeMillis() - time;
        log.debug("Retrieved {} aggregate roots in {} ms.", new Integer(found), new Long(time));
    }
}

From source file:org.apache.jackrabbit.core.query.lucene.ChainedTermEnumTest.java

License:Apache License

protected TermEnum createTermEnum(String prefix, int numTerms) throws IOException {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir,
            new IndexWriterConfig(Version.LUCENE_36, new StandardAnalyzer(Version.LUCENE_36)));
    try {
        for (int i = 0; i < numTerms; i++) {
            Document doc = new Document();
            doc.add(new Field("field", true, prefix + i, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS,
                    Field.TermVector.NO));
            writer.addDocument(doc);
        }
    } finally {
        writer.close();
    }
    IndexReader reader = IndexReader.open(dir);
    try {
        TermEnum terms = reader.terms();
        if (terms.term() == null) {
            // position at first term
            terms.next();
        }
        return terms;
    } finally {
        reader.close();
    }
}

From source file:org.apache.jackrabbit.core.query.lucene.IndexMigration.java

License:Apache License

/**
 * Checks if the given <code>index</code> needs to be migrated.
 *
 * @param index the index to check and migrate if needed.
 * @param directoryManager the directory manager.
 * @param oldSeparatorChar the old separator char that needs to be replaced.
 * @throws IOException if an error occurs while migrating the index.
 */
public static void migrate(PersistentIndex index, DirectoryManager directoryManager, char oldSeparatorChar)
        throws IOException {
    Directory indexDir = index.getDirectory();
    log.debug("Checking {} ...", indexDir);
    ReadOnlyIndexReader reader = index.getReadOnlyIndexReader();
    try {
        if (IndexFormatVersion.getVersion(reader).getVersion() >= IndexFormatVersion.V3.getVersion()) {
            // index was created with Jackrabbit 1.5 or higher
            // no need for migration
            log.debug("IndexFormatVersion >= V3, no migration needed");
            return;
        }
        // assert: there is at least one node in the index, otherwise the
        //         index format version would be at least V3
        TermEnum terms = reader.terms(new Term(FieldNames.PROPERTIES, ""));
        try {
            Term t = terms.term();
            if (t.text().indexOf(oldSeparatorChar) == -1) {
                log.debug("Index already migrated");
                return;
            }
        } finally {
            terms.close();
        }
    } finally {
        reader.release();
        index.releaseWriterAndReaders();
    }

    // if we get here then the index must be migrated
    log.debug("Index requires migration {}", indexDir);

    String migrationName = index.getName() + "_v36";
    if (directoryManager.hasDirectory(migrationName)) {
        directoryManager.delete(migrationName);
    }

    Directory migrationDir = directoryManager.getDirectory(migrationName);
    final IndexWriterConfig c = new IndexWriterConfig(Version.LUCENE_36, new JackrabbitAnalyzer());
    c.setMergePolicy(new UpgradeIndexMergePolicy(new LogByteSizeMergePolicy()));
    c.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    try {
        IndexWriter writer = new IndexWriter(migrationDir, c);
        try {
            IndexReader r = new MigrationIndexReader(IndexReader.open(index.getDirectory()), oldSeparatorChar);
            try {
                writer.addIndexes(r);
                writer.forceMerge(1);
                writer.close();
            } finally {
                r.close();
            }
        } finally {
            writer.close();
        }
    } finally {
        migrationDir.close();
    }
    directoryManager.delete(index.getName());
    if (!directoryManager.rename(migrationName, index.getName())) {
        throw new IOException("failed to move migrated directory " + migrationDir);
    }
    log.info("Migrated " + index.getName());
}

From source file:org.apache.jackrabbit.core.query.lucene.Util.java

License:Apache License

/**
 * Depending on the type of the <code>reader</code> this method either
 * closes or releases the reader. The reader is released if it implements
 * {@link ReleaseableIndexReader}.
 * 
 * @param reader
 *            the index reader to close or release.
 * @throws IOException
 *             if an error occurs while closing or releasing the index
 *             reader.
 */
public static void closeOrRelease(IndexReader reader) throws IOException {
    if (reader instanceof ReleaseableIndexReader) {
        ((ReleaseableIndexReader) reader).release();
    } else {
        reader.close();
    }
}

From source file:org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexTest.java

License:Apache License

public int getDeletedDocCount(NodeBuilder idx, IndexDefinition definition) throws IOException {
    IndexReader reader = DirectoryReader.open(newIndexDirectory(definition, idx));
    int numDeletes = reader.numDeletedDocs();
    reader.close();
    return numDeletes;
}

From source file:org.apache.jetspeed.services.search.lucene.LuceneSearchService.java

License:Apache License

/**
 * Removes the documents for the given collection of objects from the search index.
 *
 * @see org.apache.jetspeed.services.search.SearchService#remove(java.lang.Collection)
 * @param c the collection of objects to remove
 * @return <code>true</code> if at least one document was deleted
 */
public boolean remove(Collection c) {
    boolean result = false;

    try {
        IndexReader indexReader = IndexReader.open(this.rootDir);

        Iterator it = c.iterator();
        while (it.hasNext()) {
            Object o = it.next();
            // Look up appropriate handler
            ObjectHandler handler = HandlerFactory.getHandler(o);

            // Parse the object
            ParsedObject parsedObject = handler.parseObject(o);

            // Create term
            Term term = null;

            if (parsedObject.getKey() != null) {
                term = new Term(ParsedObject.FIELDNAME_KEY, parsedObject.getKey());
                // Remove the document from search index
                int rc = indexReader.delete(term);
                logger.info(
                        "Attempted to delete '" + term.toString() + "' from index, documents deleted = " + rc);
                //System.out.println("Attempted to delete '" + term.toString() + "' from index, documents deleted = " + rc);
                result = rc > 0;
            }
        }

        indexReader.close();

        IndexWriter indexWriter = new IndexWriter(rootDir, new StandardAnalyzer(), false);
        indexWriter.optimize();
        indexWriter.close();

    } catch (Exception e) {
        logger.error("Exception", e);
        result = false;
    }

    return result;
}

From source file:org.apache.mahout.text.SequenceFilesFromLuceneStorage.java

License:Apache License

/**
 * Generates sequence files from a Lucene index via the given {@link LuceneStorageConfiguration}.
 *
 * @param lucene2seqConf configuration bean
 * @throws java.io.IOException if the index cannot be opened or the sequence file cannot be written
 */
public void run(final LuceneStorageConfiguration lucene2seqConf) throws IOException {
    List<Path> indexPaths = lucene2seqConf.getIndexPaths();
    int processedDocs = 0;

    for (Path indexPath : indexPaths) {
        Directory directory = FSDirectory.open(new File(indexPath.toUri().getPath()));
        IndexReader reader = DirectoryReader.open(directory);
        IndexSearcher searcher = new IndexSearcher(reader);

        LuceneIndexHelper.fieldShouldExistInIndex(reader, lucene2seqConf.getIdField());
        for (String field : lucene2seqConf.getFields()) {
            LuceneIndexHelper.fieldShouldExistInIndex(reader, field);
        }

        Configuration configuration = lucene2seqConf.getConfiguration();
        FileSystem fileSystem = FileSystem.get(configuration);
        Path sequenceFilePath = new Path(lucene2seqConf.getSequenceFilesOutputPath(), indexPath.getName());
        final SequenceFile.Writer sequenceFileWriter = new SequenceFile.Writer(fileSystem, configuration,
                sequenceFilePath, Text.class, Text.class);

        SeqFileWriterCollector writerCollector = new SeqFileWriterCollector(lucene2seqConf, sequenceFileWriter,
                processedDocs);
        searcher.search(lucene2seqConf.getQuery(), writerCollector);
        log.info("Wrote " + writerCollector.processedDocs + " documents in " + sequenceFilePath.toUri());
        processedDocs = writerCollector.processedDocs;
        Closeables.close(sequenceFileWriter, false);
        directory.close();
        //searcher.close();
        reader.close();
    }
}

From source file:org.apache.marmotta.ucuenca.wk.commons.impl.CommonsServicesImpl.java

@Override
public String getIndexedPublicationsFilter(String querystr) {

    try {

        // Create path and index
        Path p1 = Paths.get("idxCentralGraph");
        FSDirectory index = FSDirectory.open(p1);

        //IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_40, analyzer);
        //IndexWriter writer = new IndexWriter(dir, config);
        // 0. Specify the analyzer for tokenizing text.
        //    The same analyzer should be used for indexing and searching
        StandardAnalyzer analyzer = new StandardAnalyzer();

        // 1. query
        Query q = new QueryParser("title", analyzer).parse(querystr);

        // 2. search
        int hitsPerPage = 20;
        IndexReader reader = DirectoryReader.open(index);
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs docs = searcher.search(q, hitsPerPage);
        ScoreDoc[] hits = docs.scoreDocs;

        // 3. display results
        String filter = "";
        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = searcher.doc(docId);
            if (i == 0) {
                filter = "regex(str(?publicationUri), \"" + d.get("id") + "\" )";
            } else {
                filter += "|| regex(str(?publicationUri), \"" + d.get("id") + "\" )";
            }
        }

        // reader can only be closed when there
        // is no need to access the documents any more.
        reader.close();

        return filter;
    } catch (InvalidArgumentException ex) {
        return "error:  " + ex;
    } catch (IOException ex) {
        return "error:  " + ex;
    } catch (ParseException ex) {
        java.util.logging.Logger.getLogger(CommonsServicesImpl.class.getName()).log(Level.SEVERE, null, ex);
    }
    return "";
}

From source file:org.apache.maven.index.context.DefaultIndexingContext.java

License:Apache License

public synchronized void merge(Directory directory, DocumentFilter filter) throws IOException {
    final IndexSearcher s = acquireIndexSearcher();
    try {
        final IndexWriter w = getIndexWriter();
        final IndexReader directoryReader = DirectoryReader.open(directory);
        TopScoreDocCollector collector = null;
        try {
            int numDocs = directoryReader.maxDoc();

            Bits liveDocs = MultiFields.getLiveDocs(directoryReader);
            for (int i = 0; i < numDocs; i++) {
                if (liveDocs != null && !liveDocs.get(i)) {
                    continue;
                }

                Document d = directoryReader.document(i);
                if (filter != null && !filter.accept(d)) {
                    continue;
                }

                String uinfo = d.get(ArtifactInfo.UINFO);
                if (uinfo != null) {
                    collector = TopScoreDocCollector.create(1);
                    s.search(new TermQuery(new Term(ArtifactInfo.UINFO, uinfo)), collector);
                    if (collector.getTotalHits() == 0) {
                        w.addDocument(IndexUtils.updateDocument(d, this, false));
                    }
                } else {
                    String deleted = d.get(ArtifactInfo.DELETED);

                    if (deleted != null) {
                        // Deleting the document loses the history that it was deleted,
                        // so incrementals won't work. Therefore, put the deleted
                        // document in as well.
                        w.deleteDocuments(new Term(ArtifactInfo.UINFO, deleted));
                        w.addDocument(d);
                    }
                }
            }

        } finally {
            directoryReader.close();
            commit();
        }

        rebuildGroups();
        Date mergedTimestamp = IndexUtils.getTimestamp(directory);

        if (getTimestamp() != null && mergedTimestamp != null && mergedTimestamp.after(getTimestamp())) {
            // we have both, keep the newest
            updateTimestamp(true, mergedTimestamp);
        } else {
            updateTimestamp(true);
        }
        optimize();
    } finally {
        releaseIndexSearcher(s);
    }
}