Example usage for org.apache.lucene.index IndexReader incRef

Introduction

This page lists example usages of org.apache.lucene.index IndexReader incRef, collected from open-source projects.

Prototype

public final void incRef() 

Document

Expert: increments the refCount of this IndexReader instance.
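
Every incRef() must be balanced by a matching decRef(): the reader is only actually closed once its reference count drops back to zero. Below is a minimal sketch of the usual pairing, assuming an already-open shared reader; the method and variable names are illustrative, not taken from the examples that follow.

static void withPinnedReader(IndexReader shared) throws IOException {
    shared.incRef(); // pin: a concurrent decRef()/close() elsewhere cannot free the reader now
    try {
        int docs = shared.numDocs(); // any read against the pinned reader
        System.out.println("numDocs=" + docs);
    } finally {
        shared.decRef(); // release the pin; the reader closes once the count reaches zero
    }
}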

Usage

From source file:net.sf.lucis.core.impl.AbstractStore.java

License:Apache License

private void setManagedReader(IndexReader r) {
    r.incRef();
    reader = r;
    lastManaged = true;
    watch.reset().start();
}
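
The snippet above takes a reference on the reader it caches in the reader field. A hypothetical counterpart (not part of the AbstractStore code shown here) would drop that reference before a new reader is installed or the store shuts down:

private void releaseManagedReader() throws IOException {
    if (reader != null) {
        reader.decRef(); // balances the incRef() in setManagedReader(); may close the reader
        reader = null;
    }
}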

From source file:org.alfresco.repo.search.impl.lucene.FilterIndexReaderByStringId.java

License:Open Source License

/**
 * Apply the filter.
 * 
 * @param id String
 * @param reader IndexReader
 * @param deletions Set<String>
 * @param containerDeletions Set<String>
 * @param deleteNodesOnly boolean
 */
public FilterIndexReaderByStringId(String id, IndexReader reader, Set<String> deletions,
        Set<String> containerDeletions, boolean deleteNodesOnly) {
    super(reader);
    reader.incRef();
    this.id = id;
    this.deletions = deletions;
    this.containerDeletions = containerDeletions;
    this.deleteNodesOnly = deleteNodesOnly;

    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Applying deletions FOR " + id
                + " (the index ito which these are applied is the previous one ...)");
    }

}
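
Because the constructor takes its own reference on the wrapped reader, closing the filter must give that reference back rather than close the delegate outright. A plausible shape for the matching release (a sketch, not reproduced from the Alfresco source):

@Override
protected void doClose() throws IOException {
    in.decRef(); // balance the incRef() taken in the constructor
}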

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

License:Open Source License

/**
 * Get the main reader for committed index data.
 * 
 * @return IndexReader
 * @throws IOException
 */
public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader() throws IOException {
    getReadLock();
    try {
        // Check if we need to rebuild the main indexer as it is invalid.
        // (it is shared and quick version check fails)
        if (indexIsShared && !checkVersion()) {
            releaseReadLock();
            getWriteLock();
            try {
                if (mainIndexReader != null) {
                    ((ReferenceCounting) mainIndexReader).setInvalidForReuse();
                }
                mainIndexReader = null;
            } finally {
                getReadLock();
                releaseWriteLock();
            }
        }

        // Build if required
        if (mainIndexReader == null) {
            releaseReadLock();
            getWriteLock();
            try {
                if (mainIndexReader == null) {
                    // Sync with disk image if required
                    doWithFileLock(new LockWork<Object>() {
                        public Object doWork() throws Exception {
                            return null;
                        }

                        public boolean canRetry() {
                            return true;
                        }

                    });
                    mainIndexReader = createMainIndexReader();

                }

            } finally {
                getReadLock();
                releaseWriteLock();
            }
        }
        // Manage reference counting
        mainIndexReader.incRef();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Main index reader references = "
                    + ((ReferenceCounting) mainIndexReader).getReferenceCount());
        }

        // ALF-10040: Wrap with a one-off CachingIndexReader (with cache disabled) so that LeafScorer behaves and passes through SingleFieldSelectors to the main index readers
        IndexReader reader = ReferenceCountingReadOnlyIndexReaderFactory
                .createReader(MAIN_READER + GUID.generate(), mainIndexReader, false, config);
        ReferenceCounting refCounting = (ReferenceCounting) reader;
        reader.incRef();
        refCounting.setInvalidForReuse();
        return reader;
    } catch (RuntimeException e) {
        e.printStackTrace();
        throw e;
    } finally {
        releaseReadLock();
    }
}
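
The read-lock/write-lock shuffle above follows a downgrade idiom: the read lock is reacquired before the write lock is released, so the thread never loses its consistent view while other readers are let back in. The idiom in isolation, reusing the method names above (a sketch):

releaseReadLock();
getWriteLock();
try {
    // rebuild or invalidate shared state (e.g. mainIndexReader) under exclusion
} finally {
    getReadLock();      // downgrade: take the read lock before giving up the write lock
    releaseWriteLock(); // other readers may proceed; our read view stays valid
}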

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

License:Open Source License

/**
 * Get the main index reader augmented with the specified TX data. As above, but we add the TX data.
 * 
 * @param id String
 * @param deletions Set<String>
 * @param containerDeletions Set<String>
 * @param deleteOnlyNodes boolean
 * @return IndexReader
 * @throws IOException
 */
public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set<String> deletions,
        Set<String> containerDeletions, boolean deleteOnlyNodes) throws IOException {
    if (id == null) {
        throw new IndexerException("\"null\" is not a valid identifier for a transaction");
    }
    getReadLock();
    try {
        if (indexIsShared && !checkVersion()) {
            releaseReadLock();
            getWriteLock();
            try {
                if (mainIndexReader != null) {
                    ((ReferenceCounting) mainIndexReader).setInvalidForReuse();
                }
                mainIndexReader = null;
            } finally {
                getReadLock();
                releaseWriteLock();
            }
        }

        if (mainIndexReader == null) {
            releaseReadLock();
            getWriteLock();
            try {
                if (mainIndexReader == null) {
                    // Sync with disk image if required
                    doWithFileLock(new LockWork<Object>() {
                        public Object doWork() throws Exception {
                            return null;
                        }

                        public boolean canRetry() {
                            return true;
                        }

                    });
                    mainIndexReader = createMainIndexReader();

                }
            } finally {
                getReadLock();
                releaseWriteLock();
            }
        }
        // Combine the index delta with the main index
        // Make sure the index is written to disk
        // TODO: Should use the in memory index but we often end up forcing
        // to disk anyway.
        // Is it worth it?
        // luceneIndexer.flushPending();

        IndexReader deltaReader = buildAndRegisterDeltaReader(id);
        IndexReader reader = null;
        if ((deletions == null || deletions.size() == 0)
                && (containerDeletions == null || containerDeletions.size() == 0)) {
            reader = new MultiReader(new IndexReader[] { mainIndexReader, deltaReader }, false);
        } else {
            IndexReader filterReader = new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions,
                    containerDeletions, deleteOnlyNodes);
            reader = new MultiReader(new IndexReader[] { filterReader, deltaReader }, false);
            // Cancel out extra incRef made by MultiReader
            filterReader.decRef();
        }

        // The reference count would have been incremented automatically by MultiReader /
        // FilterIndexReaderByStringId
        deltaReader.decRef();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Main index reader references = "
                    + ((ReferenceCounting) mainIndexReader).getReferenceCount());
        }
        reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(MAIN_READER + id, reader, false,
                config);
        ReferenceCounting refCounting = (ReferenceCounting) reader;
        reader.incRef();
        refCounting.setInvalidForReuse();
        return reader;
    } finally {
        releaseReadLock();
    }
}
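
Both IndexInfo methods lean on MultiReader's reference behaviour: constructed with closeSubReaders == false, MultiReader takes its own reference on each sub-reader and releases it again on close. That is why readers built purely as glue are decRef()'d immediately after wrapping. The pattern in isolation (a sketch, where sub stands for any freshly built sub-reader):

IndexReader multi = new MultiReader(new IndexReader[] { sub }, false); // incRefs sub
sub.decRef();  // hand our reference over to the MultiReader
// ... use multi ...
multi.close(); // decRefs sub; sub closes once nobody else holds a reference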

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

License:Open Source License

private IndexReader createMainIndexReader() throws IOException {
    IndexReader reader = null;
    IndexReader oldReader = null;
    for (String id : indexEntries.keySet()) {
        IndexEntry entry = indexEntries.get(id);
        if (entry.getStatus().isCommitted()) {
            IndexReader subReader = getReferenceCountingIndexReader(id);
            if (reader == null) {
                reader = subReader;
            } else {
                boolean oldReaderIsSubReader = oldReader == null;
                oldReader = reader;
                reader = mainIndexReaders.get(id);
                if (reader == null) {
                    if (entry.getType() == IndexType.INDEX) {
                        reader = new MultiReader(new IndexReader[] { oldReader, subReader }, false);
                    } else if (entry.getType() == IndexType.DELTA) {
                        try {
                            IndexReader filterReader = new FilterIndexReaderByStringId(id, oldReader,
                                    getDeletions(entry.getName(), INDEX_INFO_DELETIONS),
                                    getDeletions(entry.getName(), INDEX_INFO_CONTAINER_DELETIONS),
                                    entry.isDeletOnlyNodes());
                            reader = new MultiReader(new IndexReader[] { filterReader, subReader }, false);
                            // Cancel out the incRef on the filter reader
                            filterReader.decRef();
                        } catch (IOException ioe) {
                            s_logger.error("Failed building filter reader beneath " + entry.getName(), ioe);
                            throw ioe;
                        }
                    }
                    reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(id + "multi", reader,
                            true, config);
                    mainIndexReaders.put(id, reader);
                }
            }
        }
    }
    if (reader == null) {
        reader = IndexReader.open(emptyIndex);
    } else {
        // Keep this reader open whilst it is referenced by mainIndexReaders / referenceCountingReadOnlyIndexReaders
        reader.incRef();
    }

    reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(MAIN_READER, reader, false, config);
    return reader;
}

From source file:org.apache.blur.manager.writer.BlurIndexSimpleWriter.java

License:Apache License

public IndexSearcherCloseable getIndexSearcher(boolean security) throws IOException {
    final IndexReader indexReader;
    _indexRefreshReadLock.lock();
    try {
        indexReader = _indexReader.get();
        indexReader.incRef();
    } finally {
        _indexRefreshReadLock.unlock();
    }
    if (indexReader instanceof ExitableReader) {
        ((ExitableReader) indexReader).reset();
    }
    if (security) {
        return getSecureIndexSearcher(indexReader);
    } else {
        return getInsecureIndexSearcher(indexReader);
    }
}
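
The read lock makes the get()-then-incRef() pair atomic with respect to the refresh thread, which presumably swaps _indexReader and retires the old reader under the matching write lock. The same handshake in isolation (a sketch; the names are illustrative, not Blur's API):

IndexReader pinCurrent(AtomicReference<IndexReader> current, ReadWriteLock refreshLock) {
    refreshLock.readLock().lock();
    try {
        IndexReader r = current.get();
        r.incRef(); // safe: the refresher needs the write lock to retire r
        return r;   // the caller must decRef() when finished
    } finally {
        refreshLock.readLock().unlock();
    }
}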

From source file:org.apache.solr.search.TestStressLucene.java

License:Apache License

@Test
public void testStressLuceneNRT() throws Exception {
    final int commitPercent = 5 + random().nextInt(20);
    final int softCommitPercent = 30 + random().nextInt(75); // what percent of the commits are soft
    final int deletePercent = 4 + random().nextInt(25);
    final int deleteByQueryPercent = 1 + random().nextInt(5);
    final int ndocs = 5 + (random().nextBoolean() ? random().nextInt(25) : random().nextInt(200));
    int nWriteThreads = 5 + random().nextInt(25);

    final int maxConcurrentCommits = nWriteThreads; // number of committers at a time... it should be <= maxWarmingSearchers

    final AtomicLong operations = new AtomicLong(100000); // number of query operations to perform in total
    int nReadThreads = 5 + random().nextInt(25);
    final boolean tombstones = random().nextBoolean();
    final boolean syncCommits = random().nextBoolean();

    verbose("commitPercent=", commitPercent);
    verbose("softCommitPercent=", softCommitPercent);
    verbose("deletePercent=", deletePercent);
    verbose("deleteByQueryPercent=", deleteByQueryPercent);
    verbose("ndocs=", ndocs);
    verbose("nWriteThreads=", nWriteThreads);
    verbose("nReadThreads=", nReadThreads);
    verbose("maxConcurrentCommits=", maxConcurrentCommits);
    verbose("operations=", operations);
    verbose("tombstones=", tombstones);
    verbose("syncCommits=", syncCommits);

    initModel(ndocs);

    final AtomicInteger numCommitting = new AtomicInteger();

    List<Thread> threads = new ArrayList<Thread>();

    final FieldType idFt = new FieldType();
    idFt.setIndexed(true);
    idFt.setStored(true);
    idFt.setOmitNorms(true);
    idFt.setTokenized(false);
    idFt.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);

    final FieldType ft2 = new FieldType();
    ft2.setIndexed(false);
    ft2.setStored(true);

    // model how solr does locking - only allow one thread to do a hard commit at once, and only one thread to do a soft commit, but
    // a hard commit in progress does not stop a soft commit.
    final Lock hardCommitLock = syncCommits ? new ReentrantLock() : null;
    final Lock reopenLock = syncCommits ? new ReentrantLock() : null;

    // RAMDirectory dir = new RAMDirectory();
    // final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_40, new WhitespaceAnalyzer(Version.LUCENE_40)));

    Directory dir = newDirectory();

    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    writer.setDoRandomForceMergeAssert(false);

    // writer.commit();
    // reader = IndexReader.open(dir);
    // make this reader an NRT reader from the start to avoid the first non-writer openIfChanged
    // to only opening at the last commit point.
    reader = DirectoryReader.open(writer.w, true);

    for (int i = 0; i < nWriteThreads; i++) {
        Thread thread = new Thread("WRITER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.get() > 0) {
                        int oper = rand.nextInt(100);

                        if (oper < commitPercent) {
                            if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
                                Map<Integer, DocInfo> newCommittedModel;
                                long version;
                                DirectoryReader oldReader;

                                boolean softCommit = rand.nextInt(100) < softCommitPercent;

                                if (!softCommit) {
                                    // only allow one hard commit to proceed at once
                                    if (hardCommitLock != null)
                                        hardCommitLock.lock();
                                    verbose("hardCommit start");

                                    writer.commit();
                                }

                                if (reopenLock != null)
                                    reopenLock.lock();

                                synchronized (globalLock) {
                                    newCommittedModel = new HashMap<Integer, DocInfo>(model); // take a snapshot
                                    version = snapshotCount++;
                                    oldReader = reader;
                                    oldReader.incRef(); // increment the reference since we will use this for reopening
                                }

                                if (!softCommit) {
                                    // must commit after taking a snapshot of the model
                                    // writer.commit();
                                }

                                verbose("reopen start using", oldReader);

                                DirectoryReader newReader;
                                if (softCommit) {
                                    newReader = DirectoryReader.openIfChanged(oldReader, writer.w, true);
                                } else {
                                    // will only open to last commit
                                    newReader = DirectoryReader.openIfChanged(oldReader);
                                }

                                if (newReader == null) {
                                    oldReader.incRef();
                                    newReader = oldReader;
                                }
                                oldReader.decRef();

                                verbose("reopen result", newReader);

                                synchronized (globalLock) {
                                    assert newReader.getRefCount() > 0;
                                    assert reader.getRefCount() > 0;

                                    // install the new reader if it's newest (and check the current version since another reader may have already been installed)
                                    if (newReader.getVersion() > reader.getVersion()) {
                                        reader.decRef();
                                        reader = newReader;

                                        // install this snapshot only if it's newer than the current one
                                        if (version >= committedModelClock) {
                                            committedModel = newCommittedModel;
                                            committedModelClock = version;
                                        }

                                    } else {
                                        // close if unused
                                        newReader.decRef();
                                    }

                                }

                                if (reopenLock != null)
                                    reopenLock.unlock();

                                if (!softCommit) {
                                    if (hardCommitLock != null)
                                        hardCommitLock.unlock();
                                }

                            }
                            numCommitting.decrementAndGet();
                            continue;
                        }

                        int id = rand.nextInt(ndocs);
                        Object sync = syncArr[id];

                        // set the lastId before we actually change it sometimes to try and
                        // uncover more race conditions between writing and reading
                        boolean before = rand.nextBoolean();
                        if (before) {
                            lastId = id;
                        }

                        // We can't concurrently update the same document and retain our invariants of increasing values
                        // since we can't guarantee what order the updates will be executed.
                        synchronized (sync) {
                            DocInfo info = model.get(id);
                            long val = info.val;
                            long nextVal = Math.abs(val) + 1;

                            if (oper < commitPercent + deletePercent) {
                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleting id", id, "val=", nextVal);
                                writer.deleteDocuments(new Term("id", Integer.toString(id)));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleting id", id, "val=", nextVal, "DONE");

                            } else if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
                                //assertU("<delete><query>id:" + id + "</query></delete>");

                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleteByQuery", id, "val=", nextVal);
                                writer.deleteDocuments(new TermQuery(new Term("id", Integer.toString(id))));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleteByQuery", id, "val=", nextVal, "DONE");
                            } else {
                                // model.put(id, nextVal);   // uncomment this and this test should fail.

                                // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                                Document d = new Document();
                                d.add(new Field("id", Integer.toString(id), idFt));
                                d.add(new Field(field, Long.toString(nextVal), ft2));
                                verbose("adding id", id, "val=", nextVal);
                                writer.updateDocument(new Term("id", Integer.toString(id)), d);
                                if (tombstones) {
                                    // remove tombstone after new addition (this should be optional?)
                                    verbose("deleting tombstone for id", id);
                                    writer.deleteDocuments(new Term("id", "-" + Integer.toString(id)));
                                    verbose("deleting tombstone for id", id, "DONE");
                                }

                                model.put(id, new DocInfo(0, nextVal));
                                verbose("adding id", id, "val=", nextVal, "DONE");
                            }
                        }

                        if (!before) {
                            lastId = id;
                        }
                    }
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        };

        threads.add(thread);
    }

    for (int i = 0; i < nReadThreads; i++) {
        Thread thread = new Thread("READER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.decrementAndGet() >= 0) {
                        // bias toward a recently changed doc
                        int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);

                        // when indexing, we update the index, then the model
                        // so when querying, we should first check the model, and then the index

                        DocInfo info;
                        synchronized (globalLock) {
                            info = committedModel.get(id);
                        }
                        long val = info.val;

                        IndexReader r;
                        synchronized (globalLock) {
                            r = reader;
                            r.incRef();
                        }

                        int docid = getFirstMatch(r, new Term("id", Integer.toString(id)));

                        if (docid < 0 && tombstones) {
                            // if we couldn't find the doc, look for its tombstone
                            docid = getFirstMatch(r, new Term("id", "-" + Integer.toString(id)));
                            if (docid < 0) {
                                if (val == -1L) {
                                    // expected... no doc was added yet
                                    r.decRef();
                                    continue;
                                }
                                verbose("ERROR: Couldn't find a doc  or tombstone for id", id, "using reader",
                                        r, "expected value", val);
                                fail("No documents or tombstones found for id " + id + ", expected at least "
                                        + val);
                            }
                        }

                        if (docid < 0 && !tombstones) {
                            // nothing to do - we can't tell anything from a deleted doc without tombstones
                        } else {
                            if (docid < 0) {
                                verbose("ERROR: Couldn't find a doc for id", id, "using reader", r);
                            }
                            assertTrue(docid >= 0); // we should have found the document, or its tombstone
                            Document doc = r.document(docid);
                            long foundVal = Long.parseLong(doc.get(field));
                            if (foundVal < Math.abs(val)) {
                                verbose("ERROR: id", id, "model_val=", val, " foundVal=", foundVal, "reader=",
                                        reader);
                            }
                            assertTrue(foundVal >= Math.abs(val));
                        }

                        r.decRef();
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);
    }

    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    writer.close();
    reader.close();
    dir.close();
}
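
The commit branch of the writer threads is the standard NRT reopen dance. Distilled (a sketch in the Lucene 4.x terms the test uses): pin the old reader, attempt a reopen, and make sure exactly one reference survives on whichever reader wins.

DirectoryReader old = reader;
old.incRef();                                       // pin for the reopen attempt
DirectoryReader fresh = DirectoryReader.openIfChanged(old);
if (fresh == null) {                                // nothing changed: keep the old reader
    old.incRef();
    fresh = old;
}
old.decRef();                                       // release the reopen pin
// fresh now carries exactly one reference owned by this code path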

From source file:org.hibernate.search.backend.impl.lucene.NRTWorkspaceImpl.java

License:Open Source License

@Override
public IndexReader openIndexReader() {
    IndexReader indexReader;
    if (indexReaderIsFresh()) {
        indexReader = currentReader.get();
    } else {
        indexReader = refreshReaders();
    }
    if (indexReader == null) {
        writeLock.lock();
        try {
            if (shutdown) {
                throw new AssertionFailure("IndexReader requested after ReaderProvider is shutdown");
            }
            indexReader = currentReader.get();
            if (indexReader == null) {
                indexReader = writerHolder.openDirectoryIndexReader();
                currentReader.set(indexReader);
            }
        } finally {
            writeLock.unlock();
        }
    }
    indexReader.incRef();
    return indexReader;
}
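
Every reader returned by openIndexReader() carries one extra reference, so the provider's release path must decRef() rather than close(). A plausible counterpart (a sketch, not reproduced from the Hibernate Search source):

public void closeIndexReader(IndexReader reader) {
    if (reader == null) {
        return;
    }
    try {
        reader.decRef(); // balances the incRef() in openIndexReader()
    } catch (IOException e) {
        // log and continue: a failed release should not propagate to the caller
    }
}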