Example usage for org.apache.lucene.index IndexReader decRef

List of usage examples for org.apache.lucene.index IndexReader decRef

Introduction

On this page you can find example usages of org.apache.lucene.index IndexReader decRef.

Prototype

@SuppressWarnings("try")
public final void decRef() throws IOException 

Source Link

Document

Expert: decreases the refCount of this IndexReader instance. Once the refCount drops to 0, the reader is closed.
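
For context, a reader's refCount starts at 1 when it is opened; incRef() adds a reference, decRef() releases one, and the reader is actually closed once the count reaches 0. Below is a minimal sketch of that pairing, not taken from the examples that follow; the DecRefSketch class and useReader method are illustrative names only, and an existing Directory is assumed.

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.Directory;

public class DecRefSketch {
    static void useReader(Directory dir) throws IOException {
        IndexReader reader = DirectoryReader.open(dir); // refCount starts at 1
        reader.incRef(); // take an extra reference before sharing the reader with other code
        try {
            // ... run searches against the reader here ...
        } finally {
            reader.decRef(); // release the extra reference
        }
        reader.decRef(); // release the original reference; the reader closes when refCount reaches 0
    }
}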

Usage

From source file:org.apache.blur.manager.writer.BlurIndexSimpleWriter.java

License:Apache License

private IndexSearcherCloseable getSecureIndexSearcher(final IndexReader indexReader) throws IOException {
    String readStr = null;
    String discoverStr = null;
    User user = UserContext.getUser();
    if (user != null) {
        Map<String, String> attributes = user.getAttributes();
        if (attributes != null) {
            readStr = attributes.get(ACL_READ);
            discoverStr = attributes.get(ACL_DISCOVER);
        }
    }
    Collection<String> readAuthorizations = toCollection(readStr);
    Collection<String> discoverAuthorizations = toCollection(discoverStr);
    return new IndexSearcherCloseableSecureBase(indexReader, _searchThreadPool, _accessControlFactory,
            readAuthorizations, discoverAuthorizations, _discoverableFields) {
        private boolean _closed;

        @Override
        public Directory getDirectory() {
            return _directory;
        }

        @Override
        public synchronized void close() throws IOException {
            if (!_closed) {
                indexReader.decRef();
                _closed = true;
            } else {
                // Not really sure why close gets called twice on some indexes.
                // This is in place to log it.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Searcher already closed [{0}].", new Throwable(), this);
                }
            }
        }
    };
}

From source file:org.apache.blur.manager.writer.BlurIndexSimpleWriter.java

License:Apache License

private IndexSearcherCloseable getInsecureIndexSearcher(final IndexReader indexReader) {
    return new IndexSearcherCloseableBase(indexReader, _searchThreadPool) {
        private boolean _closed;

        @Override
        public Directory getDirectory() {
            return _directory;
        }

        @Override
        public synchronized void close() throws IOException {
            if (!_closed) {
                indexReader.decRef();
                _closed = true;
            } else {
                // Not really sure why close gets called twice on some indexes.
                // This is in place to log it.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Searcher already closed [{0}].", new Throwable(), this);
                }
            }
        }
    };
}

From source file:org.apache.solr.search.TestStressLucene.java

License:Apache License

@Test
public void testStressLuceneNRT() throws Exception {
    final int commitPercent = 5 + random().nextInt(20);
    final int softCommitPercent = 30 + random().nextInt(75); // what percent of the commits are soft
    final int deletePercent = 4 + random().nextInt(25);
    final int deleteByQueryPercent = 1 + random().nextInt(5);
    final int ndocs = 5 + (random().nextBoolean() ? random().nextInt(25) : random().nextInt(200));
    int nWriteThreads = 5 + random().nextInt(25);

    final int maxConcurrentCommits = nWriteThreads; // number of committers at a time... it should be <= maxWarmingSearchers

    final AtomicLong operations = new AtomicLong(100000); // number of query operations to perform in total
    int nReadThreads = 5 + random().nextInt(25);
    final boolean tombstones = random().nextBoolean();
    final boolean syncCommits = random().nextBoolean();

    verbose("commitPercent=", commitPercent);
    verbose("softCommitPercent=", softCommitPercent);
    verbose("deletePercent=", deletePercent);
    verbose("deleteByQueryPercent=", deleteByQueryPercent);
    verbose("ndocs=", ndocs);
    verbose("nWriteThreads=", nWriteThreads);
    verbose("nReadThreads=", nReadThreads);
    verbose("maxConcurrentCommits=", maxConcurrentCommits);
    verbose("operations=", operations);
    verbose("tombstones=", tombstones);
    verbose("syncCommits=", syncCommits);

    initModel(ndocs);

    final AtomicInteger numCommitting = new AtomicInteger();

    List<Thread> threads = new ArrayList<Thread>();

    final FieldType idFt = new FieldType();
    idFt.setIndexed(true);
    idFt.setStored(true);
    idFt.setOmitNorms(true);
    idFt.setTokenized(false);
    idFt.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);

    final FieldType ft2 = new FieldType();
    ft2.setIndexed(false);
    ft2.setStored(true);

    // model how solr does locking - only allow one thread to do a hard commit at once, and only one thread to do a soft commit, but
    // a hard commit in progress does not stop a soft commit.
    final Lock hardCommitLock = syncCommits ? new ReentrantLock() : null;
    final Lock reopenLock = syncCommits ? new ReentrantLock() : null;

    // RAMDirectory dir = new RAMDirectory();
    // final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_40, new WhitespaceAnalyzer(Version.LUCENE_40)));

    Directory dir = newDirectory();

    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    writer.setDoRandomForceMergeAssert(false);

    // writer.commit();
    // reader = IndexReader.open(dir);
    // make this reader an NRT reader from the start to avoid the first non-writer openIfChanged
    // only opening at the last commit point.
    reader = DirectoryReader.open(writer.w, true);

    for (int i = 0; i < nWriteThreads; i++) {
        Thread thread = new Thread("WRITER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.get() > 0) {
                        int oper = rand.nextInt(100);

                        if (oper < commitPercent) {
                            if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
                                Map<Integer, DocInfo> newCommittedModel;
                                long version;
                                DirectoryReader oldReader;

                                boolean softCommit = rand.nextInt(100) < softCommitPercent;

                                if (!softCommit) {
                                    // only allow one hard commit to proceed at once
                                    if (hardCommitLock != null)
                                        hardCommitLock.lock();
                                    verbose("hardCommit start");

                                    writer.commit();
                                }

                                if (reopenLock != null)
                                    reopenLock.lock();

                                synchronized (globalLock) {
                                    newCommittedModel = new HashMap<Integer, DocInfo>(model); // take a snapshot
                                    version = snapshotCount++;
                                    oldReader = reader;
                                    oldReader.incRef(); // increment the reference since we will use this for reopening
                                }

                                if (!softCommit) {
                                    // must commit after taking a snapshot of the model
                                    // writer.commit();
                                }

                                verbose("reopen start using", oldReader);

                                DirectoryReader newReader;
                                if (softCommit) {
                                    newReader = DirectoryReader.openIfChanged(oldReader, writer.w, true);
                                } else {
                                    // will only open to last commit
                                    newReader = DirectoryReader.openIfChanged(oldReader);
                                }

                                if (newReader == null) {
                                    oldReader.incRef();
                                    newReader = oldReader;
                                }
                                oldReader.decRef();

                                verbose("reopen result", newReader);

                                synchronized (globalLock) {
                                    assert newReader.getRefCount() > 0;
                                    assert reader.getRefCount() > 0;

                                    // install the new reader if it's the newest (and check the current version since another reader may have already been installed)
                                    if (newReader.getVersion() > reader.getVersion()) {
                                        reader.decRef();
                                        reader = newReader;

                                        // install this snapshot only if it's newer than the current one
                                        if (version >= committedModelClock) {
                                            committedModel = newCommittedModel;
                                            committedModelClock = version;
                                        }

                                    } else {
                                        // close if unused
                                        newReader.decRef();
                                    }

                                }

                                if (reopenLock != null)
                                    reopenLock.unlock();

                                if (!softCommit) {
                                    if (hardCommitLock != null)
                                        hardCommitLock.unlock();
                                }

                            }
                            numCommitting.decrementAndGet();
                            continue;
                        }

                        int id = rand.nextInt(ndocs);
                        Object sync = syncArr[id];

                        // set the lastId before we actually change it sometimes to try and
                        // uncover more race conditions between writing and reading
                        boolean before = rand.nextBoolean();
                        if (before) {
                            lastId = id;
                        }

                        // We can't concurrently update the same document and retain our invariants of increasing values
                        // since we can't guarantee in what order the updates will be executed.
                        synchronized (sync) {
                            DocInfo info = model.get(id);
                            long val = info.val;
                            long nextVal = Math.abs(val) + 1;

                            if (oper < commitPercent + deletePercent) {
                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleting id", id, "val=", nextVal);
                                writer.deleteDocuments(new Term("id", Integer.toString(id)));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleting id", id, "val=", nextVal, "DONE");

                            } else if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
                                //assertU("<delete><query>id:" + id + "</query></delete>");

                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleteByQuery", id, "val=", nextVal);
                                writer.deleteDocuments(new TermQuery(new Term("id", Integer.toString(id))));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleteByQuery", id, "val=", nextVal, "DONE");
                            } else {
                                // model.put(id, nextVal);   // uncomment this and this test should fail.

                                // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                                Document d = new Document();
                                d.add(new Field("id", Integer.toString(id), idFt));
                                d.add(new Field(field, Long.toString(nextVal), ft2));
                                verbose("adding id", id, "val=", nextVal);
                                writer.updateDocument(new Term("id", Integer.toString(id)), d);
                                if (tombstones) {
                                    // remove tombstone after new addition (this should be optional?)
                                    verbose("deleting tombstone for id", id);
                                    writer.deleteDocuments(new Term("id", "-" + Integer.toString(id)));
                                    verbose("deleting tombstone for id", id, "DONE");
                                }

                                model.put(id, new DocInfo(0, nextVal));
                                verbose("adding id", id, "val=", nextVal, "DONE");
                            }
                        }

                        if (!before) {
                            lastId = id;
                        }
                    }
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        };

        threads.add(thread);
    }

    for (int i = 0; i < nReadThreads; i++) {
        Thread thread = new Thread("READER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.decrementAndGet() >= 0) {
                        // bias toward a recently changed doc
                        int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);

                        // when indexing, we update the index, then the model
                        // so when querying, we should first check the model, and then the index

                        DocInfo info;
                        synchronized (globalLock) {
                            info = committedModel.get(id);
                        }
                        long val = info.val;

                        IndexReader r;
                        synchronized (globalLock) {
                            r = reader;
                            r.incRef();
                        }

                        int docid = getFirstMatch(r, new Term("id", Integer.toString(id)));

                        if (docid < 0 && tombstones) {
                            // if we couldn't find the doc, look for its tombstone
                            docid = getFirstMatch(r, new Term("id", "-" + Integer.toString(id)));
                            if (docid < 0) {
                                if (val == -1L) {
                                    // expected... no doc was added yet
                                    r.decRef();
                                    continue;
                                }
                                verbose("ERROR: Couldn't find a doc  or tombstone for id", id, "using reader",
                                        r, "expected value", val);
                                fail("No documents or tombstones found for id " + id + ", expected at least "
                                        + val);
                            }
                        }

                        if (docid < 0 && !tombstones) {
                            // nothing to do - we can't tell anything from a deleted doc without tombstones
                        } else {
                            if (docid < 0) {
                                verbose("ERROR: Couldn't find a doc for id", id, "using reader", r);
                            }
                            assertTrue(docid >= 0); // we should have found the document, or its tombstone
                            Document doc = r.document(docid);
                            long foundVal = Long.parseLong(doc.get(field));
                            if (foundVal < Math.abs(val)) {
                                verbose("ERROR: id", id, "model_val=", val, " foundVal=", foundVal, "reader=",
                                        reader);
                            }
                            assertTrue(foundVal >= Math.abs(val));
                        }

                        r.decRef();
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);
    }

    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    writer.close();
    reader.close();
    dir.close();
}

From source file:org.elasticsearch.common.lucene.manager.SearcherManager.java

License:Apache License

static IndexSearcher getSearcher(SearcherFactory searcherFactory, IndexReader reader) throws IOException {
    boolean success = false;
    final IndexSearcher searcher;
    try {
        searcher = searcherFactory.newSearcher(reader);
        if (searcher.getIndexReader() != reader) {
            throw new IllegalStateException("SearcherFactory must wrap exactly the provided reader (got "
                    + searcher.getIndexReader() + " but expected " + reader + ")");
        }
        success = true;
    } finally {
        if (!success) {
            reader.decRef();
        }
    }
    return searcher;
}

From source file:org.hibernate.search.backend.impl.lucene.NRTWorkspaceImpl.java

License:Open Source License

@Override
public void closeIndexReader(IndexReader reader) {
    if (reader == null) {
        return;
    }
    try {
        //don't use IndexReader#close as it prevents further counter decrements!
        reader.decRef();
    } catch (IOException e) {
        log.unableToCloseLuceneIndexReader(e);
    }
}

From source file:org.mulgara.resolver.lucene.LuceneIndexerCache.java

License:Apache License

/** Force the reader closed by cleaning up outstanding references.
 * @throws IOException */
private static void forceClose(IndexReader reader) throws IOException {
    try {
        if (reader.getRefCount() > 1) {
            // This likely indicates a FullTextStringIndexTuples that was not properly closed.
            // Closing it now is likely to break any existing references to it.
            logger.warn("Forcing close of a reader that was returned to the cache with active references: "
                    + System.identityHashCode(reader));
            while (reader.getRefCount() > 1) {
                reader.decRef();
            }
        }
    } catch (IOException e) {
        logger.error("Can't decrement reference count to abandoned reader", e);
        throw e;
    } finally {
        reader.close();
    }
}

From source file:org.weborganic.flint.SearcherManager.java

License:artistic-license-2.0

/**
 * Release the given reader.
 *
 * @param reader the reader to release
 *
 * @throws IOException If an error occurs when closing the reader, once it is no longer in use.
 */
protected synchronized void releaseReader(IndexReader reader) throws IOException {
    LOGGER.debug("Releasing reader {}", reader.hashCode());
    reader.decRef();
    // check if we should close an old one
    closeIfDirty(reader);
}

From source file:org.zenoss.zep.index.impl.lucene.LuceneEventIndexBackend.java

License:Open Source License

@Override
public LuceneSavedSearch buildSavedSearch(String uuid, EventQuery eventQuery) throws ZepException {
    if (eventQuery.getTimeout() < 1)
        throw new ZepException("Invalid timeout: " + eventQuery.getTimeout());

    IndexReader reader;
    try {
        reader = DirectoryReader.open(writer, false);
    } catch (IOException e) {
        String msg = "Unable to get Lucene reader";
        logger.warn(msg, e);
        throw new ZepException(msg, e);
    }
    try {
        final Query query = buildQuery(reader, eventQuery.getEventFilter(), eventQuery.getExclusionFilter());
        final Sort sort = buildSort(eventQuery.getSortList());
        return new LuceneSavedSearch(uuid, reader, query, sort, eventQuery.getTimeout());
    } catch (Exception e) {
        try {
            reader.decRef();
        } catch (IOException ex) {
            logger.warn("Exception decrementing reference count", ex);
        }
        if (e instanceof ZepException) {
            throw (ZepException) e;
        }
        throw new ZepException(e.getLocalizedMessage(), e);
    }
}