Example usage for org.apache.lucene.index DirectoryReader openIfChanged

Introduction

On this page you can find example usages of org.apache.lucene.index.DirectoryReader.openIfChanged.

Prototype

public static DirectoryReader openIfChanged(DirectoryReader oldReader, IndexWriter writer,
        boolean applyAllDeletes) throws IOException 

Document

Expert: Opens a new reader, if there are any changes, controlling whether past deletions should be applied.
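
A minimal sketch of the common refresh idiom, assuming Lucene 4.x and an already-open IndexWriter (the method and variable names below are illustrative, not taken from the sources that follow): open an initial near-real-time reader from the writer, then periodically ask openIfChanged for a fresher view. A null return means nothing has changed, so the old reader stays valid; otherwise the caller swaps in the new reader and closes the stale one.

// Minimal sketch (assumes Lucene 4.x; "writer" is an open IndexWriter).
// The caller owns the returned reader and must eventually close it.
public static DirectoryReader refresh(DirectoryReader reader, IndexWriter writer) throws IOException {
    // Ask for a view that also reflects the writer's uncommitted changes;
    // the final argument controls whether the writer's deletes are applied.
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader, writer, true);
    if (newReader == null) {
        return reader; // nothing changed; keep using the current reader
    }
    reader.close(); // release the stale reader
    return newReader;
}

All of the usage examples below follow this same null-check-and-swap pattern, differing only in how they track commits and reference counts.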

Usage

From source file: com.bah.lucene.BaseDirectoryTestSuite.java

License: Apache License

@Test
public void testCreateIndex() throws IOException {
    long s = System.nanoTime();
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
    FSDirectory control = FSDirectory.open(fileControl);
    Directory dir = getControlDir(control, directory);
    // The serial merge scheduler can be useful for debugging.
    // conf.setMergeScheduler(new SerialMergeScheduler());
    IndexWriter writer = new IndexWriter(dir, conf);
    int numDocs = 10000;
    DirectoryReader reader = null;
    for (int i = 0; i < 100; i++) {
        if (reader == null) {
            reader = DirectoryReader.open(writer, true);
        } else {
            DirectoryReader old = reader;
            reader = DirectoryReader.openIfChanged(old, writer, true);
            if (reader == null) {
                reader = old;
            } else {
                old.close();
            }
        }
        assertEquals(i * numDocs, reader.numDocs());
        IndexSearcher searcher = new IndexSearcher(reader);
        NumericRangeQuery<Integer> query = NumericRangeQuery.newIntRange("id", 42, 42, true, true);
        TopDocs topDocs = searcher.search(query, 10);
        assertEquals(i, topDocs.totalHits);
        addDocuments(writer, numDocs);
    }
    writer.close(false);
    reader.close();
    long e = System.nanoTime();
    System.out.println("Total time [" + (e - s) / 1000000.0 + " ms]");
}

From source file: org.apache.blur.store.BaseDirectoryTestSuite.java

License: Apache License

@Test
public void testCreateIndex() throws IOException {
    long s = System.nanoTime();
    IndexWriterConfig conf = new IndexWriterConfig(LuceneVersionConstant.LUCENE_VERSION, new KeywordAnalyzer());
    IndexDeletionPolicyReader indexDeletionPolicy = new IndexDeletionPolicyReader(
            new KeepOnlyLastCommitDeletionPolicy());
    conf.setIndexDeletionPolicy(indexDeletionPolicy);
    FSDirectory control = FSDirectory.open(fileControl);
    Directory dir = getControlDir(control, directory);
    // The serial merge scheduler can be useful for debugging.
    // conf.setMergeScheduler(new SerialMergeScheduler());
    IndexWriter writer = new IndexWriter(dir, conf);
    int numDocs = 1000;
    DirectoryReader reader = null;
    long gen = 0;
    for (int i = 0; i < 100; i++) {
        if (reader == null) {
            reader = DirectoryReader.open(writer, true);
            gen = reader.getIndexCommit().getGeneration();
            indexDeletionPolicy.register(gen);
        } else {
            DirectoryReader old = reader;
            reader = DirectoryReader.openIfChanged(old, writer, true);
            if (reader == null) {
                reader = old;
            } else {
                long newGen = reader.getIndexCommit().getGeneration();
                indexDeletionPolicy.register(newGen);
                indexDeletionPolicy.unregister(gen);
                old.close();
                gen = newGen;
            }
        }
        assertEquals(i * numDocs, reader.numDocs());
        IndexSearcher searcher = new IndexSearcher(reader);
        NumericRangeQuery<Integer> query = NumericRangeQuery.newIntRange("id", 42, 42, true, true);
        TopDocs topDocs = searcher.search(query, 10);
        assertEquals(i, topDocs.totalHits);
        addDocuments(writer, numDocs);
    }
    writer.close(false);
    reader.close();
    long e = System.nanoTime();
    System.out.println("Total time [" + (e - s) / 1000000.0 + " ms]");
}

From source file: org.apache.solr.core.SolrCore.java

License: Apache License

/** Opens a new searcher and returns a RefCounted&lt;SolrIndexSearcher&gt; with its reference incremented.
 *
 * "realtime" means that we need to open quickly for a realtime view of the index, hence don't do any
 * autowarming and add to the _realtimeSearchers queue rather than the _searchers queue (so it won't
 * be used for autowarming by a future normal searcher).  A "realtime" searcher will currently never
 * become "registered" (since it currently lacks caching).
 *
 * realtimeSearcher is updated to the latest opened searcher, regardless of the value of "realtime".
 *
 * This method acquires openSearcherLock - do not call with searcherLock held!
 */
public RefCounted<SolrIndexSearcher> openNewSearcher(boolean updateHandlerReopens, boolean realtime) {
    if (isClosed()) { // catch some errors quicker
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
    }

    SolrIndexSearcher tmp;
    RefCounted<SolrIndexSearcher> newestSearcher = null;
    boolean nrt = solrConfig.nrtMode && updateHandlerReopens;

    openSearcherLock.lock();
    try {
        String newIndexDir = getNewIndexDir();
        String indexDirFile = null;
        String newIndexDirFile = null;

        // if it's not a normal near-realtime update, check that paths haven't changed.
        if (!nrt) {
            indexDirFile = getDirectoryFactory().normalize(getIndexDir());
            newIndexDirFile = getDirectoryFactory().normalize(newIndexDir);
        }

        synchronized (searcherLock) {
            newestSearcher = realtimeSearcher;
            if (newestSearcher != null) {
                newestSearcher.incref(); // the matching decref is in the finally block
            }
        }

        if (newestSearcher != null && (nrt || indexDirFile.equals(newIndexDirFile))) {

            DirectoryReader newReader;
            DirectoryReader currentReader = newestSearcher.get().getIndexReader();

            // SolrCore.verbose("start reopen from",previousSearcher,"writer=",writer);

            RefCounted<IndexWriter> writer = getUpdateHandler().getSolrCoreState().getIndexWriter(null);
            try {
                if (writer != null && solrConfig.nrtMode) {
                    // if in NRT mode, open from the writer
                    newReader = DirectoryReader.openIfChanged(currentReader, writer.get(), true);
                } else {
                    // verbose("start reopen without writer, reader=", currentReader);
                    // if not in NRT mode, just re-open the reader
                    newReader = DirectoryReader.openIfChanged(currentReader);
                    // verbose("reopen result", newReader);
                }
            } finally {
                if (writer != null) {
                    writer.decref();
                }
            }

            if (newReader == null) {
                // if this is a request for a realtime searcher, just return the same searcher if there haven't been any changes.
                if (realtime) {
                    newestSearcher.incref();
                    return newestSearcher;
                }

                currentReader.incRef();
                newReader = currentReader;
            }

            // for now, turn off caches if this is for a realtime reader (caches take a little while to instantiate)
            tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), getSolrConfig().indexConfig,
                    (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);

        } else {
            // newestSearcher == null at this point

            if (newReaderCreator != null) {
                // this is set in the constructor if there is a currently open index writer
                // so that we pick up any uncommitted changes and so we don't go backwards
                // in time on a core reload
                DirectoryReader newReader = newReaderCreator.call();
                tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), getSolrConfig().indexConfig,
                        (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);
            } else if (solrConfig.nrtMode) {
                RefCounted<IndexWriter> writer = getUpdateHandler().getSolrCoreState().getIndexWriter(this);
                DirectoryReader newReader = null;
                try {
                    newReader = indexReaderFactory.newReader(writer.get(), this);
                } finally {
                    writer.decref();
                }
                tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), getSolrConfig().indexConfig,
                        (realtime ? "realtime" : "main"), newReader, true, !realtime, true, directoryFactory);
            } else {
                // normal open that happens at startup
                // verbose("non-reopen START:");
                tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), getSolrConfig().indexConfig,
                        "main", true, directoryFactory);
                // verbose("non-reopen DONE: searcher=",tmp);
            }
        }

        List<RefCounted<SolrIndexSearcher>> searcherList = realtime ? _realtimeSearchers : _searchers;
        RefCounted<SolrIndexSearcher> newSearcher = newHolder(tmp, searcherList); // refcount now at 1

        // Increment reference again for "realtimeSearcher" variable.  It should be at 2 after.
        // When it's decremented by both the caller of this method, and by realtimeSearcher being replaced,
        // it will be closed.
        newSearcher.incref();

        synchronized (searcherLock) {
            if (realtimeSearcher != null) {
                realtimeSearcher.decref();
            }
            realtimeSearcher = newSearcher;
            searcherList.add(realtimeSearcher);
        }

        return newSearcher;

    } catch (Exception e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error opening new searcher", e);
    } finally {
        openSearcherLock.unlock();
        if (newestSearcher != null) {
            newestSearcher.decref();
        }
    }

}
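
From the caller's side, the refcounting contract described in the javadoc above is symmetric: every acquired searcher must be released with decref(). A hedged sketch of the usual caller pattern, assuming a SolrCore instance named core (getSearcher() is the ordinary public entry point; openNewSearcher is typically reached indirectly through it):

// Hedged caller-side sketch (assumes a SolrCore named "core").
RefCounted<SolrIndexSearcher> holder = core.getSearcher(); // reference count already incremented
try {
    SolrIndexSearcher searcher = holder.get();
    // ... run queries against the searcher ...
} finally {
    holder.decref(); // matching decref; the searcher closes once all holders release it
}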

From source file: org.apache.solr.search.TestStressLucene.java

License: Apache License

@Test
public void testStressLuceneNRT() throws Exception {
    final int commitPercent = 5 + random().nextInt(20);
    final int softCommitPercent = 30 + random().nextInt(75); // what percent of the commits are soft
    final int deletePercent = 4 + random().nextInt(25);
    final int deleteByQueryPercent = 1 + random().nextInt(5);
    final int ndocs = 5 + (random().nextBoolean() ? random().nextInt(25) : random().nextInt(200));
    int nWriteThreads = 5 + random().nextInt(25);

    final int maxConcurrentCommits = nWriteThreads; // number of committers at a time... it should be <= maxWarmingSearchers

    final AtomicLong operations = new AtomicLong(100000); // number of query operations to perform in total
    int nReadThreads = 5 + random().nextInt(25);
    final boolean tombstones = random().nextBoolean();
    final boolean syncCommits = random().nextBoolean();

    verbose("commitPercent=", commitPercent);
    verbose("softCommitPercent=", softCommitPercent);
    verbose("deletePercent=", deletePercent);
    verbose("deleteByQueryPercent=", deleteByQueryPercent);
    verbose("ndocs=", ndocs);
    verbose("nWriteThreads=", nWriteThreads);
    verbose("nReadThreads=", nReadThreads);
    verbose("maxConcurrentCommits=", maxConcurrentCommits);
    verbose("operations=", operations);
    verbose("tombstones=", tombstones);
    verbose("syncCommits=", syncCommits);

    initModel(ndocs);

    final AtomicInteger numCommitting = new AtomicInteger();

    List<Thread> threads = new ArrayList<Thread>();

    final FieldType idFt = new FieldType();
    idFt.setIndexed(true);
    idFt.setStored(true);
    idFt.setOmitNorms(true);
    idFt.setTokenized(false);
    idFt.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);

    final FieldType ft2 = new FieldType();
    ft2.setIndexed(false);
    ft2.setStored(true);

    // model how solr does locking - only allow one thread to do a hard commit at once, and only one thread to do a soft commit, but
    // a hard commit in progress does not stop a soft commit.
    final Lock hardCommitLock = syncCommits ? new ReentrantLock() : null;
    final Lock reopenLock = syncCommits ? new ReentrantLock() : null;

    // RAMDirectory dir = new RAMDirectory();
    // final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_40, new WhitespaceAnalyzer(Version.LUCENE_40)));

    Directory dir = newDirectory();

    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    writer.setDoRandomForceMergeAssert(false);

    // writer.commit();
    // reader = IndexReader.open(dir);
    // make this reader an NRT reader from the start so that the first non-writer openIfChanged
    // does not open only at the last commit point.
    reader = DirectoryReader.open(writer.w, true);

    for (int i = 0; i < nWriteThreads; i++) {
        Thread thread = new Thread("WRITER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.get() > 0) {
                        int oper = rand.nextInt(100);

                        if (oper < commitPercent) {
                            if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
                                Map<Integer, DocInfo> newCommittedModel;
                                long version;
                                DirectoryReader oldReader;

                                boolean softCommit = rand.nextInt(100) < softCommitPercent;

                                if (!softCommit) {
                                    // only allow one hard commit to proceed at once
                                    if (hardCommitLock != null)
                                        hardCommitLock.lock();
                                    verbose("hardCommit start");

                                    writer.commit();
                                }

                                if (reopenLock != null)
                                    reopenLock.lock();

                                synchronized (globalLock) {
                                    newCommittedModel = new HashMap<Integer, DocInfo>(model); // take a snapshot
                                    version = snapshotCount++;
                                    oldReader = reader;
                                    oldReader.incRef(); // increment the reference since we will use this for reopening
                                }

                                if (!softCommit) {
                                    // must commit after taking a snapshot of the model
                                    // writer.commit();
                                }

                                verbose("reopen start using", oldReader);

                                DirectoryReader newReader;
                                if (softCommit) {
                                    newReader = DirectoryReader.openIfChanged(oldReader, writer.w, true);
                                } else {
                                    // will only open to last commit
                                    newReader = DirectoryReader.openIfChanged(oldReader);
                                }

                                if (newReader == null) {
                                    oldReader.incRef();
                                    newReader = oldReader;
                                }
                                oldReader.decRef();

                                verbose("reopen result", newReader);

                                synchronized (globalLock) {
                                    assert newReader.getRefCount() > 0;
                                    assert reader.getRefCount() > 0;

                                    // install the new reader if it's newest (and check the current version since another reader may have already been installed)
                                    if (newReader.getVersion() > reader.getVersion()) {
                                        reader.decRef();
                                        reader = newReader;

                                        // install this snapshot only if it's newer than the current one
                                        if (version >= committedModelClock) {
                                            committedModel = newCommittedModel;
                                            committedModelClock = version;
                                        }

                                    } else {
                                        // close if unused
                                        newReader.decRef();
                                    }

                                }

                                if (reopenLock != null)
                                    reopenLock.unlock();

                                if (!softCommit) {
                                    if (hardCommitLock != null)
                                        hardCommitLock.unlock();
                                }

                            }
                            numCommitting.decrementAndGet();
                            continue;
                        }

                        int id = rand.nextInt(ndocs);
                        Object sync = syncArr[id];

                        // sometimes set lastId before we actually change it, to try to
                        // uncover more race conditions between writing and reading
                        boolean before = rand.nextBoolean();
                        if (before) {
                            lastId = id;
                        }

                        // We can't concurrently update the same document and retain our invariants of increasing values
                        // since we can't guarantee what order the updates will be executed.
                        synchronized (sync) {
                            DocInfo info = model.get(id);
                            long val = info.val;
                            long nextVal = Math.abs(val) + 1;

                            if (oper < commitPercent + deletePercent) {
                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleting id", id, "val=", nextVal);
                                writer.deleteDocuments(new Term("id", Integer.toString(id)));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleting id", id, "val=", nextVal, "DONE");

                            } else if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
                                //assertU("<delete><query>id:" + id + "</query></delete>");

                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleteByQuery", id, "val=", nextVal);
                                writer.deleteDocuments(new TermQuery(new Term("id", Integer.toString(id))));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleteByQuery", id, "val=", nextVal, "DONE");
                            } else {
                                // model.put(id, nextVal);   // uncomment this and this test should fail.

                                // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                                Document d = new Document();
                                d.add(new Field("id", Integer.toString(id), idFt));
                                d.add(new Field(field, Long.toString(nextVal), ft2));
                                verbose("adding id", id, "val=", nextVal);
                                writer.updateDocument(new Term("id", Integer.toString(id)), d);
                                if (tombstones) {
                                    // remove tombstone after new addition (this should be optional?)
                                    verbose("deleting tombstone for id", id);
                                    writer.deleteDocuments(new Term("id", "-" + Integer.toString(id)));
                                    verbose("deleting tombstone for id", id, "DONE");
                                }

                                model.put(id, new DocInfo(0, nextVal));
                                verbose("adding id", id, "val=", nextVal, "DONE");
                            }
                        }

                        if (!before) {
                            lastId = id;
                        }
                    }
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        };

        threads.add(thread);
    }

    for (int i = 0; i < nReadThreads; i++) {
        Thread thread = new Thread("READER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.decrementAndGet() >= 0) {
                        // bias toward a recently changed doc
                        int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);

                        // when indexing, we update the index, then the model
                        // so when querying, we should first check the model, and then the index

                        DocInfo info;
                        synchronized (globalLock) {
                            info = committedModel.get(id);
                        }
                        long val = info.val;

                        IndexReader r;
                        synchronized (globalLock) {
                            r = reader;
                            r.incRef();
                        }

                        int docid = getFirstMatch(r, new Term("id", Integer.toString(id)));

                        if (docid < 0 && tombstones) {
                            // if we couldn't find the doc, look for its tombstone
                            docid = getFirstMatch(r, new Term("id", "-" + Integer.toString(id)));
                            if (docid < 0) {
                                if (val == -1L) {
                                    // expected... no doc was added yet
                                    r.decRef();
                                    continue;
                                }
                                verbose("ERROR: Couldn't find a doc  or tombstone for id", id, "using reader",
                                        r, "expected value", val);
                                fail("No documents or tombstones found for id " + id + ", expected at least "
                                        + val);
                            }
                        }

                        if (docid < 0 && !tombstones) {
                            // nothing to do - we can't tell anything from a deleted doc without tombstones
                        } else {
                            if (docid < 0) {
                                verbose("ERROR: Couldn't find a doc for id", id, "using reader", r);
                            }
                            assertTrue(docid >= 0); // we should have found the document, or its tombstone
                            Document doc = r.document(docid);
                            long foundVal = Long.parseLong(doc.get(field));
                            if (foundVal < Math.abs(val)) {
                                verbose("ERROR: id", id, "model_val=", val, " foundVal=", foundVal, "reader=",
                                        reader);
                            }
                            assertTrue(foundVal >= Math.abs(val));
                        }

                        r.decRef();
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);
    }

    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    writer.close();
    reader.close();
    dir.close();
}