Example usage for org.apache.lucene.index DirectoryReader getVersion

Introduction

This page shows example usages of org.apache.lucene.index.DirectoryReader.getVersion().

Prototype

public abstract long getVersion();

Document

Returns the version number of the index as of the moment this IndexReader was opened.
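
Before the real-world usages below, here is a minimal sketch of the method in isolation (written against the Lucene 4.x API that these examples target; the "index" path is a hypothetical location of an existing index). The version is fixed when the reader is opened and grows as changes are committed, so comparing versions across reopens tells you whether the index changed.

import java.io.File;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class GetVersionExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical path to an existing Lucene index.
        Directory dir = FSDirectory.open(new File("index"));
        DirectoryReader reader = DirectoryReader.open(dir);
        try {
            // Fixed at open time; increases whenever changes are committed.
            System.out.println("version = " + reader.getVersion());
            // True only if the index has not changed since the reader was opened.
            System.out.println("current = " + reader.isCurrent());
        } finally {
            reader.close();
            dir.close();
        }
    }
}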

Usage

From source file:com.github.rnewson.couchdb.lucene.DatabaseIndexer.java

License:Apache License

public void info(final HttpServletRequest req, final HttpServletResponse resp)
        throws IOException, JSONException {
    final IndexState state = getState(req, resp);
    if (state == null)
        return;
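    // Borrow a point-in-time reader from the index state; it is handed back
    // in the finally block below.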
    final DirectoryReader reader = state.borrowReader(true);
    try {
        final JSONObject result = new JSONObject();
        result.put("current", reader.isCurrent());
        result.put("disk_size", Utils.directorySize(reader.directory()));
        result.put("doc_count", reader.numDocs());
        result.put("doc_del_count", reader.numDeletedDocs());
        result.put("uuid", state.getUuid());
        result.put("digest", state.getDigest());
        result.put("update_seq", getUpdateSequence(reader.getIndexCommit().getUserData()));
        final JSONArray fields = new JSONArray();
        for (AtomicReaderContext leaf : reader.leaves()) {
            for (FieldInfo info : leaf.reader().getFieldInfos()) {
                if (info.name.startsWith("_")) {
                    continue;
                }
                if (info.isIndexed()) {
                    fields.put(info.name);
                }
            }
        }
        result.put("fields", fields);
        result.put("version", reader.getVersion());
        result.put("ref_count", reader.getRefCount());

        final JSONObject info = new JSONObject();
        info.put("code", 200);
        info.put("json", result);

        ServletUtils.setResponseContentTypeAndEncoding(req, resp);
        final Writer writer = resp.getWriter();
        try {
            writer.write(result.toString());
        } finally {
            writer.close();
        }
    } finally {
        state.returnReader(reader);
    }
}

From source file:com.lucid.solr.sidecar.SidecarIndexReader.java

License:Apache License

public SidecarIndexReader(SidecarIndexReaderFactory factory, DirectoryReader main,
        AtomicReader[] sidecarReaders, AtomicReader[] parallelReaders, String boostData, File sidecarDir)
        throws IOException {
    super(main.directory(), parallelReaders);
    assert assertSaneReaders(parallelReaders);
    //LOG.info("SidecarIndexReader: new " + this);
    this.factory = factory;
    this.main = main;
    this.parallelReaders = parallelReaders;
    this.sidecarReaders = sidecarReaders;
    //this.parallel = parallel;
    this.mainReaders = getSequentialSubReaders(main);
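    // Mirror the main reader's version so this composite reader reports the
    // same point-in-time snapshot as the index it wraps.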
    resourcesLastModified = main.getVersion();
    this.version = resourcesLastModified;
    this.boostData = boostData;
    this.dir = main.directory();
    this.sidecarDir = sidecarDir;
}

From source file:org.apache.solr.handler.admin.LukeRequestHandler.java

License:Apache License

public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
    Directory dir = reader.directory();
    SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<Object>();

    indexInfo.add("numDocs", reader.numDocs());
    indexInfo.add("maxDoc", reader.maxDoc());
    indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
    indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));

    indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
    indexInfo.add("segmentCount", reader.leaves().size());
    indexInfo.add("current", reader.isCurrent());
    indexInfo.add("hasDeletions", reader.hasDeletions());
    indexInfo.add("directory", dir);
    indexInfo.add("userData", reader.getIndexCommit().getUserData());
    String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
    if (s != null) {
        indexInfo.add("lastModified", new Date(Long.parseLong(s)));
    }
    return indexInfo;
}

From source file:org.apache.solr.handler.component.AlfrescoLukeRequestHandler.java

License:Open Source License

public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
    Directory dir = reader.directory();
    SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<>();

    indexInfo.add("numDocs", reader.numDocs());
    indexInfo.add("maxDoc", reader.maxDoc());
    indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
    indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));

    indexInfo.add("version", reader.getVersion()); // TODO? Is this
    // different then:
    // IndexReader.getCurrentVersion(
    // dir )?
    indexInfo.add("segmentCount", reader.leaves().size());
    indexInfo.add("current", reader.isCurrent());
    indexInfo.add("hasDeletions", reader.hasDeletions());
    indexInfo.add("directory", dir);
    indexInfo.add("userData", reader.getIndexCommit().getUserData());
    String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
    if (s != null) {
        indexInfo.add("lastModified", new Date(Long.parseLong(s)));
    }
    return indexInfo;
}

From source file:org.apache.solr.schema.RandomSortField.java

License:Apache License

/** 
 * Given a field name and an IndexReader, get a random hash seed.
 * Using dynamic fields, you can force the random order to change 
 */
private static int getSeed(String fieldName, AtomicReaderContext context) {
    final DirectoryReader top = (DirectoryReader) ReaderUtil.getTopLevelContext(context).reader();
    // calling getVersion() on a segment will currently give you a null pointer exception, so
    // we use the top-level reader.
    return fieldName.hashCode() + context.docBase + (int) top.getVersion();
}

From source file:org.apache.solr.search.TestStressLucene.java

License:Apache License

@Test
public void testStressLuceneNRT() throws Exception {
    final int commitPercent = 5 + random().nextInt(20);
    final int softCommitPercent = 30 + random().nextInt(75); // what percent of the commits are soft
    final int deletePercent = 4 + random().nextInt(25);
    final int deleteByQueryPercent = 1 + random().nextInt(5);
    final int ndocs = 5 + (random().nextBoolean() ? random().nextInt(25) : random().nextInt(200));
    int nWriteThreads = 5 + random().nextInt(25);

    final int maxConcurrentCommits = nWriteThreads; // number of committers at a time... it should be <= maxWarmingSearchers

    final AtomicLong operations = new AtomicLong(100000); // number of query operations to perform in total
    int nReadThreads = 5 + random().nextInt(25);
    final boolean tombstones = random().nextBoolean();
    final boolean syncCommits = random().nextBoolean();

    verbose("commitPercent=", commitPercent);
    verbose("softCommitPercent=", softCommitPercent);
    verbose("deletePercent=", deletePercent);
    verbose("deleteByQueryPercent=", deleteByQueryPercent);
    verbose("ndocs=", ndocs);
    verbose("nWriteThreads=", nWriteThreads);
    verbose("nReadThreads=", nReadThreads);
    verbose("maxConcurrentCommits=", maxConcurrentCommits);
    verbose("operations=", operations);
    verbose("tombstones=", tombstones);
    verbose("syncCommits=", syncCommits);

    initModel(ndocs);

    final AtomicInteger numCommitting = new AtomicInteger();

    List<Thread> threads = new ArrayList<Thread>();

    final FieldType idFt = new FieldType();
    idFt.setIndexed(true);
    idFt.setStored(true);
    idFt.setOmitNorms(true);
    idFt.setTokenized(false);
    idFt.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);

    final FieldType ft2 = new FieldType();
    ft2.setIndexed(false);
    ft2.setStored(true);

    // model how solr does locking - only allow one thread to do a hard commit at once, and only one thread to do a soft commit, but
    // a hard commit in progress does not stop a soft commit.
    final Lock hardCommitLock = syncCommits ? new ReentrantLock() : null;
    final Lock reopenLock = syncCommits ? new ReentrantLock() : null;

    // RAMDirectory dir = new RAMDirectory();
    // final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_40, new WhitespaceAnalyzer(Version.LUCENE_40)));

    Directory dir = newDirectory();

    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    writer.setDoRandomForceMergeAssert(false);

    // writer.commit();
    // reader = IndexReader.open(dir);
    // make this reader an NRT reader from the start so the first non-writer
    // openIfChanged does not open at the last commit point only.
    reader = DirectoryReader.open(writer.w, true);

    for (int i = 0; i < nWriteThreads; i++) {
        Thread thread = new Thread("WRITER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.get() > 0) {
                        int oper = rand.nextInt(100);

                        if (oper < commitPercent) {
                            if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
                                Map<Integer, DocInfo> newCommittedModel;
                                long version;
                                DirectoryReader oldReader;

                                boolean softCommit = rand.nextInt(100) < softCommitPercent;

                                if (!softCommit) {
                                    // only allow one hard commit to proceed at once
                                    if (hardCommitLock != null)
                                        hardCommitLock.lock();
                                    verbose("hardCommit start");

                                    writer.commit();
                                }

                                if (reopenLock != null)
                                    reopenLock.lock();

                                synchronized (globalLock) {
                                    newCommittedModel = new HashMap<Integer, DocInfo>(model); // take a snapshot
                                    version = snapshotCount++;
                                    oldReader = reader;
                                    oldReader.incRef(); // increment the reference since we will use this for reopening
                                }

                                if (!softCommit) {
                                    // must commit after taking a snapshot of the model
                                    // writer.commit();
                                }

                                verbose("reopen start using", oldReader);

                                DirectoryReader newReader;
                                if (softCommit) {
                                    newReader = DirectoryReader.openIfChanged(oldReader, writer.w, true);
                                } else {
                                    // will only open to last commit
                                    newReader = DirectoryReader.openIfChanged(oldReader);
                                }

                                if (newReader == null) {
                                    oldReader.incRef();
                                    newReader = oldReader;
                                }
                                oldReader.decRef();

                                verbose("reopen result", newReader);

                                synchronized (globalLock) {
                                    assert newReader.getRefCount() > 0;
                                    assert reader.getRefCount() > 0;

                                    // install the new reader if it's newest (and check the current version since another reader may have already been installed)
                                    if (newReader.getVersion() > reader.getVersion()) {
                                        reader.decRef();
                                        reader = newReader;

                                        // install this snapshot only if it's newer than the current one
                                        if (version >= committedModelClock) {
                                            committedModel = newCommittedModel;
                                            committedModelClock = version;
                                        }

                                    } else {
                                        // close if unused
                                        newReader.decRef();
                                    }

                                }

                                if (reopenLock != null)
                                    reopenLock.unlock();

                                if (!softCommit) {
                                    if (hardCommitLock != null)
                                        hardCommitLock.unlock();
                                }

                            }
                            numCommitting.decrementAndGet();
                            continue;
                        }

                        int id = rand.nextInt(ndocs);
                        Object sync = syncArr[id];

                        // sometimes set lastId before we actually change the doc, to try to
                        // uncover more race conditions between writing and reading
                        boolean before = rand.nextBoolean();
                        if (before) {
                            lastId = id;
                        }

                        // We can't concurrently update the same document and retain our invariants of increasing values
                        // since we can't guarantee what order the updates will be executed.
                        synchronized (sync) {
                            DocInfo info = model.get(id);
                            long val = info.val;
                            long nextVal = Math.abs(val) + 1;

                            if (oper < commitPercent + deletePercent) {
                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleting id", id, "val=", nextVal);
                                writer.deleteDocuments(new Term("id", Integer.toString(id)));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleting id", id, "val=", nextVal, "DONE");

                            } else if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
                                //assertU("<delete><query>id:" + id + "</query></delete>");

                                // add tombstone first
                                if (tombstones) {
                                    Document d = new Document();
                                    d.add(new Field("id", "-" + Integer.toString(id), idFt));
                                    d.add(new Field(field, Long.toString(nextVal), ft2));
                                    verbose("adding tombstone for id", id, "val=", nextVal);
                                    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);
                                }

                                verbose("deleteByQuery", id, "val=", nextVal);
                                writer.deleteDocuments(new TermQuery(new Term("id", Integer.toString(id))));
                                model.put(id, new DocInfo(0, -nextVal));
                                verbose("deleteByQuery", id, "val=", nextVal, "DONE");
                            } else {
                                // model.put(id, nextVal);   // uncomment this and this test should fail.

                                // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                                Document d = new Document();
                                d.add(new Field("id", Integer.toString(id), idFt));
                                d.add(new Field(field, Long.toString(nextVal), ft2));
                                verbose("adding id", id, "val=", nextVal);
                                writer.updateDocument(new Term("id", Integer.toString(id)), d);
                                if (tombstones) {
                                    // remove tombstone after new addition (this should be optional?)
                                    verbose("deleting tombstone for id", id);
                                    writer.deleteDocuments(new Term("id", "-" + Integer.toString(id)));
                                    verbose("deleting tombstone for id", id, "DONE");
                                }

                                model.put(id, new DocInfo(0, nextVal));
                                verbose("adding id", id, "val=", nextVal, "DONE");
                            }
                        }

                        if (!before) {
                            lastId = id;
                        }
                    }
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        };

        threads.add(thread);
    }

    for (int i = 0; i < nReadThreads; i++) {
        Thread thread = new Thread("READER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.decrementAndGet() >= 0) {
                        // bias toward a recently changed doc
                        int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);

                        // when indexing, we update the index, then the model
                        // so when querying, we should first check the model, and then the index

                        DocInfo info;
                        synchronized (globalLock) {
                            info = committedModel.get(id);
                        }
                        long val = info.val;

                        IndexReader r;
                        synchronized (globalLock) {
                            r = reader;
                            r.incRef();
                        }

                        int docid = getFirstMatch(r, new Term("id", Integer.toString(id)));

                        if (docid < 0 && tombstones) {
                            // if we couldn't find the doc, look for its tombstone
                            docid = getFirstMatch(r, new Term("id", "-" + Integer.toString(id)));
                            if (docid < 0) {
                                if (val == -1L) {
                                    // expected... no doc was added yet
                                    r.decRef();
                                    continue;
                                }
                                verbose("ERROR: Couldn't find a doc  or tombstone for id", id, "using reader",
                                        r, "expected value", val);
                                fail("No documents or tombstones found for id " + id + ", expected at least "
                                        + val);
                            }
                        }

                        if (docid < 0 && !tombstones) {
                            // nothing to do - we can't tell anything from a deleted doc without tombstones
                        } else {
                            if (docid < 0) {
                                verbose("ERROR: Couldn't find a doc for id", id, "using reader", r);
                            }
                            assertTrue(docid >= 0); // we should have found the document, or its tombstone
                            Document doc = r.document(docid);
                            long foundVal = Long.parseLong(doc.get(field));
                            if (foundVal < Math.abs(val)) {
                                verbose("ERROR: id", id, "model_val=", val, " foundVal=", foundVal, "reader=",
                                        reader);
                            }
                            assertTrue(foundVal >= Math.abs(val));
                        }

                        r.decRef();
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);
    }

    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    writer.close();
    reader.close();
    dir.close();
}

From source file:org.dspace.search.DSQuery.java

License:BSD License

/**
 * Get an IndexSearcher, preferably a cached one (this gives much better
 * performance). Checks whether the index has been modified; if so, it
 * creates a new IndexSearcher.
 */
protected static synchronized IndexSearcher getSearcher(Context c) throws IOException {

    // If we have already opened a searcher, check to see if the index has been updated
    // If it has, we need to close the existing searcher - we will open a new one later

    Directory searchDir = FSDirectory.open(new File(indexDir));
    DirectoryReader reader = DirectoryReader.open(searchDir);
    if (searcher != null && lastModified != reader.getVersion()) {
        try {
            // Close the cached IndexSearcher
            searcher.getIndexReader().close();
        } catch (IOException ioe) {
            // Index is probably corrupt. Log the error, but continue to either:
            // 1) Return existing searcher (may yet throw exception, no worse than throwing here)
            log.warn("DSQuery: Unable to check for updated index", ioe);
        } finally {
            searcher = null;
        }
    }

    // There is no existing searcher - either this is the first execution,
    // or the index has been updated and we closed the old index.
    if (searcher == null) {
        // So, open a new searcher
        lastModified = reader.getVersion();
        searcher = new IndexSearcher(reader);

    } else {
        reader.close();
        searchDir.close();
    }

    return searcher;
}

From source file:org.dspace.search.LuceneIndex.java

License:BSD License

/**
 * Get an IndexSearcher, preferably a cached one (this gives much better
 * performance). Checks whether the index has been modified; if so, it
 * creates a new IndexSearcher.
 */
protected synchronized IndexSearcher getSearcher() throws IOException {

    // If we have already opened a searcher, check to see if the index has been updated
    // If it has, we need to close the existing searcher - we will open a new one later
    Directory searchDir = FSDirectory.open(new File(indexDirectory));
    DirectoryReader idxReader = DirectoryReader.open(searchDir);//getSearcher().getIndexReader();
    if (searcher != null && lastModified != idxReader.getVersion()) { /*
                                                                        try
                                                                        {
                                                                      // Close the cached IndexSearcher
                                                                      // RLR FIXME
                                                                      //searcher.close();
                                                                        }
                                                                        catch (IOException ioe)
                                                                        {
                                                                      // Index is probably corrupt. Log the error, but continue to either:
                                                                      // 1) Return existing searcher (may yet throw exception, no worse than throwing here)
                                                                      log.warn("DSQuery: Unable to check for updated index", ioe);
                                                                        }
                                                                        finally
                                                                        {
                                                                           searcher = null;
                                                                        }
                                                                        */
    }

    // There is no existing searcher - either this is the first execution,
    // or the index has been updated and we closed the old index.
    if (searcher == null) {
        // So, open a new searcher
        lastModified = idxReader.getVersion();
        String osName = System.getProperty("os.name");
        // RLR TODO - check Read only restriction here
        IndexReader reader = IndexReader.open(searchDir);
        if (osName != null && osName.toLowerCase().contains("windows")) {
            searcher = new IndexSearcher(reader) {
                /*
                 * TODO: Has Lucene fixed this bug yet?
                 * Lucene doesn't release read locks in
                 * Windows properly on finalize. Our hack
                 * extends IndexSearcher to force close().
                 */
                @Override
                protected void finalize() throws Throwable {
                    //RLR FIXME
                    //this.close();
                    super.finalize();
                }
            };
        } else {
            searcher = new IndexSearcher(reader);
        }
    }

    return searcher;
}

From source file:org.elasticsearch.indices.IndicesRequestCache.java

License:Apache License

BytesReference getOrCompute(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey)
        throws Exception {
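    // The reader's version identifies the point-in-time view of the index, so
    // entries cached for an older generation are not returned for a newer one.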
    final Key key = new Key(cacheEntity, reader.getVersion(), cacheKey);
    Loader loader = new Loader(cacheEntity);
    Value value = cache.computeIfAbsent(key, loader);
    if (loader.isLoaded()) {
        key.entity.onMiss();
        // see if it's the first time we see this reader, and make sure to register a cleanup key
        CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getVersion());
        if (!registeredClosedListeners.containsKey(cleanupKey)) {
            Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
            if (previous == null) {
                ElasticsearchDirectoryReader.addReaderCloseListener(reader, cleanupKey);
            }
        }
    } else {
        key.entity.onHit();
    }
    return value.reference;
}