Example usage for org.apache.lucene.index DirectoryReader openIfChanged

List of usage examples for org.apache.lucene.index DirectoryReader openIfChanged

Introduction

On this page you can find example usages of org.apache.lucene.index DirectoryReader.openIfChanged.

Prototype

public static DirectoryReader openIfChanged(DirectoryReader oldReader) throws IOException 

Source Link

Document

If the index has changed since the provided reader was opened, open and return a new reader; else, return null.

Usage

From source file:lux.Evaluator.java

License:Mozilla Public License

/**
 * reopen the searcher so it sees any updates.
 * Do NOT call this when operating within Solr: it interferes with Solr's management
 * of open searchers/readers./* w  ww . j a v  a 2s  .  co  m*/
 */
public void reopenSearcher() {
    LoggerFactory.getLogger(getClass()).debug("evaluator reopen searcher");
    try {
        LuxSearcher current = searcher;
        if (current != null) {
            searcher = new LuxSearcher(
                    DirectoryReader.openIfChanged((DirectoryReader) current.getIndexReader()));
            current.close();
        }
        resetURIResolver();
    } catch (IOException e) {
        throw new LuxException(e);
    }
}

From source file:net.ymate.platform.module.search.support.IndexHelper.java

License:Apache License

/**
 * Creates the helper and starts a single-threaded scheduler that periodically
 * reopens every cached IndexSearcher so searches see newly committed changes.
 *
 * @param config search configuration supplying the reopen period in seconds
 */
public IndexHelper(ISearchConfig config) {
    __isBuildWorkingSet = Collections.synchronizedSet(new HashSet<String>());
    // Period comes from config in seconds (converted to ms); fall back to 30s
    // when unset or non-positive. (Original comment was mojibake; intent
    // inferred from the code.)
    long _period = config.getScheduledPeriod() * 1000L;
    if (_period <= 0) {
        _period = 30L * 1000L;
    }
    // Scheduler that reopens readers so commits become visible to searchers.
    __scheduler = Executors.newSingleThreadScheduledExecutor();
    __scheduler.scheduleAtFixedRate(new Runnable() {

        public void run() {
            // __isWorking prevents overlapping runs. NOTE(review): it appears to
            // be a plain field -- cross-thread visibility is not guaranteed
            // unless it is declared volatile; confirm at the declaration site.
            if (__isWorking) {
                return;
            }
            __isWorking = true;
            try {
                _LOG.debug("Start Reopen Working...");
                for (Map.Entry<String, IndexSearcher> entry : Searchs.__SEARCH_CACHES.entrySet()) {
                    IndexReader _reader = entry.getValue().getIndexReader();
                    try {
                        // openIfChanged returns null when the index is unchanged;
                        // only then swap the cached searcher and release the old
                        // reader's reference.
                        IndexReader _reOpenedReader = DirectoryReader.openIfChanged((DirectoryReader) _reader);
                        if (_reOpenedReader != null && _reOpenedReader != _reader) {
                            _reader.decRef();
                            Searchs.__SEARCH_CACHES.put(entry.getKey(), new IndexSearcher(_reOpenedReader));
                        }
                    } catch (IOException ex) {
                        _LOG.error("Reopen And DecRef IndexReader Error:", ex);
                    }
                }
            } finally {
                _LOG.debug("End Reopen Working...");
                __isWorking = false;
            }
        }
    }, _period, _period, TimeUnit.MILLISECONDS);
}

From source file:org.ala.lucene.Autocompleter.java

License:Open Source License

/**
 * Opens the autocomplete reader on first use, or reopens it if the index has
 * changed, then refreshes the searcher over the (possibly new) reader.
 *
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException on any other I/O failure
 */
private void reOpenReader() throws CorruptIndexException, IOException {
    if (autoCompleteReader == null) {
        autoCompleteReader = DirectoryReader.open(autoCompleteDirectory);
    } else {
        // openIfChanged returns null when nothing changed; keep the old reader.
        DirectoryReader newReader = DirectoryReader.openIfChanged(autoCompleteReader);
        if (newReader != null) {
            // The caller owns the old reader: close it, otherwise its files
            // leak on every reopen (the original code dropped it unclosed).
            DirectoryReader oldReader = autoCompleteReader;
            autoCompleteReader = newReader;
            oldReader.close();
        }
    }

    autoCompleteSearcher = new IndexSearcher(autoCompleteReader);
}

From source file:org.apache.blur.lucene.security.IndexSearcherTest.java

License:Apache License

/**
 * Indexes documents with various read/discover ACL expressions, then validates
 * search visibility for the given authorizations across three commit points.
 *
 * @throws IOException on index I/O failure
 * @throws ParseException on query parse failure inside validate
 */
private void runTest(int expected, Collection<String> readAuthorizations,
        Collection<String> discoverAuthorizations, Collection<String> discoverableFields)
        throws IOException, ParseException {
    Directory dir = new RAMDirectory();
    {
        IndexWriter writer = new IndexWriter(dir, newWriterConfig());
        writer.addDocument(getEmpty());
        writer.commit();
        writer.addDocument(getDoc(0, "(a&b)|d", null, "f1", "f2"));
        writer.addDocument(getDoc(1, "a&b&c", null, "f1", "f2"));
        writer.addDocument(getDoc(2, "a&b&c&e", "a&b&c", "f1", "f2"));
        writer.addDocument(getDoc(3, null, null, "f1", "f2"));// can't find
        writer.close(false);
    }
    DirectoryReader reader = DirectoryReader.open(dir);
    validate(expected, 2, readAuthorizations, discoverAuthorizations, discoverableFields, dir, reader);
    {
        IndexWriter writer = new IndexWriter(dir, newWriterConfig());
        writer.deleteDocuments(new Term("id", "0"));
        writer.addDocument(getDoc(0, "(a&b)|d", null, "f1", "f2"));
        writer.close(false);
    }
    reader = reopen(reader);
    validate(expected, 3, readAuthorizations, discoverAuthorizations, discoverableFields, dir, reader);
    {
        IndexWriter writer = new IndexWriter(dir, newWriterConfig());
        writer.deleteDocuments(new Term("id", "1"));
        writer.addDocument(getDoc(1, "a&b&c", null, "f1", "f2"));
        writer.close(false);
    }
    reader = reopen(reader);
    validate(expected, 4, readAuthorizations, discoverAuthorizations, discoverableFields, dir, reader);
}

/**
 * Builds a fresh config per writer: an IndexWriterConfig instance may be used
 * by only ONE IndexWriter (Lucene throws AlreadySetException on reuse); the
 * original code reused a single config for all three writers.
 */
private static IndexWriterConfig newWriterConfig() {
    return new IndexWriterConfig(Version.LUCENE_43, new StandardAnalyzer(Version.LUCENE_43));
}

/**
 * Reopens the reader, closing the stale one. Falls back to the old reader when
 * the index is unchanged (openIfChanged returns null), avoiding the NPE the
 * original unchecked assignment would have caused downstream.
 */
private static DirectoryReader reopen(DirectoryReader reader) throws IOException {
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    if (newReader == null) {
        return reader;
    }
    reader.close();
    return newReader;
}

From source file:org.apache.blur.manager.writer.BlurIndexSimpleWriter.java

License:Apache License

/**
 * Two-phase commit (prepareCommit then commit) followed by a reader refresh so
 * searches see the committed changes. Each phase is wrapped in a Tracer span.
 *
 * @throws IOException if the commit or reader reopen fails
 */
private void commit() throws IOException {
    Tracer trace1 = Trace.trace("prepareCommit");
    // Block until the writer reference has been initialized.
    waitUntilNotNull(_writer);
    BlurIndexWriter writer = _writer.get();
    writer.prepareCommit();
    trace1.done();

    Tracer trace2 = Trace.trace("commit");
    writer.commit();
    trace2.done();

    Tracer trace3 = Trace.trace("index refresh");
    DirectoryReader currentReader = _indexReader.get();
    // openIfChanged returns null when the index is unchanged since currentReader
    // was opened -- unexpected right after a commit, hence the debug log below.
    DirectoryReader newReader = DirectoryReader.openIfChanged(currentReader);
    if (newReader == null) {
        LOG.debug("Reader should be new after commit for table [{0}] shard [{1}].", _tableContext.getTable(),
                _shardContext.getShard());
    } else {
        DirectoryReader reader = wrap(newReader);
        checkForMemoryLeaks(reader, "BlurIndexSimpleWriter - reopen table [{0}] shard [{1}]");
        // Publish the new reader under the refresh lock so concurrent readers
        // never observe a partially-swapped reference.
        _indexRefreshWriteLock.lock();
        try {
            _indexReader.set(reader);
        } finally {
            _indexRefreshWriteLock.unlock();
        }
        // Hand the old reader to the closer; presumably it defers closing until
        // in-flight searches finish -- confirm IndexCloser semantics.
        _indexCloser.close(currentReader);
    }
    trace3.done();
}

From source file:org.apache.blur.manager.writer.MutatableActionTest.java

License:Apache License

/**
 * Commits pending writes and returns a fresh reader that sees them; the stale
 * reader is closed. Fails if the commit produced no visible change.
 *
 * @throws IOException if the commit/reopen fails or the index was unchanged
 */
private DirectoryReader commitAndReopen(DirectoryReader reader, IndexWriter writer) throws IOException {
    writer.commit();
    DirectoryReader reopened = DirectoryReader.openIfChanged(reader);
    if (reopened == null) {
        // openIfChanged yields null only when nothing changed.
        throw new IOException("Should have new data.");
    }
    reader.close();
    return reopened;
}

From source file:org.apache.clerezza.rdf.cris.LuceneTools.java

License:Apache License

/**
 * Returns an IndexSearcher over the current index, lazily opening the reader
 * on first call and reopening it if the index has changed since.
 *
 * @return a searcher over the freshest available reader
 * @throws RuntimeException wrapping any IOException from open/reopen
 */
public IndexSearcher getIndexSearcher() throws RuntimeException {
    if (indexReader == null) {
        try {
            indexReader = DirectoryReader.open(indexDirectory);
            indexSearcher = new IndexSearcher(indexReader);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }

    } else {
        try {
            // openIfChanged returns null when the index is unchanged.
            DirectoryReader newReader = DirectoryReader.openIfChanged(indexReader);
            if (newReader != null) {
                final IndexReader oldReader = indexReader;
                indexReader = newReader;
                // Close the old reader after a grace period so in-flight
                // searches can finish. Bug fix: the original created this
                // thread but never called start(), so the old reader was
                // never closed (file-handle leak on every reopen).
                // (Would be better to use a ScheduledThreadPoolExecutor.)
                new Thread() {

                    @Override
                    public void run() {
                        try {
                            Thread.sleep(1000);
                        } catch (InterruptedException ex) {
                            Thread.currentThread().interrupt();
                        }
                        try {
                            oldReader.close();
                        } catch (IOException ex) {
                            throw new RuntimeException(ex);
                        }
                    }

                }.start();
            }
            indexSearcher = new IndexSearcher(indexReader);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
    }
    return indexSearcher;
}

From source file:org.apache.solr.uninverting.TestFieldCacheReopen.java

License:Apache License

/**
 * Verifies that FieldCache numeric values built against a segment reader
 * remain usable after the top-level reader is reopened: the unchanged segment
 * should keep (reuse) its cache entry rather than rebuild it.
 */
public void testFieldCacheReuseAfterReopen() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir,
            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(10)));
    Document doc = new Document();
    doc.add(new IntPoint("number", 17));
    writer.addDocument(doc);
    writer.commit();

    // Open reader1 and populate the FieldCache for its only segment.
    DirectoryReader r = DirectoryReader.open(dir);
    LeafReader r1 = getOnlyLeafReader(r);
    final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.INT_POINT_PARSER);
    assertEquals(0, ints.nextDoc());
    assertEquals(17, ints.longValue());

    // Add a new segment so openIfChanged has a change to pick up.
    writer.addDocument(doc);
    writer.commit();

    // Reopen reader1 --> reader2; a non-null result is expected because the
    // index changed. The old top-level reader can then be closed.
    DirectoryReader r2 = DirectoryReader.openIfChanged(r);
    assertNotNull(r2);
    r.close();
    LeafReader sub0 = r2.leaves().get(0).reader();
    final NumericDocValues ints2 = FieldCache.DEFAULT.getNumerics(sub0, "number", FieldCache.INT_POINT_PARSER);
    // NOTE(review): ints2 is consumed AFTER r2.close(); this appears to rely on
    // the FieldCache keeping the per-segment values alive -- confirm intended.
    r2.close();
    assertEquals(0, ints2.nextDoc());
    assertEquals(17, ints2.longValue());

    writer.close();
    dir.close();
}

From source file:org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReaderTests.java

License:Apache License

/** Test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
    Directory directory = newDirectory();
    IndexWriterConfig config = new IndexWriterConfig(null);
    config.setMaxBufferedDocs(100);
    config.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(directory, config);

    // index two documents, id:0 and id:1
    Field idField = new StringField("id", "", Field.Store.NO);
    Document document = new Document();
    document.add(idField);
    idField.setStringValue("0");
    writer.addDocument(document);
    idField.setStringValue("1");
    writer.addDocument(document);

    // open an NRT reader wrapped for a fake shard
    ShardId shard = new ShardId(new Index("fake"), 1);
    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), shard);
    assertEquals(2, reader.numDocs());
    assertEquals(1, reader.leaves().size());

    // delete id:0 and reopen
    writer.deleteDocuments(new Term("id", "0"));
    DirectoryReader reopened = DirectoryReader.openIfChanged(reader);

    // the surviving segment must expose the same core cache key as before
    assertEquals(1, reopened.numDocs());
    assertEquals(1, reopened.leaves().size());
    assertSame(reader.leaves().get(0).reader().getCoreCacheKey(),
            reopened.leaves().get(0).reader().getCoreCacheKey());

    // deletes changed, so the combined core+deletes key must differ
    assertNotSame(reader.leaves().get(0).reader().getCombinedCoreAndDeletesKey(),
            reopened.leaves().get(0).reader().getCombinedCoreAndDeletesKey());

    IOUtils.close(reader, reopened, writer, directory);
}

From source file:org.elasticsearch.common.lucene.index.ESDirectoryReaderTests.java

License:Apache License

/** Test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMaxBufferedDocs(100);
    // Disable merges so the original segment survives the delete unchanged.
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add two docs, id:0 and id:1
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    iw.addDocument(doc);
    idField.setStringValue("1");
    iw.addDocument(doc);

    // open an NRT reader, wrapped for a fake shard
    ShardId shardId = new ShardId(new Index("fake"), 1);
    DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw, true), shardId);
    assertEquals(2, ir.numDocs());
    assertEquals(1, ir.leaves().size());

    // delete id:0 and reopen; openIfChanged picks up the uncommitted delete
    iw.deleteDocuments(new Term("id", "0"));
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);

    // we should have the same cache key as before: the segment core is reused
    // across the reopen even though a deletion was applied
    assertEquals(1, ir2.numDocs());
    assertEquals(1, ir2.leaves().size());
    assertSame(ir.leaves().get(0).reader().getCoreCacheKey(), ir2.leaves().get(0).reader().getCoreCacheKey());
    IOUtils.close(ir, ir2, iw, dir);
}