Example usage for org.apache.lucene.index MultiReader MultiReader

List of usage examples for org.apache.lucene.index MultiReader MultiReader

Introduction

On this page you can find example usages of the org.apache.lucene.index MultiReader constructor.

Prototype

public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) throws IOException 

Source Link

Document

Construct a MultiReader aggregating the named set of (sub)readers.

Usage

From source file:com.sindicetech.siren.search.node.TestNodeFuzzyQuery.java

License:Open Source License

/**
 * MultiTermQuery provides (via attribute) information about which values
 * must be competitive to enter the priority queue.
 *
 * FuzzyQuery optimizes itself around this information, if the attribute
 * is not implemented correctly, there will be problems!
 */
public void testTieBreaker() throws Exception {
    // First index, written through the test class's shared 'writer'.
    this.addDocument("<a123456>");
    this.addDocument("<c123456>");
    this.addDocument("<d123456>");
    this.addDocument("<e123456>");

    // Second, separate index with overlapping ("a", "c") and duplicated ("b") terms.
    final Directory directory2 = newDirectory();
    final RandomIndexWriter writer2 = newRandomIndexWriter(directory2, analyzer, codec);
    addDocument(writer2, "<a123456>");
    addDocument(writer2, "<b123456>");
    addDocument(writer2, "<b123456>");
    addDocument(writer2, "<b123456>");
    addDocument(writer2, "<c123456>");
    addDocument(writer2, "<f123456>");

    final IndexReader ir1 = writer.getReader();
    final IndexReader ir2 = writer2.getReader();

    // Search both indexes through one MultiReader so term statistics are merged.
    final MultiReader mr = new MultiReader(ir1, ir2);
    final IndexSearcher searcher = newSearcher(mr);
    // Args presumably (maxEdits=1, prefixLength=0, maxExpansions=2, transpositions=false),
    // matching Lucene's FuzzyQuery ctor: capping expansions at 2 forces the
    // competitive-value tie-break logic to pick which terms enter the query.
    final FuzzyQuery fq = new FuzzyQuery(new Term(DEFAULT_TEST_FIELD, "z123456"), 1, 0, 2, false);
    final TopDocs docs = searcher.search(fq, 2);
    assertEquals(5, docs.totalHits); // 5 docs, from the a and b's

    // NOTE(review): ir1 is not closed explicitly; presumably the MultiReader
    // varargs ctor (closeSubReaders=true) and/or the test teardown handles it.
    mr.close();
    ir2.close();
    writer2.close();
    directory2.close();
}

From source file:com.tuplejump.stargate.IndexContainer.java

License:Apache License

/**
 * Runs the given callback against a single IndexSearcher spanning every
 * indexer whose token range intersects the callback's filter range.
 *
 * Fix: the MultiReader/IndexSearcher construction is moved inside the
 * try block (matching PerVNodeIndexContainer.search) so that acquired
 * per-indexer searchers are always released — the original leaked them
 * if construction threw before the try was entered.
 *
 * @param searcherCallback supplies the token-range filter and performs the search
 * @return whatever the callback returns
 */
public <T> T search(SearcherCallback<T> searcherCallback) {
    List<IndexReader> indexReaders = new ArrayList<>();
    Map<Indexer, IndexSearcher> indexSearchers = new HashMap<>();
    for (Map.Entry<Range<Token>, Indexer> entry : indexers.entrySet()) {
        Range<Token> range = entry.getKey();
        boolean intersects = intersects(searcherCallback.filterRange(), searcherCallback.isSingleToken(),
                searcherCallback.isFullRange(), range);
        if (intersects) {
            Indexer indexer = entry.getValue();
            IndexSearcher searcher = indexer.acquire();
            // Track the acquired searcher so it is released in the finally block.
            indexSearchers.put(indexer, searcher);
            indexReaders.add(searcher.getIndexReader());
        }
    }
    IndexReader[] indexReadersArr = new IndexReader[indexReaders.size()];
    indexReaders.toArray(indexReadersArr);
    MultiReader multiReader = null;
    try {
        // closeSubReaders=false: the sub-readers are owned by the acquired
        // searchers and are released below, not by multiReader.close().
        multiReader = new MultiReader(indexReadersArr, false);
        IndexSearcher allSearcher = new IndexSearcher(multiReader, executorService);
        return searcherCallback.doWithSearcher(allSearcher);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        try {
            if (multiReader != null)
                multiReader.close();
        } catch (IOException e) {
            logger.error("Could not close reader", e);
        }
        // Always hand every acquired searcher back to its indexer.
        for (Map.Entry<Indexer, IndexSearcher> entry : indexSearchers.entrySet()) {
            entry.getKey().release(entry.getValue());
        }
    }
}

From source file:com.tuplejump.stargate.PerVNodeIndexContainer.java

License:Apache License

/**
 * Searches across every indexer whose token range intersects the callback's
 * filter range, exposing them to the callback as one merged IndexSearcher.
 *
 * @param searcherCallback supplies the token-range filter and performs the search
 * @return whatever the callback returns
 */
@Override
public <T> T search(SearcherCallback<T> searcherCallback) {
    List<IndexReader> readers = new ArrayList<>();
    Map<Indexer, IndexSearcher> acquired = new HashMap<>();
    for (Map.Entry<Range<Token>, Indexer> indexerEntry : indexers.entrySet()) {
        // Skip indexers outside the callback's range of interest.
        if (!intersects(searcherCallback.filterRange(), searcherCallback.isSingleToken(),
                searcherCallback.isFullRange(), indexerEntry.getKey())) {
            continue;
        }
        Indexer indexer = indexerEntry.getValue();
        IndexSearcher perIndexSearcher = indexer.acquire();
        acquired.put(indexer, perIndexSearcher);
        readers.add(perIndexSearcher.getIndexReader());
    }
    MultiReader merged = null;
    try {
        // closeSubReaders=false: sub-readers stay owned by the acquired searchers.
        merged = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false);
        return searcherCallback.doWithSearcher(new IndexSearcher(merged, executorService));
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        try {
            if (merged != null) {
                merged.close();
            }
        } catch (IOException e) {
            logger.error("Could not close reader", e);
        }
        // Release every searcher we acquired, regardless of search outcome.
        for (Map.Entry<Indexer, IndexSearcher> releaseEntry : acquired.entrySet()) {
            releaseEntry.getKey().release(releaseEntry.getValue());
        }
    }
}

From source file:eu.eexcess.sourceselection.redde.Redde.java

License:Apache License

/**
 * Lazily opens — and caches in generalizedSampleDatabaseReader — a MultiReader
 * aggregating one DirectoryReader per sampled test-set index.
 *
 * Fixes: the reader array is sized from the collection actually iterated
 * (testSets) instead of Settings.testSets() — a size mismatch would leave null
 * slots or overflow the array; and readers already opened are closed if a
 * later open fails, instead of being leaked.
 *
 * @return the cached aggregating reader (closeSubReaders=true: closing it
 *         closes the per-index readers)
 * @throws IOException if any sampled index cannot be opened
 */
private MultiReader openGeneralizedSampleDatabase() throws IOException {

    if (generalizedSampleDatabaseReader != null) {
        return generalizedSampleDatabaseReader;
    }
    IndexReader[] readers = new IndexReader[testSets.size()];
    int idx = 0;
    try {
        for (TestIndexSettings setting : testSets) {
            readers[idx] = DirectoryReader.open(FSDirectory.open(new File(setting.sampledIndexPath)));
            idx++;
        }
    } catch (IOException e) {
        // Best-effort cleanup of the readers opened before the failure.
        for (int i = 0; i < idx; i++) {
            try {
                readers[i].close();
            } catch (IOException ignored) {
                // suppress: the original failure is the one worth reporting
            }
        }
        throw e;
    }

    generalizedSampleDatabaseReader = new MultiReader(readers, true);
    return generalizedSampleDatabaseReader;
}

From source file:iac.cnr.it.Searcher.java

License:Apache License

/**
 * Opens a searcher over the given sub-index directories, aggregated through a
 * single MultiReader (closeSubReaders=true: closing multiReader closes them all).
 *
 * Fix: if opening one sub-index fails part-way through the loop, the readers
 * already opened are now closed instead of being leaked.
 *
 * @param subIndexes filesystem paths of the sub-indexes to open
 * @throws IOException if any sub-index cannot be opened
 */
public Searcher(String[] subIndexes) throws IOException {

    IndexReader[] subReaders = new IndexReader[subIndexes.length];
    int opened = 0;
    try {
        for (int i = 0; i < subIndexes.length; i++) {
            subReaders[i] = DirectoryReader.open(FSDirectory.open(Paths.get(subIndexes[i])));
            opened++;
        }
    } catch (IOException e) {
        // Best-effort cleanup of readers opened before the failure.
        for (int i = 0; i < opened; i++) {
            try {
                subReaders[i].close();
            } catch (IOException ignored) {
                // suppress: propagate the original open failure
            }
        }
        throw e;
    }

    multiReader = new MultiReader(subReaders, true);

    searcher = new IndexSearcher(multiReader);
    field = "contents";

    Analyzer analyzer = new StandardAnalyzer();

    parser = new QueryParser(field, analyzer);
}

From source file:io.druid.extension.lucene.LuceneDruidSegment.java

License:Apache License

/**
 * Gets an index reader for search. This can be accessed by multiple threads
 * and cannot be blocking.
 *
 * @return an index reader containing in memory realtime index as well as
 *         persisted indexes. Null if the index is either closed or has no
 *         documents yet indexed.
 * @throws IOException
 */
public IndexReader getIndexReader() throws IOException {
    // Closed segment: nothing to read.
    if (!isOpen) {
        return null;
    }
    List<DirectoryReader> allReaders = Lists.newArrayListWithCapacity(persistedReaders.size() + 1);
    allReaders.addAll(persistedReaders);
    // Snapshot the field once so the null check and the add see the same value.
    DirectoryReader realtime = realtimeReader;
    if (realtime != null) {
        allReaders.add(realtime);
    }
    if (allReaders.isEmpty()) {
        return null;
    }
    // closeSubReaders=false: the underlying readers are managed elsewhere.
    return new MultiReader(allReaders.toArray(new IndexReader[allReaders.size()]), false);
}

From source file:io.yucca.lucene.FieldRemover.java

License:Apache License

/**
 * Remove fields from an index. All readers and writer are closed on
 * completion or on an exception./*from  w w  w  . j av  a2 s  .  c o m*/
 * 
 * @param reader
 *            IndexReader
 * @param writer
 *            IndexWriter
 *            File destination index directory
 * @param fields
 *            String[] fields to remove
 */
public void removeFields(IndexReader reader, IndexWriter writer, String[] fields) {
    Set<String> removals = toTrimmedSet(fields);
    List<AtomicReaderContext> leaves = reader.leaves();
    AtomicReader wrappedLeaves[] = new AtomicReader[leaves.size()];
    for (int i = 0; i < leaves.size(); i++) {
        wrappedLeaves[i] = new FieldFilterAtomicReader(leaves.get(i).reader(), removals, true);
    }
    try {
        MultiReader mr = new MultiReader(wrappedLeaves, true);
        writer.addIndexes(mr);
        writer.commit();
        writer.close();
        mr.close();
    } catch (IOException e) {
        log.error("Writing new index failed.", e);
    } finally {
        IOUtils.closeWhileHandlingException(reader);
        IOUtils.closeWhileHandlingException(writer);
        IOUtils.closeWhileHandlingException(writer.getDirectory());
    }
}

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

License:Open Source License

/**
 * Get the main index reader augmented with the specified TX data As above but we add the TX data
 *
 * @param id String transaction identifier (must not be null)
 * @param deleteOnlyNodes boolean
 * @return IndexReader a single-use, reference-counted read-only reader combining
 *         the main index (minus any deletions) with the transaction's delta
 * @throws IOException
 */
public IndexReader getMainIndexReferenceCountingReadOnlyIndexReader(String id, Set<String> deletions,
        Set<String> containerDeletions, boolean deleteOnlyNodes) throws IOException {
    if (id == null) {
        throw new IndexerException("\"null\" is not a valid identifier for a transaction");
    }
    getReadLock();
    try {
        // If the index is shared and the on-disk version has moved on, invalidate
        // the cached main reader so it is rebuilt below.
        if (indexIsShared && !checkVersion()) {
            releaseReadLock();
            getWriteLock();
            try {
                if (mainIndexReader != null) {
                    ((ReferenceCounting) mainIndexReader).setInvalidForReuse();
                }
                mainIndexReader = null;
            } finally {
                // Re-acquire the read lock before dropping the write lock so the
                // thread never holds no lock at all between the two.
                getReadLock();
                releaseWriteLock();
            }
        }

        if (mainIndexReader == null) {
            releaseReadLock();
            getWriteLock();
            try {
                // Double-check under the write lock: another thread may already
                // have rebuilt the reader while we waited.
                if (mainIndexReader == null) {
                    // Sync with disk image if required
                    doWithFileLock(new LockWork<Object>() {
                        public Object doWork() throws Exception {
                            return null;
                        }

                        public boolean canRetry() {
                            return true;
                        }

                    });
                    mainIndexReader = createMainIndexReader();

                }
            } finally {
                getReadLock();
                releaseWriteLock();
            }
        }
        // Combine the index delta with the main index
        // Make sure the index is written to disk
        // TODO: Should use the in memory index but we often end up forcing
        // to disk anyway.
        // Is it worth it?
        // luceneIndexer.flushPending();

        IndexReader deltaReader = buildAndRegisterDeltaReader(id);
        IndexReader reader = null;
        if ((deletions == null || deletions.size() == 0)
                && (containerDeletions == null || containerDeletions.size() == 0)) {
            // No deletions to apply: merge main + delta directly.
            reader = new MultiReader(new IndexReader[] { mainIndexReader, deltaReader }, false);
        } else {
            // Deletions present: view the main index through a filter that hides
            // the deleted documents, then merge that with the delta.
            IndexReader filterReader = new FilterIndexReaderByStringId("main+id", mainIndexReader, deletions,
                    containerDeletions, deleteOnlyNodes);
            reader = new MultiReader(new IndexReader[] { filterReader, deltaReader }, false);
            // Cancel out extra incRef made by MultiReader
            filterReader.decRef();
        }

        // The reference count would have been incremented automatically by MultiReader /
        // FilterIndexReaderByStringId
        deltaReader.decRef();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Main index reader references = "
                    + ((ReferenceCounting) mainIndexReader).getReferenceCount());
        }
        // Wrap in a reference-counting read-only reader and mark it invalid for
        // reuse: each caller must fetch a fresh per-transaction view.
        reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(MAIN_READER + id, reader, false,
                config);
        ReferenceCounting refCounting = (ReferenceCounting) reader;
        reader.incRef();
        refCounting.setInvalidForReuse();
        return reader;
    } finally {
        releaseReadLock();
    }
}

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

License:Open Source License

/**
 * Builds the main index reader by chaining the committed index entries:
 * plain INDEX entries are merged into the accumulated reader with a
 * MultiReader, while DELTA entries first filter the accumulated reader by
 * their recorded deletions before merging. Intermediate chained readers are
 * cached in mainIndexReaders, keyed by entry id.
 *
 * Fix: removed the unused local {@code oldReaderIsSubReader}, which was
 * computed but never read.
 *
 * @return a reference-counting read-only reader over all committed entries,
 *         or a reader over the empty index if there are none
 * @throws IOException if a filter reader cannot be built
 */
private IndexReader createMainIndexReader() throws IOException {
    IndexReader reader = null;
    IndexReader oldReader = null;
    for (String id : indexEntries.keySet()) {
        IndexEntry entry = indexEntries.get(id);
        if (entry.getStatus().isCommitted()) {
            IndexReader subReader = getReferenceCountingIndexReader(id);
            if (reader == null) {
                // First committed entry: it becomes the initial accumulated reader.
                reader = subReader;
            } else {
                oldReader = reader;
                // Reuse the cached chained reader for this entry if one exists.
                reader = mainIndexReaders.get(id);
                if (reader == null) {
                    if (entry.getType() == IndexType.INDEX) {
                        reader = new MultiReader(new IndexReader[] { oldReader, subReader }, false);
                    } else if (entry.getType() == IndexType.DELTA) {
                        try {
                            // Hide this delta's deletions from the accumulated
                            // reader before merging the delta's own documents in.
                            IndexReader filterReader = new FilterIndexReaderByStringId(id, oldReader,
                                    getDeletions(entry.getName(), INDEX_INFO_DELETIONS),
                                    getDeletions(entry.getName(), INDEX_INFO_CONTAINER_DELETIONS),
                                    entry.isDeletOnlyNodes());
                            reader = new MultiReader(new IndexReader[] { filterReader, subReader }, false);
                            // Cancel out the incRef on the filter reader
                            filterReader.decRef();
                        } catch (IOException ioe) {
                            s_logger.error("Failed building filter reader beneath " + entry.getName(), ioe);
                            throw ioe;
                        }
                    }
                    reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(id + "multi", reader,
                            true, config);
                    mainIndexReaders.put(id, reader);
                }
            }
        }
    }
    if (reader == null) {
        // No committed entries at all: fall back to the empty index.
        reader = IndexReader.open(emptyIndex);
    } else {
        // Keep this reader open whilst it is referenced by mainIndexReaders / referenceCountingReadOnlyIndexReaders
        reader.incRef();
    }

    reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader(MAIN_READER, reader, false, config);
    return reader;
}

From source file:org.apache.solr.uninverting.TestFieldCacheSanityChecker.java

License:Apache License

@Override
public void setUp() throws Exception {
    super.setUp();
    // Two separate directories so the sanity checker can compare caches
    // built over distinct underlying readers.
    dirA = newDirectory();
    dirB = newDirectory();

    IndexWriter wA = new IndexWriter(dirA, newIndexWriterConfig(new MockAnalyzer(random())));
    IndexWriter wB = new IndexWriter(dirB, newIndexWriterConfig(new MockAnalyzer(random())));

    // Descending values for each legacy numeric field type.
    long theLong = Long.MAX_VALUE;
    double theDouble = Double.MAX_VALUE;
    int theInt = Integer.MAX_VALUE;
    float theFloat = Float.MAX_VALUE;
    for (int i = 0; i < NUM_DOCS; i++) {
        Document doc = new Document();
        doc.add(new LegacyLongField("theLong", theLong--, Field.Store.NO));
        doc.add(new LegacyDoubleField("theDouble", theDouble--, Field.Store.NO));
        doc.add(new LegacyIntField("theInt", theInt--, Field.Store.NO));
        doc.add(new LegacyFloatField("theFloat", theFloat--, Field.Store.NO));
        // Every third document goes to index A, the rest to index B.
        if (0 == i % 3) {
            wA.addDocument(doc);
        } else {
            wB.addDocument(doc);
        }
    }
    wA.close();
    wB.close();
    DirectoryReader rA = DirectoryReader.open(dirA);
    // NOTE(review): readerA is assigned twice — first wrapping rA (shared with
    // readerAclone), then immediately re-assigned to wrap a fresh open of dirA.
    // Presumably intentional so readerA and readerAclone end up as distinct
    // reader instances over the same directory; confirm against the upstream
    // Lucene/Solr TestFieldCacheSanityChecker.
    readerA = SlowCompositeReaderWrapper.wrap(rA);
    readerAclone = SlowCompositeReaderWrapper.wrap(rA);
    readerA = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dirA));
    readerB = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dirB));
    // readerX spans both indexes via a MultiReader.
    readerX = SlowCompositeReaderWrapper.wrap(new MultiReader(readerA, readerB));
}