Example usage for org.apache.lucene.index MultiReader MultiReader

Introduction

This page shows example usage of the org.apache.lucene.index.MultiReader constructor.

Prototype

public MultiReader(IndexReader... subReaders) throws IOException 

Document

Construct a MultiReader aggregating the named set of (sub)readers.
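
Before the project examples under Usage, here is a minimal sketch of the constructor in use; the index paths, the field name, and the File-based FSDirectory.open calls are assumptions chosen to match the Lucene 4.x-era style of the examples below, not part of the original sources:

private TopDocs searchAcrossIndexes() throws IOException {
    // Open one reader per index directory (hypothetical paths).
    IndexReader readerA = DirectoryReader.open(FSDirectory.open(new File("/data/indexA")));
    IndexReader readerB = DirectoryReader.open(FSDirectory.open(new File("/data/indexB")));

    // Aggregate both readers and search the combined view through one searcher.
    MultiReader multiReader = new MultiReader(readerA, readerB);
    IndexSearcher searcher = new IndexSearcher(multiReader);
    TopDocs hits = searcher.search(new TermQuery(new Term("body", "lucene")), 10);

    // This constructor variant closes the aggregated sub-readers when the
    // MultiReader itself is closed.
    multiReader.close();
    return hits;
}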

Usage

From source file:org.watermint.sourcecolon.org.opensolaris.opengrok.search.SearchEngine.java

License:Open Source License

/**
 * @param paging whether to use paging (if yes, first X pages will load faster)
 * @param root   list of projects to search
 * @throws IOException
 */
private void searchMultiDatabase(List<Project> root, boolean paging) throws IOException {
    IndexReader[] searchables = new IndexReader[root.size()];
    File droot = new File(RuntimeEnvironment.getInstance().getDataRootFile(), "index");
    int ii = 0;
    for (Project project : root) {
        searchables[ii++] = (IndexReader.open(FSDirectory.open(new File(droot, project.getPath()))));
    }
    searcher = new IndexSearcher(new MultiReader(searchables));
    collector = TopScoreDocCollector.create(hitsPerPage * cachePages, docsScoredInOrder);
    searcher.search(query, collector);
    totalHits = collector.getTotalHits();
    if (!paging) {
        collector = TopScoreDocCollector.create(totalHits, docsScoredInOrder);
        searcher.search(query, collector);
    }
    hits = collector.topDocs().scoreDocs;
    for (ScoreDoc hit : hits) {
        int docId = hit.doc;
        Document d = searcher.doc(docId);
        docs.add(d);
    }
}

From source file:org.watermint.sourcecolon.org.opensolaris.opengrok.web.SearchHelper.java

License:Open Source License

/**
 * Create the searcher to use with respect to the currently set parameters and
 * the given projects. Does not produce any {@link #redirect} link. It also does
 * nothing if {@link #redirect} or {@link #errorMsg} have a non-{@code null}
 * value.
 * <p/>
 * Parameters which should be populated/set at this time:
 * <ul>
 * <li>{@link #builder}</li>
 * <li>{@link #dataRoot}</li>
 * <li>{@link #order} (falls back to relevance if unset)</li>
 * </ul>
 * Populates/sets:
 * <ul>
 * <li>{@link #query}</li>
 * <li>{@link #searcher}</li>
 * <li>{@link #sort}</li>
 * <li>{@link #projects}</li>
 * <li>{@link #errorMsg} if an error occurs</li>
 * </ul>
 *
 * @param projects projects to query. If empty, a non-project opengrok
 *                 setup is assumed (i.e. DATA_ROOT/index will be used instead of possibly
 *                 multiple DATA_ROOT/$project/index).
 * @return this instance
 */
public SearchHelper prepareExec(SortedSet<String> projects) {
    if (redirect != null || errorMsg != null) {
        return this;
    }
    // the Query created by the QueryBuilder
    try {
        query = builder.build();
        if (projects == null) {
            errorMsg = "No project selected!";
            return this;
        }
        this.projects = projects;
        File indexDir = new File(dataRoot, "index");
        if (projects.isEmpty()) {
            //no project setup
            FSDirectory dir = FSDirectory.open(indexDir);
            searcher = new IndexSearcher(IndexReader.open(dir));
        } else if (projects.size() == 1) {
            // just 1 project selected
            FSDirectory dir = FSDirectory.open(new File(indexDir, projects.first()));
            searcher = new IndexSearcher(IndexReader.open(dir));
        } else {
            //more projects
            IndexReader[] searchables = new IndexReader[projects.size()];
            int ii = 0;
            //TODO might need to rewrite to use Project instead of
            // String; needs changes in og_projects.jspf too
            for (String proj : projects) {
                FSDirectory dir = FSDirectory.open(new File(indexDir, proj));
                searchables[ii++] = IndexReader.open(dir);
            }
            searcher = new IndexSearcher(new MultiReader(searchables));
        }
        // TODO check if below is somehow reusing sessions so we don't
        // requery again and again, I guess 2min timeout sessions could be
        // useful, since you click on the next page within 2mins, if not,
        // then wait ;)
        switch (order) {
        case LASTMODIFIED:
            sort = new Sort(new SortField("date", SortField.STRING, true));
            break;
        case BY_PATH:
            sort = new Sort(new SortField("fullpath", SortField.STRING));
            break;
        default:
            sort = Sort.RELEVANCE;
            break;
        }
    } catch (ParseException e) {
        errorMsg = PARSE_ERROR_MSG + e.getMessage();
    } catch (FileNotFoundException e) {
        //          errorMsg = "Index database(s) not found: " + e.getMessage();
        errorMsg = "Index database(s) not found.";
    } catch (Exception e) {
        errorMsg = e.getMessage();
    }
    return this;
}

From source file:org.wso2.carbon.analytics.dataservice.core.indexing.AnalyticsDataIndexer.java

License:Open Source License

private MultiReader getCombinedIndexReader(Set<Integer> shardIds, int tenantId, String tableName)
        throws IOException, AnalyticsIndexException {
    List<IndexReader> indexReaders = new ArrayList<>();
    for (int shardId : shardIds) {
        String tableId = this.generateTableId(tenantId, tableName);
        try {
            IndexReader reader = DirectoryReader.open(this.lookupIndexWriter(shardId, tableId), true);
            indexReaders.add(reader);
        } catch (IndexNotFoundException ignore) {
            /* This can happen if a user has just started to index records in a table
             * but has not yet done the first commit, so there are no segment* files.
             * Execution reaches this point because the shards were identified from
             * other intermediate files already written to the index directory.
             * So if we are in the middle of the initial commit, we simply ignore
             * this partially indexed data for now. */
        }
    }
    return new MultiReader(indexReaders.toArray(new IndexReader[indexReaders.size()]));
}
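
The snippet above obtains near-real-time readers straight from per-shard IndexWriters before aggregating them. A standalone sketch of that pattern follows; the shardWriters collection is hypothetical, and the two-argument DirectoryReader.open(IndexWriter, boolean) call matches the Lucene version used in the example above (newer releases change this signature):

private MultiReader openNrtMultiReader(List<IndexWriter> shardWriters) throws IOException {
    List<IndexReader> readers = new ArrayList<>();
    for (IndexWriter writer : shardWriters) {
        // Near-real-time reader per shard; applyAllDeletes = true as in the example above.
        readers.add(DirectoryReader.open(writer, true));
    }
    // Aggregate all shard readers into a single logical index view.
    return new MultiReader(readers.toArray(new IndexReader[readers.size()]));
}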

From source file:org.xcmis.search.lucene.index.CacheableIndexDataManager.java

License:Open Source License

/**
 * {@inheritDoc}
 * 
 * @throws IndexException
 */
@Override
public IndexReader getIndexReader() throws IndexException {

    synchronized (memoryChains) {
        synchronized (updateMonitor) {
            IndexReader result = super.getIndexReader();

            if (memoryChains.size() > 0) {
                final List<IndexReader> readers = new ArrayList<IndexReader>(memoryChains.size());
                final Iterator<LuceneIndexDataManager> it = memoryChains.iterator();

                while (it.hasNext()) {
                    final LuceneIndexDataManager chain = it.next();

                    final IndexReader indexReader = chain.getIndexReader();
                    if (indexReader != null) {
                        readers.add(indexReader);
                    }

                }
                if (result != null) {
                    readers.add(result);
                }
                if (readers.size() > 1) {
                    final IndexReader[] indexReaders = new IndexReader[readers.size()];
                    result = new MultiReader(readers.toArray(indexReaders));
                } else if (readers.size() == 1) {
                    result = readers.get(0);
                } else {
                    throw new IndexReaderNotFoundException("No readers found");
                }

            }
            if (result == null) {
                try {
                    RAMDirectory directory = new RAMDirectory();
                    IndexWriter.MaxFieldLength fieldLength = new IndexWriter.MaxFieldLength(
                            IndexWriter.DEFAULT_MAX_FIELD_LENGTH);
                    IndexWriter iw = new IndexWriter(directory, new SimpleAnalyzer(), true, fieldLength);
                    iw.close();
                    result = IndexReader.open(directory);
                } catch (IOException e) {
                    throw new IndexException("Unable to initialize index: empty index ");
                }

            }
            return result;
        }
    }

}

From source file:org.xcmis.search.lucene.index.LocalStorageIndexDataManager.java

License:Open Source License

public IndexReader getIndexReader() throws IndexException {
    IndexReader result = null;
    if (chains.size() > 0) {
        synchronized (chains) {
            if (chains.size() > 0) {
                final List<IndexReader> readers = new ArrayList<IndexReader>(chains.size());
                final Iterator<PersistedIndex> it = chains.iterator();

                while (it.hasNext()) {
                    final LuceneIndexDataManager chain = it.next();

                    final IndexReader indexReader = chain.getIndexReader();
                    if (indexReader != null) {
                        readers.add(indexReader);
                    }

                }
                if (result != null) {
                    readers.add(result);
                }
                if (readers.size() > 1) {
                    final IndexReader[] indexReaders = new IndexReader[readers.size()];
                    result = new MultiReader(readers.toArray(indexReaders));
                } else if (readers.size() == 1) {
                    result = readers.get(0);
                } else {
                    throw new RuntimeException("No readers found");
                }
            }
        }
        if (result == null) {
            throw new RuntimeException("No readers found");
        }
    }
    return result;
}

From source file:stroom.search.TestBasicSearch.java

License:Apache License

@Test
public void testSimple() throws IOException {
    final IndexFields indexFields = IndexFields.createStreamIndexFields();
    final IndexField idField = IndexField.createField("Id", AnalyzerType.ALPHA_NUMERIC, false, true, true,
            false);
    final IndexField testField = IndexField.createField("test", AnalyzerType.ALPHA_NUMERIC, false, true, true,
            false);
    final IndexField nonStoreField = IndexField.createField("nonstore", AnalyzerType.ALPHA_NUMERIC, false,
            false, true, false);
    indexFields.add(idField);
    indexFields.add(testField);
    indexFields.add(nonStoreField);
    final int indexTestSize = 10;

    final String indexName = "TEST";
    final Index index = commonTestScenarioCreator.createIndex(indexName, indexFields);

    final IndexShardKey indexShardKey = IndexShardKeyUtil.createTestKey(index);

    // Do some work.
    for (int i = 1; i <= indexTestSize; i++) {
        final Field idFld = FieldFactory.create(idField, i + ":" + i);
        final Field testFld = FieldFactory.create(testField, "test");
        final Field nonStoredFld = FieldFactory.create(nonStoreField, "test");

        final Document document = new Document();
        document.add(idFld);
        document.add(testFld);
        document.add(nonStoredFld);

        // final PoolItem<IndexShardKey, IndexShardWriter> poolItem =
        // .borrowObject(indexShardKey, true);
        final IndexShardWriter writer = indexShardWriterCache.get(indexShardKey);
        writer.addDocument(document);
        // indexShardWriterPool.returnObject(poolItem, true);
    }

    indexShardWriterCache.flushAll();

    final FindIndexShardCriteria criteria = new FindIndexShardCriteria();
    criteria.getIndexIdSet().add(index);
    final List<IndexShard> shards = indexShardService.find(criteria);

    // Open readers and add reader searcher to the multi searcher.
    final IndexShardSearcher[] readers = new IndexShardSearcherImpl[shards.size()];
    int i = 0;
    for (final IndexShard indexShard : shards) {
        final IndexShardSearcher indexShardSearcher = new IndexShardSearcherImpl(indexShard);
        readers[i++] = indexShardSearcher;
    }

    final IndexReader[] searchables = new IndexReader[readers.length];
    for (i = 0; i < readers.length; i++) {
        readers[i].open();
        searchables[i] = readers[i].getReader();
    }
    final MultiReader multiReader = new MultiReader(searchables);
    final IndexSearcher indexSearcher = new IndexSearcher(multiReader);

    final TermQuery termQuery = new TermQuery(new Term("test", "test"));
    final MaxHitCollector maxHitCollector = new MaxHitCollector(3000);
    indexSearcher.search(termQuery, maxHitCollector);
    Assert.assertEquals(indexTestSize, maxHitCollector.getDocIdList().size());

    for (final Integer id : maxHitCollector.getDocIdList()) {
        final Document doc = indexSearcher.doc(id);
        final IndexableField testFld = doc.getField("test");
        Assert.assertNotNull(testFld);
        Assert.assertEquals("test", testFld.stringValue());
    }

    final TermQuery termQuery2 = new TermQuery(new Term("nonstore", "test"));
    final MaxHitCollector maxHitCollector2 = new MaxHitCollector(3000);
    indexSearcher.search(termQuery2, maxHitCollector2);
    Assert.assertEquals(indexTestSize, maxHitCollector2.getDocIdList().size());

    for (final Integer id : maxHitCollector2.getDocIdList()) {
        final Document doc = indexSearcher.doc(id);
        final IndexableField testFld = doc.getField("test");
        Assert.assertNotNull(testFld);
        Assert.assertEquals("test", testFld.stringValue());
        final IndexableField nonstoreField = doc.getField("nonstore");
        Assert.assertNull(nonstoreField);
    }

    // Close readers.
    for (final IndexShardSearcher reader : readers) {
        reader.close();
    }
}