List of usage examples for the org.apache.lucene.index.MultiReader constructor
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) throws IOException
Construct a MultiReader aggregating the named set of (sub)readers.
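Before the project examples below, a minimal self-contained sketch of this constructor (the index paths are hypothetical, and FSDirectory.open(Path) assumes Lucene 5.x or later):

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;

public static IndexSearcher openCombinedSearcher() throws IOException {
    IndexReader[] subReaders = {
        DirectoryReader.open(FSDirectory.open(Paths.get("/indexes/a"))),
        DirectoryReader.open(FSDirectory.open(Paths.get("/indexes/b")))
    };
    // closeSubReaders == true: closing the MultiReader later also closes both sub-readers
    return new IndexSearcher(new MultiReader(subReaders, true));
}

Passing false instead leaves the sub-readers open when the MultiReader is closed, which is the right choice when they are owned elsewhere, as several of the examples below show.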
From source file: org.archive.tnh.IndexOpener.java
License: Apache License
/**
 * Opens an IndexReader for the given directory. The directory may
 * be a plain-old Lucene index, a parallel index, or a root
 * directory of index shards. In the case of shards, this method is
 * called recursively to open the sub-indexes and combine them into
 * a MultiReader.
 */
public static IndexReader openIndexReader(File directory, int indexDivisor) throws IOException {
    if (directory == null)
        throw new IllegalArgumentException("directory cannot be null");
    if (!directory.isDirectory())
        throw new IllegalArgumentException("not a directory: " + directory);

    File[] subDirs = directory.listFiles(DIR_FILTER);

    // If there are no sub-dirs, just open this as an IndexReader
    if (subDirs.length == 0) {
        return IndexReader.open(new MMapDirectory(directory), new KeepOnlyLastCommitDeletionPolicy(), true,
                indexDivisor);
    }

    // This directory has sub-dirs, and they are parallel.
    if (directory.listFiles(PARALLEL_FILTER).length == 1) {
        ArchiveParallelReader preader = new ArchiveParallelReader();
        for (int i = 0; i < subDirs.length; i++) {
            preader.add(IndexReader.open(new MMapDirectory(subDirs[i]),
                    new KeepOnlyLastCommitDeletionPolicy(), true, indexDivisor));
        }
        return preader;
    }

    // This directory has sub-dirs, but they are not parallel, so they
    // are shards.
    IndexReader[] subReaders = new IndexReader[subDirs.length];
    for (int i = 0; i < subDirs.length; i++) {
        subReaders[i] = openIndexReader(subDirs[i], indexDivisor);
    }
    IndexReader multi = new MultiReader(subReaders, true);
    return multi;
}
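A hedged usage sketch for the method above (the /data/index layout and the indexDivisor of 1 are invented for illustration):

IndexReader reader = IndexOpener.openIndexReader(new File("/data/index"), 1);
try {
    IndexSearcher searcher = new IndexSearcher(reader);
    // queries now span every shard as one logical index ...
} finally {
    reader.close(); // closeSubReaders was true, so the shard readers close too
}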
From source file: org.elasticsearch.index.query.PercolateQueryBuilder.java
License: Apache License
private IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, ParsedDocument doc) {
    IndexReader[] memoryIndices = new IndexReader[doc.docs().size()];
    List<ParseContext.Document> docs = doc.docs();
    int rootDocIndex = docs.size() - 1;
    assert rootDocIndex > 0;
    for (int i = 0; i < docs.size(); i++) {
        ParseContext.Document d = docs.get(i);
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(d, analyzer, true, false);
        memoryIndices[i] = memoryIndex.createSearcher().getIndexReader();
    }
    try {
        MultiReader mReader = new MultiReader(memoryIndices, true);
        LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
        final IndexSearcher slowSearcher = new IndexSearcher(slowReader) {
            @Override
            public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
                BooleanQuery.Builder bq = new BooleanQuery.Builder();
                bq.add(query, BooleanClause.Occur.MUST);
                bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
                return super.createNormalizedWeight(bq.build(), needsScores);
            }
        };
        slowSearcher.setQueryCache(null);
        return slowSearcher;
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to create index for percolator with nested document ", e);
    }
}
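Stripped of the Elasticsearch plumbing, the core pattern above is wrapping several single-document MemoryIndex readers in one MultiReader. A minimal sketch under that reading (field name and text are invented; assumes the lucene-memory module):

static IndexSearcher searcherOverDocs(Analyzer analyzer) throws IOException {
    MemoryIndex nested = new MemoryIndex();
    nested.addField("body", "a nested document", analyzer);
    MemoryIndex root = new MemoryIndex();
    root.addField("body", "the root document", analyzer);
    IndexReader[] readers = {
        nested.createSearcher().getIndexReader(),
        root.createSearcher().getIndexReader()
    };
    // closeSubReaders == true: the in-memory readers close with the MultiReader
    return new IndexSearcher(new MultiReader(readers, true));
}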
From source file: org.elasticsearch.index.query.PercolatorQueryBuilder.java
License: Apache License
private IndexSearcher createMultiDocumentSearcher(DocumentMapper docMapper, Analyzer defaultAnalyzer,
        ParsedDocument doc) {
    IndexReader[] memoryIndices = new IndexReader[doc.docs().size()];
    List<ParseContext.Document> docs = doc.docs();
    int rootDocIndex = docs.size() - 1;
    assert rootDocIndex > 0;
    for (int i = 0; i < docs.size(); i++) {
        ParseContext.Document d = docs.get(i);
        MemoryIndex memoryIndex = new MemoryIndex(true);
        indexDoc(docMapper, defaultAnalyzer, d, memoryIndex);
        memoryIndices[i] = memoryIndex.createSearcher().getIndexReader();
    }
    try {
        MultiReader mReader = new MultiReader(memoryIndices, true);
        LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
        final IndexSearcher slowSearcher = new IndexSearcher(slowReader) {
            @Override
            public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
                BooleanQuery.Builder bq = new BooleanQuery.Builder();
                bq.add(query, BooleanClause.Occur.MUST);
                bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
                return super.createNormalizedWeight(bq.build(), needsScores);
            }
        };
        slowSearcher.setQueryCache(null);
        return slowSearcher;
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to create index for percolator with nested document ", e);
    }
}
From source file: org.elasticsearch.index.search.child.DeleteByQueryWrappingFilter.java
License: Apache License
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
    SearchContext searchContext = SearchContext.current();
    if (weight == null) {
        assert searcher == null;
        searcher = searchContext.searcher();
        IndexReader indexReader = SearchContext.current().searcher().getIndexReader();
        IndexReader multiReader = null;
        try {
            if (!contains(indexReader, context)) {
                multiReader = new MultiReader(new IndexReader[] { indexReader, context.reader() }, false);
                Similarity similarity = searcher.getSimilarity();
                // reuse the one multiReader so the finally block closes the same
                // instance the searcher was built on
                searcher = new IndexSearcher(multiReader);
                searcher.setSimilarity(similarity);
            }
            weight = searcher.createNormalizedWeight(query);
        } finally {
            if (multiReader != null) {
                multiReader.close();
            }
        }
    } else {
        IndexReader indexReader = searcher.getIndexReader();
        if (!contains(indexReader, context)) {
            IndexReader multiReader = new MultiReader(new IndexReader[] { indexReader, context.reader() },
                    false);
            try {
                Similarity similarity = searcher.getSimilarity();
                searcher = new IndexSearcher(multiReader);
                searcher.setSimilarity(similarity);
                weight = searcher.createNormalizedWeight(query);
            } finally {
                multiReader.close();
            }
        }
    }
    return new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() throws IOException {
            return weight.scorer(context, true, false, acceptDocs);
        }

        @Override
        public boolean isCacheable() {
            return false;
        }
    };
}
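The detail worth isolating above is the short-lived MultiReader: built with closeSubReaders == false, it can be closed as soon as the weight exists without touching the underlying readers. A condensed sketch under that assumption (topReader and context stand in for the fields above; createNormalizedWeight(Query) is the Lucene 4.x-era signature):

static Weight buildWeight(IndexSearcher base, IndexReader topReader, AtomicReaderContext context, Query query)
        throws IOException {
    MultiReader combined = new MultiReader(new IndexReader[] { topReader, context.reader() }, false);
    try {
        IndexSearcher searcher = new IndexSearcher(combined);
        searcher.setSimilarity(base.getSimilarity());
        return searcher.createNormalizedWeight(query);
    } finally {
        combined.close(); // releases only the combined view; the sub-readers stay open
    }
}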
From source file: org.elasticsearch.percolator.MultiDocumentPercolatorIndex.java
License: Apache License
@Override
public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
    int docCounter = 0;
    IndexReader[] memoryIndices = new IndexReader[parsedDocument.docs().size()];
    for (ParseContext.Document d : parsedDocument.docs()) {
        memoryIndices[docCounter] = indexDoc(d, parsedDocument.analyzer()).createSearcher().getIndexReader();
        docCounter++;
    }
    MultiReader mReader = new MultiReader(memoryIndices, true);
    try {
        AtomicReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
        DocSearcher docSearcher = new DocSearcher(new IndexSearcher(slowReader));
        context.initialize(docSearcher, parsedDocument);
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to create index for percolator with nested document ", e);
    }
}
From source file: org.lexevs.dao.index.lucenesupport.MultiReaderFactory.java
License: Open Source License
@Override
public MultiReader getObject() throws Exception {
    List<IndexReader> readers = new ArrayList<IndexReader>();
    for (CodingSchemeMetaData md : concurrentMetaData.getCodingSchemeList()) {
        readers.add(md.getDirectory().getIndexReader());
    }
    return new MultiReader(readers.toArray(new IndexReader[readers.size()]), false);
}
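Here closeSubReaders is false because each reader belongs to its coding scheme's directory, so closing the combined view must not close them. A small sketch of that ownership rule (the owned reader and query are assumptions):

static void searchSharedReader(IndexReader owned, Query query) throws IOException {
    MultiReader view = new MultiReader(new IndexReader[] { owned }, false);
    try {
        IndexSearcher searcher = new IndexSearcher(view);
        searcher.search(query, 10);
    } finally {
        view.close(); // releases only the view; 'owned' stays open for its owner
    }
}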
From source file: org.neo4j.index.impl.lucene.legacy.LuceneLegacyIndex.java
License: Open Source License
private IndexHits<Document> search(IndexReference searcherRef, IndexSearcher fulltextTransactionStateSearcher,
        Query query, QueryContext additionalParametersOrNull, Collection<EntityId> removed) throws IOException {
    if (fulltextTransactionStateSearcher != null && !removed.isEmpty()) {
        letThroughAdditions(fulltextTransactionStateSearcher, query, removed);
    }
    IndexSearcher searcher = fulltextTransactionStateSearcher == null ? searcherRef.getSearcher()
            : new IndexSearcher(new MultiReader(searcherRef.getSearcher().getIndexReader(),
                    fulltextTransactionStateSearcher.getIndexReader()));
    IndexHits<Document> result;
    if (additionalParametersOrNull != null && additionalParametersOrNull.getTop() > 0) {
        result = new TopDocsIterator(query, additionalParametersOrNull, searcher);
    } else {
        Sort sorting = additionalParametersOrNull != null ? additionalParametersOrNull.getSorting() : null;
        boolean forceScore = additionalParametersOrNull == null
                || !additionalParametersOrNull.getTradeCorrectnessForSpeed();
        DocValuesCollector collector = new DocValuesCollector(forceScore);
        searcher.search(query, collector);
        return collector.getIndexHits(sorting);
    }
    return result;
}
From source file: org.neo4j.index.impl.lucene.LuceneIndex.java
License: Open Source License
private IndexHits<Document> search(IndexReference searcherRef, Query query,
        QueryContext additionalParametersOrNull, IndexSearcher additionsSearcher, Collection<Long> removed) {
    try {
        if (additionsSearcher != null && !removed.isEmpty()) {
            letThroughAdditions(additionsSearcher, query, removed);
        }
        IndexSearcher searcher = additionsSearcher == null ? searcherRef.getSearcher()
                : new IndexSearcher(new MultiReader(searcherRef.getSearcher().getIndexReader(),
                        additionsSearcher.getIndexReader()));
        IndexHits<Document> result = null;
        if (additionalParametersOrNull != null && additionalParametersOrNull.getTop() > 0) {
            result = new TopDocsIterator(query, additionalParametersOrNull, searcher);
        } else {
            Sort sorting = additionalParametersOrNull != null ? additionalParametersOrNull.getSorting() : null;
            boolean forceScore = additionalParametersOrNull == null
                    || !additionalParametersOrNull.getTradeCorrectnessForSpeed();
            Hits hits = new Hits(searcher, query, null, sorting, forceScore);
            result = new HitsIterator(hits);
        }
        return result;
    } catch (IOException e) {
        throw new RuntimeException("Unable to query " + this + " with " + query, e);
    }
}
From source file: org.opengrok.indexer.configuration.RuntimeEnvironment.java
License: Open Source License
/**
 * Return collection of IndexReader objects as MultiReader object
 * for given list of projects.
 * The caller is responsible for releasing the IndexSearcher objects
 * so we add them to the map.
 *
 * @param projects list of projects
 * @param searcherList each SuperIndexSearcher produced will be put into this list
 * @return MultiReader for the projects
 */
public MultiReader getMultiReader(SortedSet<String> projects, ArrayList<SuperIndexSearcher> searcherList) {
    IndexReader[] subreaders = new IndexReader[projects.size()];
    int ii = 0;
    // TODO might need to rewrite to Project instead of String, need changes in projects.jspf too.
    for (String proj : projects) {
        try {
            SuperIndexSearcher searcher = RuntimeEnvironment.getInstance().getIndexSearcher(proj);
            subreaders[ii++] = searcher.getIndexReader();
            searcherList.add(searcher);
        } catch (IOException | NullPointerException ex) {
            LOGGER.log(Level.SEVERE, "cannot get IndexReader for project " + proj, ex);
            return null;
        }
    }
    MultiReader multiReader = null;
    try {
        multiReader = new MultiReader(subreaders, true);
    } catch (IOException ex) {
        LOGGER.log(Level.SEVERE, "cannot construct MultiReader for set of projects", ex);
    }
    return multiReader;
}
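A hedged sketch of a caller honoring the contract documented above (project names are invented; how the collected SuperIndexSearcher objects are ultimately released is application-specific and not shown in this listing):

SortedSet<String> projects = new TreeSet<>(Arrays.asList("projA", "projB"));
ArrayList<SuperIndexSearcher> searcherList = new ArrayList<>();
MultiReader multiReader = RuntimeEnvironment.getInstance().getMultiReader(projects, searcherList);
if (multiReader != null) {
    IndexSearcher searcher = new IndexSearcher(multiReader);
    // ... search across the selected projects ...
}
// afterwards: release every SuperIndexSearcher collected in searcherList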
From source file: org.opensolaris.opengrok.configuration.RuntimeEnvironment.java
License: Open Source License
/**
 * Return collection of IndexReader objects as MultiReader object
 * for given list of projects.
 * The caller is responsible for releasing the IndexSearcher objects
 * so we add them to the map.
 *
 * @param projects list of projects
 * @param searcherList each SuperIndexSearcher produced will be put into this list
 * @return MultiReader for the projects
 */
public MultiReader getMultiReader(SortedSet<String> projects, ArrayList<SuperIndexSearcher> searcherList) {
    IndexReader[] subreaders = new IndexReader[projects.size()];
    int ii = 0;
    // TODO might need to rewrite to Project instead of String, need changes in projects.jspf too
    for (String proj : projects) {
        try {
            SuperIndexSearcher searcher = RuntimeEnvironment.getInstance().getIndexSearcher(proj);
            subreaders[ii++] = searcher.getIndexReader();
            searcherList.add(searcher);
        } catch (IOException | NullPointerException ex) {
            LOGGER.log(Level.SEVERE, "cannot get IndexReader for project " + proj, ex);
            return null;
        }
    }
    MultiReader multiReader = null;
    try {
        multiReader = new MultiReader(subreaders, true);
    } catch (IOException ex) {
        LOGGER.log(Level.SEVERE, "cannot construct MultiReader for set of projects", ex);
    }
    return multiReader;
}