List of usage examples for org.apache.lucene.search.IndexSearcher#setQueryCache
public void setQueryCache(QueryCache queryCache)
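The per-file examples below show this setter in real code. As a quick orientation, here is a minimal, self-contained sketch of the two common calls: passing null to disable query caching (tests often do this so a cached entry cannot mask the code path under test), and installing a bounded LRUQueryCache. The index path and cache sizes are illustrative assumptions, not values taken from the examples below.

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LRUQueryCache;
import org.apache.lucene.store.FSDirectory;

public class SetQueryCacheSketch {
    public static void main(String[] args) throws Exception {
        // The index path is an illustrative assumption.
        try (DirectoryReader reader = DirectoryReader.open(
                FSDirectory.open(Paths.get("/tmp/example-index")))) {
            IndexSearcher searcher = new IndexSearcher(reader);

            // Passing null disables query caching for this searcher; several
            // examples below do this so cached entries cannot interfere with
            // the behavior being exercised (e.g. two-phase iteration).
            searcher.setQueryCache(null);

            // Alternatively, install a bounded cache. The sizes (1000 queries,
            // 32 MB) are illustrative, not recommendations.
            searcher.setQueryCache(new LRUQueryCache(1_000, 32 * 1024 * 1024));
        }
    }
}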
From source file:org.elasticsearch.index.query.functionscore.FunctionScoreTests.java
License:Apache License
public void testPropagatesApproximations() throws IOException {
    Query query = new RandomApproximationQuery(new MatchAllDocsQuery(), random());
    IndexSearcher searcher = newSearcher(reader);
    searcher.setQueryCache(null); // otherwise we could get a cached entry that does not have approximations
    FunctionScoreQuery fsq = new FunctionScoreQuery(query, null, null);
    for (boolean needsScores : new boolean[] { true, false }) {
        Weight weight = searcher.createWeight(fsq, needsScores);
        Scorer scorer = weight.scorer(reader.leaves().get(0));
        assertNotNull(scorer.twoPhaseIterator());
    }
    FiltersFunctionScoreQuery ffsq = new FiltersFunctionScoreQuery(query, ScoreMode.Sum, new FilterFunction[0],
            Float.POSITIVE_INFINITY, null);
    for (boolean needsScores : new boolean[] { true, false }) {
        Weight weight = searcher.createWeight(ffsq, needsScores);
        Scorer scorer = weight.scorer(reader.leaves().get(0));
        assertNotNull(scorer.twoPhaseIterator());
    }
}
From source file:org.elasticsearch.index.query.PercolateQueryBuilder.java
License:Apache License
@Override protected Query doToQuery(QueryShardContext context) throws IOException { if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) { throw new IllegalStateException("query builder must be rewritten first"); }//from www .ja va 2 s . c o m if (document == null) { throw new IllegalStateException("nothing to percolator"); } MapperService mapperService = context.getMapperService(); DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType); DocumentMapper docMapper = docMapperForType.getDocumentMapper(); ParsedDocument doc = docMapper.parse(source(context.index().getName(), documentType, "_temp_id", document)); FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer(); // Need to this custom impl because FieldNameAnalyzer is strict and the percolator sometimes isn't when // 'index.percolator.map_unmapped_fields_as_string' is enabled: Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) { @Override protected Analyzer getWrappedAnalyzer(String fieldName) { Analyzer analyzer = fieldNameAnalyzer.analyzers().get(fieldName); if (analyzer != null) { return analyzer; } else { return context.getAnalysisService().defaultIndexAnalyzer(); } } }; final IndexSearcher docSearcher; if (doc.docs().size() > 1) { assert docMapper.hasNestedObjects(); docSearcher = createMultiDocumentSearcher(analyzer, doc); } else { MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc.rootDoc(), analyzer, true, false); docSearcher = memoryIndex.createSearcher(); docSearcher.setQueryCache(null); } PercolatorQueryCache registry = context.getPercolatorQueryCache(); if (registry == null) { throw new QueryShardException(context, "no percolator query registry"); } PercolateQuery.Builder builder = new PercolateQuery.Builder(documentType, registry, document, docSearcher); Settings indexSettings = registry.getIndexSettings().getSettings(); if (indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) .onOrAfter(Version.V_5_0_0_alpha1)) { MappedFieldType fieldType = context.fieldMapper(field); if (fieldType == null) { throw new QueryShardException(context, "field [" + field + "] does not exist"); } if (!(fieldType instanceof PercolatorFieldMapper.PercolatorFieldType)) { throw new QueryShardException(context, "expected field [" + field + "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]"); } PercolatorFieldMapper.PercolatorFieldType pft = (PercolatorFieldMapper.PercolatorFieldType) fieldType; builder.extractQueryTermsQuery(pft.getExtractedTermsField(), pft.getUnknownQueryFieldName()); } else { Query percolateTypeQuery = new TermQuery( new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME)); builder.setPercolateTypeQuery(percolateTypeQuery); } return builder.build(); }
From source file:org.elasticsearch.index.query.PercolateQueryBuilder.java
License:Apache License
private IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, ParsedDocument doc) {
    IndexReader[] memoryIndices = new IndexReader[doc.docs().size()];
    List<ParseContext.Document> docs = doc.docs();
    int rootDocIndex = docs.size() - 1;
    assert rootDocIndex > 0;
    for (int i = 0; i < docs.size(); i++) {
        ParseContext.Document d = docs.get(i);
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(d, analyzer, true, false);
        memoryIndices[i] = memoryIndex.createSearcher().getIndexReader();
    }
    try {
        MultiReader mReader = new MultiReader(memoryIndices, true);
        LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
        final IndexSearcher slowSearcher = new IndexSearcher(slowReader) {
            @Override
            public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
                BooleanQuery.Builder bq = new BooleanQuery.Builder();
                bq.add(query, BooleanClause.Occur.MUST);
                bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
                return super.createNormalizedWeight(bq.build(), needsScores);
            }
        };
        slowSearcher.setQueryCache(null);
        return slowSearcher;
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to create index for percolator with nested document", e);
    }
}
From source file:org.elasticsearch.index.query.PercolatorQueryBuilder.java
License:Apache License
@Override protected Query doToQuery(QueryShardContext context) throws IOException { if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) { throw new IllegalStateException("query builder must be rewritten first"); }//from w w w. jav a 2 s. c o m if (document == null) { throw new IllegalStateException("nothing to percolator"); } MapperService mapperService = context.getMapperService(); DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType); DocumentMapper docMapper = docMapperForType.getDocumentMapper(); ParsedDocument doc = docMapper .parse(source(document).index(context.index().getName()).id("_temp_id").type(documentType)); Analyzer defaultAnalyzer = context.getAnalysisService().defaultIndexAnalyzer(); final IndexSearcher docSearcher; if (doc.docs().size() > 1) { assert docMapper.hasNestedObjects(); docSearcher = createMultiDocumentSearcher(docMapper, defaultAnalyzer, doc); } else { // TODO: we may want to bring to MemoryIndex thread local cache back... // but I'm unsure about the real benefits. MemoryIndex memoryIndex = new MemoryIndex(true); indexDoc(docMapper, defaultAnalyzer, doc.rootDoc(), memoryIndex); docSearcher = memoryIndex.createSearcher(); docSearcher.setQueryCache(null); } PercolatorQueryCache registry = context.getPercolatorQueryCache(); if (registry == null) { throw new QueryShardException(context, "no percolator query registry"); } Query percolateTypeQuery = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME)); PercolatorQuery.Builder builder = new PercolatorQuery.Builder(documentType, registry, document, docSearcher, percolateTypeQuery); Settings indexSettings = registry.getIndexSettings().getSettings(); if (indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_5_0_0)) { builder.extractQueryTermsQuery(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME); } return builder.build(); }
From source file:org.elasticsearch.index.query.PercolatorQueryBuilder.java
License:Apache License
private IndexSearcher createMultiDocumentSearcher(DocumentMapper docMapper, Analyzer defaultAnalyzer,
        ParsedDocument doc) {
    IndexReader[] memoryIndices = new IndexReader[doc.docs().size()];
    List<ParseContext.Document> docs = doc.docs();
    int rootDocIndex = docs.size() - 1;
    assert rootDocIndex > 0;
    for (int i = 0; i < docs.size(); i++) {
        ParseContext.Document d = docs.get(i);
        MemoryIndex memoryIndex = new MemoryIndex(true);
        indexDoc(docMapper, defaultAnalyzer, d, memoryIndex);
        memoryIndices[i] = memoryIndex.createSearcher().getIndexReader();
    }
    try {
        MultiReader mReader = new MultiReader(memoryIndices, true);
        LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
        final IndexSearcher slowSearcher = new IndexSearcher(slowReader) {
            @Override
            public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
                BooleanQuery.Builder bq = new BooleanQuery.Builder();
                bq.add(query, BooleanClause.Occur.MUST);
                bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
                return super.createNormalizedWeight(bq.build(), needsScores);
            }
        };
        slowSearcher.setQueryCache(null);
        return slowSearcher;
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to create index for percolator with nested document", e);
    }
}
From source file:org.elasticsearch.index.shard.IndexSearcherWrapper.java
License:Apache License
/**
 * If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher
 * gets wrapped and a new {@link Engine.Searcher} instance is returned; otherwise the provided {@link Engine.Searcher} is returned.
 *
 * This is invoked each time an {@link Engine.Searcher} is requested to do an operation (for example, search).
 */
public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOException {
    final ElasticsearchDirectoryReader elasticsearchDirectoryReader = ElasticsearchDirectoryReader
            .getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
    if (elasticsearchDirectoryReader == null) {
        throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
    }
    NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(
            engineSearcher.getDirectoryReader());
    DirectoryReader reader = wrap(nonClosingReaderWrapper);
    if (reader != nonClosingReaderWrapper) {
        if (reader.getCoreCacheKey() != elasticsearchDirectoryReader.getCoreCacheKey()) {
            throw new IllegalStateException(
                    "wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate"
                            + " to the original reader's core cache key. Wrapped readers can't be used as cache keys since they are used only per request, which would lead to subtle bugs");
        }
        if (ElasticsearchDirectoryReader
                .getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
            // prevent that somebody wraps with a non-filter reader
            throw new IllegalStateException(
                    "wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
        }
    }
    final IndexSearcher origIndexSearcher = engineSearcher.searcher();
    final IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
    innerIndexSearcher.setQueryCache(origIndexSearcher.getQueryCache());
    innerIndexSearcher.setQueryCachingPolicy(origIndexSearcher.getQueryCachingPolicy());
    innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity(true));
    // TODO: Right now IndexSearcher isn't wrapper friendly; when it becomes wrapper friendly we should revise this extension point.
    // For example, if IndexSearcher#rewrite() is overridden, then IndexSearcher#createNormalizedWeight also needs to be overridden.
    // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times.
    final IndexSearcher indexSearcher = wrap(innerIndexSearcher);
    if (reader == nonClosingReaderWrapper && indexSearcher == innerIndexSearcher) {
        return engineSearcher;
    } else {
        return new Engine.Searcher(engineSearcher.source(), indexSearcher) {
            @Override
            public void close() throws ElasticsearchException {
                try {
                    // we close the reader to make sure wrappers can release resources if needed;
                    // our NonClosingReaderWrapper makes sure that the underlying reader is not closed
                    reader().close();
                } catch (IOException e) {
                    throw new ElasticsearchException("failed to close reader", e);
                } finally {
                    engineSearcher.close();
                }
            }
        };
    }
}
From source file:org.elasticsearch.index.shard.ShardSplittingQuery.java
License:Apache License
private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) {
    return context -> {
        Query query = Queries.newNonNestedFilter(indexVersionCreated);
        final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
        final IndexSearcher searcher = new IndexSearcher(topLevelContext);
        searcher.setQueryCache(null);
        final Weight weight = searcher.createNormalizedWeight(query, false);
        Scorer s = weight.scorer(context);
        return s == null ? null : BitSet.of(s.iterator(), context.reader().maxDoc());
    };
}
From source file:org.elasticsearch.index.shard.ShardSplittingQueryTests.java
License:Apache License
void assertSplit(Directory dir, IndexMetaData metaData, int targetShardId, boolean hasNested) throws IOException {
    try (IndexReader reader = DirectoryReader.open(dir)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        final boolean needsScores = false;
        final Weight splitWeight = searcher.createNormalizedWeight(
                new ShardSplittingQuery(metaData, targetShardId, hasNested), needsScores);
        final List<LeafReaderContext> leaves = reader.leaves();
        for (final LeafReaderContext ctx : leaves) {
            Scorer scorer = splitWeight.scorer(ctx);
            DocIdSetIterator iterator = scorer.iterator();
            SortedNumericDocValues shard_id = ctx.reader().getSortedNumericDocValues("shard_id");
            int numExpected = 0;
            while (shard_id.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                if (targetShardId == shard_id.nextValue()) {
                    numExpected++;
                }
            }
            if (numExpected == ctx.reader().maxDoc()) {
                // all docs belong in this shard
                assertEquals(DocIdSetIterator.NO_MORE_DOCS, iterator.nextDoc());
            } else {
                shard_id = ctx.reader().getSortedNumericDocValues("shard_id");
                int doc;
                int numActual = 0;
                int lastDoc = 0;
                while ((doc = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                    lastDoc = doc;
                    while (shard_id.nextDoc() < doc) {
                        long shardID = shard_id.nextValue();
                        assertEquals(shardID, targetShardId);
                        numActual++;
                    }
                    assertEquals(shard_id.docID(), doc);
                    long shardID = shard_id.nextValue();
                    BytesRef id = reader.document(doc).getBinaryValue("_id");
                    String actualId = Uid.decodeId(id.bytes, id.offset, id.length);
                    assertNotEquals(ctx.reader() + " docID: " + doc + " actualID: " + actualId, shardID,
                            targetShardId);
                }
                if (lastDoc < ctx.reader().maxDoc()) {
                    // check the last docs in the segment and make sure they all have the right shard id
                    while (shard_id.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                        long shardID = shard_id.nextValue();
                        assertEquals(shardID, targetShardId);
                        numActual++;
                    }
                }
                assertEquals(numExpected, numActual);
            }
        }
    }
}
From source file:org.elasticsearch.indices.cache.query.IndicesQueryCacheTests.java
License:Apache License
public void testBasics() throws IOException {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
    w.addDocument(new Document());
    DirectoryReader r = DirectoryReader.open(w, false);
    w.close();
    ShardId shard = new ShardId(new Index("index"), 0);
    r = ElasticsearchDirectoryReader.wrap(r, shard);
    IndexSearcher s = new IndexSearcher(r);
    s.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);

    Settings settings = Settings.builder().put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT, 10).build();
    IndicesQueryCache cache = new IndicesQueryCache(settings);
    s.setQueryCache(cache);

    QueryCacheStats stats = cache.getStats(shard);
    assertEquals(0L, stats.getCacheSize());
    assertEquals(0L, stats.getCacheCount());
    assertEquals(0L, stats.getHitCount());
    assertEquals(0L, stats.getMissCount());

    assertEquals(1, s.count(new DummyQuery(0)));
    stats = cache.getStats(shard);
    assertEquals(1L, stats.getCacheSize());
    assertEquals(1L, stats.getCacheCount());
    assertEquals(0L, stats.getHitCount());
    assertEquals(1L, stats.getMissCount());

    for (int i = 1; i < 20; ++i) {
        assertEquals(1, s.count(new DummyQuery(i)));
    }
    stats = cache.getStats(shard);
    assertEquals(10L, stats.getCacheSize());
    assertEquals(20L, stats.getCacheCount());
    assertEquals(0L, stats.getHitCount());
    assertEquals(20L, stats.getMissCount());

    s.count(new DummyQuery(10));
    stats = cache.getStats(shard);
    assertEquals(10L, stats.getCacheSize());
    assertEquals(20L, stats.getCacheCount());
    assertEquals(1L, stats.getHitCount());
    assertEquals(20L, stats.getMissCount());

    IOUtils.close(r, dir);

    // got emptied, but no changes to other metrics
    stats = cache.getStats(shard);
    assertEquals(0L, stats.getCacheSize());
    assertEquals(20L, stats.getCacheCount());
    assertEquals(1L, stats.getHitCount());
    assertEquals(20L, stats.getMissCount());

    cache.onClose(shard);

    // forgot everything
    stats = cache.getStats(shard);
    assertEquals(0L, stats.getCacheSize());
    assertEquals(0L, stats.getCacheCount());
    assertEquals(0L, stats.getHitCount());
    assertEquals(0L, stats.getMissCount());

    cache.close(); // this triggers some assertions
}
From source file:org.elasticsearch.indices.cache.query.IndicesQueryCacheTests.java
License:Apache License
public void testTwoShards() throws IOException {
    Directory dir1 = newDirectory();
    IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig());
    w1.addDocument(new Document());
    DirectoryReader r1 = DirectoryReader.open(w1, false);
    w1.close();
    ShardId shard1 = new ShardId(new Index("index"), 0);
    r1 = ElasticsearchDirectoryReader.wrap(r1, shard1);
    IndexSearcher s1 = new IndexSearcher(r1);
    s1.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);

    Directory dir2 = newDirectory();
    IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig());
    w2.addDocument(new Document());
    DirectoryReader r2 = DirectoryReader.open(w2, false);
    w2.close();
    ShardId shard2 = new ShardId(new Index("index"), 1);
    r2 = ElasticsearchDirectoryReader.wrap(r2, shard2);
    IndexSearcher s2 = new IndexSearcher(r2);
    s2.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);

    Settings settings = Settings.builder().put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT, 10).build();
    IndicesQueryCache cache = new IndicesQueryCache(settings);
    s1.setQueryCache(cache);
    s2.setQueryCache(cache);

    assertEquals(1, s1.count(new DummyQuery(0)));
    QueryCacheStats stats1 = cache.getStats(shard1);
    assertEquals(1L, stats1.getCacheSize());
    assertEquals(1L, stats1.getCacheCount());
    assertEquals(0L, stats1.getHitCount());
    assertEquals(1L, stats1.getMissCount());
    QueryCacheStats stats2 = cache.getStats(shard2);
    assertEquals(0L, stats2.getCacheSize());
    assertEquals(0L, stats2.getCacheCount());
    assertEquals(0L, stats2.getHitCount());
    assertEquals(0L, stats2.getMissCount());

    assertEquals(1, s2.count(new DummyQuery(0)));
    stats1 = cache.getStats(shard1);
    assertEquals(1L, stats1.getCacheSize());
    assertEquals(1L, stats1.getCacheCount());
    assertEquals(0L, stats1.getHitCount());
    assertEquals(1L, stats1.getMissCount());
    stats2 = cache.getStats(shard2);
    assertEquals(1L, stats2.getCacheSize());
    assertEquals(1L, stats2.getCacheCount());
    assertEquals(0L, stats2.getHitCount());
    assertEquals(1L, stats2.getMissCount());

    for (int i = 0; i < 20; ++i) {
        assertEquals(1, s2.count(new DummyQuery(i)));
    }
    stats1 = cache.getStats(shard1);
    assertEquals(0L, stats1.getCacheSize()); // evicted
    assertEquals(1L, stats1.getCacheCount());
    assertEquals(0L, stats1.getHitCount());
    assertEquals(1L, stats1.getMissCount());
    stats2 = cache.getStats(shard2);
    assertEquals(10L, stats2.getCacheSize());
    assertEquals(20L, stats2.getCacheCount());
    assertEquals(1L, stats2.getHitCount());
    assertEquals(20L, stats2.getMissCount());

    IOUtils.close(r1, dir1);

    // no changes
    stats1 = cache.getStats(shard1);
    assertEquals(0L, stats1.getCacheSize());
    assertEquals(1L, stats1.getCacheCount());
    assertEquals(0L, stats1.getHitCount());
    assertEquals(1L, stats1.getMissCount());
    stats2 = cache.getStats(shard2);
    assertEquals(10L, stats2.getCacheSize());
    assertEquals(20L, stats2.getCacheCount());
    assertEquals(1L, stats2.getHitCount());
    assertEquals(20L, stats2.getMissCount());

    cache.onClose(shard1);

    // forgot everything about shard1
    stats1 = cache.getStats(shard1);
    assertEquals(0L, stats1.getCacheSize());
    assertEquals(0L, stats1.getCacheCount());
    assertEquals(0L, stats1.getHitCount());
    assertEquals(0L, stats1.getMissCount());
    stats2 = cache.getStats(shard2);
    assertEquals(10L, stats2.getCacheSize());
    assertEquals(20L, stats2.getCacheCount());
    assertEquals(1L, stats2.getHitCount());
    assertEquals(20L, stats2.getMissCount());

    IOUtils.close(r2, dir2);
    cache.onClose(shard2);

    // forgot everything about shard2
    stats1 = cache.getStats(shard1);
    assertEquals(0L, stats1.getCacheSize());
    assertEquals(0L, stats1.getCacheCount());
    assertEquals(0L, stats1.getHitCount());
    assertEquals(0L, stats1.getMissCount());
    stats2 = cache.getStats(shard2);
    assertEquals(0L, stats2.getCacheSize());
    assertEquals(0L, stats2.getCacheCount());
    assertEquals(0L, stats2.getHitCount());
    assertEquals(0L, stats2.getMissCount());

    cache.close(); // this triggers some assertions
}