Example usage for org.apache.lucene.search IndexSearcher setQueryCache

List of usage examples for org.apache.lucene.search IndexSearcher setQueryCache

Introduction

On this page you can find example usages of org.apache.lucene.search IndexSearcher setQueryCache.

Prototype

public void setQueryCache(QueryCache queryCache) 

Source Link

Document

Set the QueryCache to use when scores are not needed.
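Before the examples, here is a minimal hedged sketch of how this method is typically called: pass null to disable caching (as several examples below do when benchmarking or computing per-segment bit sets), or install a bounded LRUQueryCache together with a caching policy. The helper method name openSearcher and its Directory parameter are illustrative assumptions, not taken from any example on this page.

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LRUQueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.store.Directory;

// Sketch only: opens a searcher over an existing index and configures its query cache.
static IndexSearcher openSearcher(Directory dir, boolean enableCache) throws IOException {
    DirectoryReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    if (!enableCache) {
        searcher.setQueryCache(null); // disable caching, e.g. to keep benchmark timings honest
    } else {
        // cache at most 1000 queries using at most 64 MB of RAM, and cache aggressively
        searcher.setQueryCache(new LRUQueryCache(1000, 64 * 1024 * 1024));
        searcher.setQueryCachingPolicy(QueryCachingPolicy.ALWAYS_CACHE);
    }
    return searcher;
}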

Usage

From source file:DVBench.java

License:Apache License

static void doBench(int bpv) throws Exception {
    File file = new File("/data/indices/dvbench");
    file.mkdirs();
    Directory dir = FSDirectory.open(file);
    IndexWriterConfig config = new IndexWriterConfig(null);
    config.setOpenMode(OpenMode.CREATE);
    config.setMergeScheduler(new SerialMergeScheduler());
    config.setMergePolicy(new LogDocMergePolicy());
    config.setMaxBufferedDocs(25000);
    IndexWriter writer = new IndexWriter(dir, config);

    MyRandom r = new MyRandom();
    int numdocs = 400000;
    Document doc = new Document();
    Field dv = new NumericDocValuesField("dv", 0);
    Field inv = new LongField("inv", 0, Field.Store.NO);
    Field boxed = new BinaryDocValuesField("boxed", new BytesRef(8));
    Field boxed2 = new BinaryDocValuesField("boxed2", new BytesRef(8));

    doc.add(dv);
    doc.add(inv);
    doc.add(boxed);
    doc.add(boxed2);
    for (int i = 0; i < numdocs; i++) {
        // defeat blockpackedwriter
        final long value;
        if (i % 8192 == 0) {
            value = bpv == 64 ? Long.MIN_VALUE : 0;
        } else if (i % 8192 == 1) {
            value = bpv == 64 ? Long.MAX_VALUE : (1L << bpv) - 1;
        } else {
            value = r.nextLong(bpv);
        }
        dv.setLongValue(value);
        inv.setLongValue(value);
        box(value, boxed.binaryValue());
        box(value, boxed2.binaryValue());
        boxed2.binaryValue().length = (bpv + 7) / 8; // fixed length
        writer.addDocument(doc);
    }

    writer.close();

    // run dv search tests
    String description = "dv (bpv=" + bpv + ")";
    DirectoryReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null); // don't bench the cache

    int hash = 0;
    // warmup
    hash += search(description, searcher, "dv", 300, true);
    hash += search(description, searcher, "dv", 300, false);

    // Uninverting
    Map<String, UninvertingReader.Type> mapping = Collections.singletonMap("inv", UninvertingReader.Type.LONG);
    DirectoryReader uninv = UninvertingReader.wrap(reader, mapping);
    IndexSearcher searcher2 = new IndexSearcher(uninv);
    searcher2.setQueryCache(null); // don't bench the cache

    description = "fc (bpv=" + bpv + ")";
    // warmup
    hash += search(description, searcher2, "inv", 300, true);
    hash += search(description, searcher2, "inv", 300, false);

    // Boxed inside binary
    DirectoryReader boxedReader = new BinaryAsVLongReader(reader);
    IndexSearcher searcher3 = new IndexSearcher(boxedReader);
    searcher3.setQueryCache(null); // don't bench the cache
    description = "boxed (bpv=" + bpv + ")";
    // warmup
    hash += search(description, searcher3, "boxed", 300, true);
    hash += search(description, searcher3, "boxed", 300, false);

    description = "boxed fixed-length (bpv=" + bpv + ")";
    // warmup
    hash += search(description, searcher3, "boxed2", 300, true);
    hash += search(description, searcher3, "boxed2", 300, false);

    if (hash == 3) {
        // won't happen; using the accumulated hash keeps the benchmarked work from being optimized away
        System.out.println("hash=" + hash);
    }
    reader.close();
    dir.close();
}

From source file:com.floragunn.searchguard.configuration.DlsFlsFilterLeafReader.java

License:Open Source License

DlsFlsFilterLeafReader(final LeafReader delegate, final Set<String> includes, final Query dlsQuery) {
    super(delegate);

    flsEnabled = includes != null && !includes.isEmpty();
    dlsEnabled = dlsQuery != null;

    if (flsEnabled) {
        this.includes = includes.toArray(new String[0]);
        final FieldInfos infos = delegate.getFieldInfos();

        final List<FieldInfo> fi = new ArrayList<FieldInfo>(infos.size());
        for (final FieldInfo info : infos) {
            final String fname = info.name;
            if ((!WildcardMatcher.containsWildcard(fname) && includes.contains(fname))
                    || WildcardMatcher.matchAny(this.includes, fname)) {
                fi.add(info);
            }
        }

        this.flsFieldInfos = new FieldInfos(fi.toArray(new FieldInfo[0]));
    } else {
        this.includes = null;
        this.flsFieldInfos = null;
    }

    if (dlsEnabled) {
        try {

            //borrowed from Apache Lucene (Copyright Apache Software Foundation (ASF))
            final IndexSearcher searcher = new IndexSearcher(this);
            searcher.setQueryCache(null);
            final boolean needsScores = false;
            final Weight preserveWeight = searcher.createNormalizedWeight(dlsQuery, needsScores);

            final int maxDoc = in.maxDoc();
            final FixedBitSet bits = new FixedBitSet(maxDoc);
            final Scorer preserveScorer = preserveWeight.scorer(this.getContext());
            if (preserveScorer != null) {
                bits.or(preserveScorer.iterator());
            }

            if (in.hasDeletions()) {
                final Bits oldLiveDocs = in.getLiveDocs();
                assert oldLiveDocs != null;
                final DocIdSetIterator it = new BitSetIterator(bits, 0L);
                for (int i = it.nextDoc(); i != DocIdSetIterator.NO_MORE_DOCS; i = it.nextDoc()) {
                    if (!oldLiveDocs.get(i)) {
                        bits.clear(i);
                    }
                }
            }

            this.liveDocs = bits;
            this.numDocs = bits.cardinality();

        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } else {
        this.liveDocs = null;
        this.numDocs = -1;
    }
}

From source file:org.apache.solr.search.TestQueryWrapperFilter.java

License:Apache License

public void testQueryWrapperFilterPropagatesApproximations() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new StringField("foo", "bar", Store.NO));
    writer.addDocument(doc);
    writer.commit();
    final IndexReader reader = writer.getReader();
    writer.close();
    final IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null); // to still have approximations
    final Query query = new QueryWrapperFilter(
            new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()));
    final Weight weight = searcher.createNormalizedWeight(query, random().nextBoolean());
    final Scorer scorer = weight.scorer(reader.leaves().get(0));
    assertNotNull(scorer.twoPhaseIterator());
    reader.close();
    dir.close();
}

From source file:org.codelibs.elasticsearch.common.lucene.index.FilterableTermsEnum.java

License:Apache License

public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter)
        throws IOException {
    if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
        throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
    }
    this.docsEnumFlag = docsEnumFlag;
    List<LeafReaderContext> leaves = reader.leaves();
    List<Holder> enums = new ArrayList<>(leaves.size());
    final Weight weight;
    if (filter == null) {
        weight = null;
    } else {
        final IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        weight = searcher.createNormalizedWeight(filter, false);
    }
    for (LeafReaderContext context : leaves) {
        Terms terms = context.reader().terms(field);
        if (terms == null) {
            continue;
        }
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum == null) {
            continue;
        }
        BitSet bits = null;
        if (weight != null) {
            Scorer scorer = weight.scorer(context);
            if (scorer == null) {
                // fully filtered, none matching, no need to iterate on this
                continue;
            }
            DocIdSetIterator docs = scorer.iterator();

            // we want to force apply deleted docs
            final Bits liveDocs = context.reader().getLiveDocs();
            if (liveDocs != null) {
                docs = new FilteredDocIdSetIterator(docs) {
                    @Override
                    protected boolean match(int doc) {
                        return liveDocs.get(doc);
                    }
                };
            }

            bits = BitSet.of(docs, context.reader().maxDoc());
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
}

From source file:org.codelibs.elasticsearch.search.aggregations.bucket.nested.NestedAggregator.java

License:Apache License

@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub)
        throws IOException {
    IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);

    final BitSet parentDocs = parentFilter.getBitSet(ctx);
    final DocIdSetIterator childDocs = childDocsScorer != null ? childDocsScorer.iterator() : null;
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // if parentDoc is 0 then this parent has no child docs (child docs always appear
            // before their parent doc), so we can skip:
            if (parentDoc == 0 || parentDocs == null || childDocs == null) {
                return;
            }

            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }

            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}

From source file:org.elasticsearch.common.lucene.index.FilterableTermsEnum.java

License:Apache License

public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter)
        throws IOException {
    if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
        throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
    }
    this.docsEnumFlag = docsEnumFlag;
    if (filter == null) {
        // Important - need to use the doc count that includes deleted docs
        // or we have this issue: https://github.com/elasticsearch/elasticsearch/issues/7951
        numDocs = reader.maxDoc();
    }
    List<LeafReaderContext> leaves = reader.leaves();
    List<Holder> enums = new ArrayList<>(leaves.size());
    final Weight weight;
    if (filter == null) {
        weight = null;
    } else {
        final IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        weight = searcher.createNormalizedWeight(filter, false);
    }
    for (LeafReaderContext context : leaves) {
        Terms terms = context.reader().terms(field);
        if (terms == null) {
            continue;
        }
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum == null) {
            continue;
        }
        BitSet bits = null;
        if (weight != null) {
            Scorer scorer = weight.scorer(context);
            if (scorer == null) {
                // fully filtered, none matching, no need to iterate on this
                continue;
            }
            DocIdSetIterator docs = scorer.iterator();

            // we want to force apply deleted docs
            final Bits liveDocs = context.reader().getLiveDocs();
            if (liveDocs != null) {
                docs = new FilteredDocIdSetIterator(docs) {
                    @Override
                    protected boolean match(int doc) {
                        return liveDocs.get(doc);
                    }
                };
            }

            BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
            builder.or(docs);
            bits = builder.build().bits();

            // Count how many docs are in our filtered set
            // TODO make this lazy-loaded only for those that need it?
            numDocs += bits.cardinality();
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
}

From source file:org.elasticsearch.common.lucene.IndexCacheableQueryTests.java

License:Apache License

public void testCache() throws IOException {
    Directory dir = newDirectory();
    LRUQueryCache cache = new LRUQueryCache(10000, Long.MAX_VALUE);
    QueryCachingPolicy policy = QueryCachingPolicy.ALWAYS_CACHE;
    RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir);
    for (int i = 0; i < 10; ++i) {
        writer.addDocument(new Document());
    }

    IndexReader reader = writer.getReader();
    IndexSearcher searcher = newSearcher(reader);
    reader = searcher.getIndexReader(); // reader might be wrapped
    searcher.setQueryCache(cache);
    searcher.setQueryCachingPolicy(policy);

    assertEquals(0, cache.getCacheSize());
    DummyIndexCacheableQuery query = new DummyIndexCacheableQuery();
    searcher.count(query);
    int expectedCacheSize = reader.leaves().size();
    assertEquals(expectedCacheSize, cache.getCacheSize());
    searcher.count(query);
    assertEquals(expectedCacheSize, cache.getCacheSize());

    writer.addDocument(new Document());

    IndexReader reader2 = writer.getReader();
    searcher = newSearcher(reader2);
    reader2 = searcher.getIndexReader(); // reader might be wrapped
    searcher.setQueryCache(cache);
    searcher.setQueryCachingPolicy(policy);

    // since the query is only cacheable at the index level, it has to be recomputed on all leaves
    expectedCacheSize += reader2.leaves().size();
    searcher.count(query);
    assertEquals(expectedCacheSize, cache.getCacheSize());
    searcher.count(query);
    assertEquals(expectedCacheSize, cache.getCacheSize());

    reader.close();
    reader2.close();
    writer.close();
    assertEquals(0, cache.getCacheSize());
    dir.close();
}

From source file:org.elasticsearch.index.cache.bitset.BitsetFilterCache.java

License:Apache License

private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context)
        throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && index.getName().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
                + "] with cache of index [" + index.getName() + "]");
    }
    Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
        @Override
        public Cache<Query, Value> call() throws Exception {
            context.reader().addCoreClosedListener(BitsetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(query, new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createNormalizedWeight(query, false);
            final Scorer s = weight.scorer(context);
            final BitSet bitSet;
            if (s == null) {
                bitSet = null;
            } else {
                bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
            }

            Value value = new Value(bitSet, shardId);
            listener.onCache(shardId, value.bitset);
            return value;
        }
    }).bitset;
}

From source file:org.elasticsearch.index.engine.EngineSearcherFactory.java

License:Apache License

@Override
public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
    IndexSearcher searcher = super.newSearcher(reader, previousReader);
    searcher.setQueryCache(engineConfig.getQueryCache());
    searcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
    searcher.setSimilarity(engineConfig.getSimilarity());
    return searcher;
}

From source file:org.elasticsearch.index.engine.IndexSearcherWrappingService.java

License:Apache License

/**
 * If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher
 * gets wrapped and a new {@link Engine.Searcher} instances is returned, otherwise the provided {@link Engine.Searcher} is returned.
 *
 * This is invoked each time a {@link Engine.Searcher} is requested to do an operation. (for example search)
 */
public final Engine.Searcher wrap(EngineConfig engineConfig, final Engine.Searcher engineSearcher)
        throws IOException {
    final ElasticsearchDirectoryReader elasticsearchDirectoryReader = ElasticsearchDirectoryReader
            .getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
    if (elasticsearchDirectoryReader == null) {
        throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
    }
    if (wrapper == null) {
        return engineSearcher;
    }
    NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(
            engineSearcher.getDirectoryReader());
    DirectoryReader reader = wrapper.wrap(nonClosingReaderWrapper);
    if (reader != nonClosingReaderWrapper) {
        if (reader.getCoreCacheKey() != elasticsearchDirectoryReader.getCoreCacheKey()) {
            throw new IllegalStateException(
                    "wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate"
                            + " to the original readers core cache key. Wrapped readers can't be used as cache keys since their are used only per request which would lead to subtle bugs");
        }
        if (ElasticsearchDirectoryReader
                .getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
            // prevent that somebody wraps with a non-filter reader
            throw new IllegalStateException(
                    "wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
        }
    }

    final IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
    innerIndexSearcher.setQueryCache(engineConfig.getQueryCache());
    innerIndexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
    innerIndexSearcher.setSimilarity(engineConfig.getSimilarity());
    // TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point
    // For example if IndexSearcher#rewrite() is overridden, then IndexSearcher#createNormalizedWeight also needs to be overridden
    // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times
    final IndexSearcher indexSearcher = wrapper.wrap(engineConfig, innerIndexSearcher);
    if (reader == nonClosingReaderWrapper && indexSearcher == innerIndexSearcher) {
        return engineSearcher;
    } else {
        return new Engine.Searcher(engineSearcher.source(), indexSearcher) {
            @Override
            public void close() throws ElasticsearchException {
                try {
                    reader().close();
                    // we close the reader to make sure wrappers can release resources if needed....
                    // our NonClosingReaderWrapper makes sure that our reader is not closed
                } catch (IOException e) {
                    throw new ElasticsearchException("failed to close reader", e);
                } finally {
                    engineSearcher.close();
                }

            }
        };
    }
}