Example usage for org.apache.lucene.search IndexSearcher getTopReaderContext

Introduction

This page collects example usages of org.apache.lucene.search.IndexSearcher.getTopReaderContext from open-source projects.

Prototype


public IndexReaderContext getTopReaderContext() 

Document

Returns this searcher's top-level IndexReaderContext.
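
Before the project examples below, here is a minimal, self-contained sketch of the call itself. The index path and class name are illustrative assumptions, not taken from any of the projects listed; the leaves() loop shows the per-segment access pattern that most of the examples rely on.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;

public class TopReaderContextExample {
    public static void main(String[] args) throws IOException {
        // Open an existing index; the path is a placeholder.
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
            IndexSearcher searcher = new IndexSearcher(reader);

            // Root of the reader tree for this searcher.
            IndexReaderContext topContext = searcher.getTopReaderContext();

            // leaves() exposes one LeafReaderContext per segment.
            for (LeafReaderContext leaf : topContext.leaves()) {
                System.out.println("ord=" + leaf.ord + " docBase=" + leaf.docBase
                        + " maxDoc=" + leaf.reader().maxDoc());
            }
        }
    }
}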

Usage

From source file: doser.lucene.query.TermQuery.java

License: Apache License

@Override
public Weight createWeight(final IndexSearcher searcher) throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermContext termState;
    if ((perReaderTermS == null) || (perReaderTermS.topReaderContext != context)) {
        // make TermQuery single-pass if we don't have a PRTS or if the
        // context differs!
        termState = TermContext.build(context, term);
    } else {
        // PRTS was pre-build for this IS
        termState = perReaderTermS;
    }

    // we must not ignore the given docFreq - if set use the given value
    // (lie)
    if (docFreq != -1) {
        termState.setDocFreq(docFreq);
    }

    return new TermWeight(searcher, termState);
}

From source file: io.crate.execution.engine.collect.collectors.LuceneBatchIterator.java

License: Apache License

LuceneBatchIterator(IndexSearcher indexSearcher, Query query, @Nullable Float minScore, boolean doScores,
        CollectorContext collectorContext, RamAccountingContext ramAccountingContext,
        List<? extends Input<?>> inputs, Collection<? extends LuceneCollectorExpression<?>> expressions) {
    this.indexSearcher = indexSearcher;
    this.query = query;
    this.doScores = doScores || minScore != null;
    this.minScore = minScore;
    this.collectorContext = collectorContext;
    this.visitor = collectorContext.visitor();
    this.ramAccountingContext = ramAccountingContext;
    this.row = new InputRow(inputs);
    this.expressions = expressions.toArray(new LuceneCollectorExpression[0]);
    // Fetch the per-segment leaf contexts once; iteration then proceeds segment by segment.
    leaves = indexSearcher.getTopReaderContext().leaves();
    leavesIt = leaves.iterator();
}

From source file: io.crate.operation.collect.collectors.LuceneBatchIterator.java

License: Apache License

LuceneBatchIterator(IndexSearcher indexSearcher, Query query, @Nullable Float minScore, boolean doScores,
        CollectorContext collectorContext, RamAccountingContext ramAccountingContext,
        List<? extends Input<?>> inputs, Collection<? extends LuceneCollectorExpression<?>> expressions) {
    this.indexSearcher = indexSearcher;
    this.query = query;
    this.doScores = doScores || minScore != null;
    this.minScore = minScore;
    this.collectorContext = collectorContext;
    this.visitor = collectorContext.visitor();
    this.ramAccountingContext = ramAccountingContext;
    this.inputs = Columns.wrap(inputs);
    this.expressions = expressions.toArray(new LuceneCollectorExpression[0]);
    // Fetch the per-segment leaf contexts once; iteration then proceeds segment by segment.
    leaves = indexSearcher.getTopReaderContext().leaves();
    leavesIt = leaves.iterator();
}

From source file: org.apache.solr.ltr.LTRRescorer.java

License: Apache License

@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID)
        throws IOException {

    // Locate the leaf (segment) context that contains the global docID.
    final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext().leaves();
    final int n = ReaderUtil.subIndex(docID, leafContexts);
    final LeafReaderContext context = leafContexts.get(n);
    final int deBasedDoc = docID - context.docBase;
    final Weight modelWeight = searcher.createNormalizedWeight(scoringQuery, true);
    return modelWeight.explain(context, deBasedDoc);
}

From source file: org.apache.solr.ltr.TestLTRScoringQuery.java

License: Apache License

private LTRScoringQuery.ModelWeight performQuery(TopDocs hits, IndexSearcher searcher, int docid,
        LTRScoringQuery model) throws IOException, ModelException {
    // Find the segment containing the top hit and convert its doc id to a segment-relative id.
    final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext().leaves();
    final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
    final LeafReaderContext context = leafContexts.get(n);
    final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

    final Weight weight = searcher.createNormalizedWeight(model, true);
    final Scorer scorer = weight.scorer(context);

    // rerank using the field final-score
    scorer.iterator().advance(deBasedDoc);
    scorer.score();

    // assertEquals(42.0f, score, 0.0001);
    // assertTrue(weight instanceof AssertingWeight);
    // (AssertingIndexSearcher)
    assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
    final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
    return modelWeight;

}

From source file: org.apache.solr.ltr.TestSelectiveWeightCreation.java

License: Apache License

private LTRScoringQuery.ModelWeight performQuery(TopDocs hits, IndexSearcher searcher, int docid,
        LTRScoringQuery model) throws IOException, ModelException {
    // Find the segment containing the top hit and convert its doc id to a segment-relative id.
    final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext().leaves();
    final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
    final LeafReaderContext context = leafContexts.get(n);
    final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

    final Weight weight = searcher.createNormalizedWeight(model, true);
    final Scorer scorer = weight.scorer(context);

    // rerank using the field final-score
    scorer.iterator().advance(deBasedDoc);
    scorer.score();
    assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
    final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
    return modelWeight;

}

From source file: org.codelibs.elasticsearch.common.lucene.all.AllTermQuery.java

License: Apache License

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    if (needsScores == false) {
        return new TermQuery(term).createWeight(searcher, needsScores);
    }
    final TermContext termStates = TermContext.build(searcher.getTopReaderContext(), term);
    final CollectionStatistics collectionStats = searcher.collectionStatistics(term.field());
    final TermStatistics termStats = searcher.termStatistics(term, termStates);
    final Similarity similarity = searcher.getSimilarity(needsScores);
    final SimWeight stats = similarity.computeWeight(collectionStats, termStats);
    return new Weight(this) {

        @Override
        public float getValueForNormalization() throws IOException {
            return stats.getValueForNormalization();
        }

        @Override
        public void normalize(float norm, float topLevelBoost) {
            stats.normalize(norm, topLevelBoost);
        }

        @Override
        public void extractTerms(Set<Term> terms) {
            terms.add(term);
        }

        @Override
        public Explanation explain(LeafReaderContext context, int doc) throws IOException {
            AllTermScorer scorer = scorer(context);
            if (scorer != null) {
                int newDoc = scorer.iterator().advance(doc);
                if (newDoc == doc) {
                    float score = scorer.score();
                    float freq = scorer.freq();
                    SimScorer docScorer = similarity.simScorer(stats, context);
                    Explanation freqExplanation = Explanation.match(freq, "termFreq=" + freq);
                    Explanation termScoreExplanation = docScorer.explain(doc, freqExplanation);
                    Explanation payloadBoostExplanation = Explanation.match(scorer.payloadBoost(),
                            "payloadBoost=" + scorer.payloadBoost());
                    return Explanation.match(score,
                            "weight(" + getQuery() + " in " + doc + ") ["
                                    + similarity.getClass().getSimpleName() + "], product of:",
                            termScoreExplanation, payloadBoostExplanation);
                }
            }
            return Explanation.noMatch("no matching term");
        }

        @Override
        public AllTermScorer scorer(LeafReaderContext context) throws IOException {
            final Terms terms = context.reader().terms(term.field());
            if (terms == null) {
                return null;
            }
            final TermsEnum termsEnum = terms.iterator();
            if (termsEnum == null) {
                return null;
            }
            final TermState state = termStates.get(context.ord);
            if (state == null) {
                // Term does not exist in this segment
                return null;
            }
            termsEnum.seekExact(term.bytes(), state);
            PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS);
            assert docs != null;
            return new AllTermScorer(this, docs, similarity.simScorer(stats, context));
        }

    };
}

From source file: org.elasticsearch.index.search.child.CustomQueryWrappingFilter.java

License: Apache License

@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
    final SearchContext searchContext = SearchContext.current();
    if (docIdSets == null) {
        assert searcher == null;
        IndexSearcher searcher = searchContext.searcher();
        docIdSets = new IdentityHashMap<AtomicReader, DocIdSet>();
        this.searcher = searcher;
        searchContext.addReleasable(this);

        final Weight weight = searcher.createNormalizedWeight(query);
        // Pre-compute a DocIdSet for every segment of the searcher's top-level context.
        for (final AtomicReaderContext leaf : searcher.getTopReaderContext().leaves()) {
            final DocIdSet set = DocIdSets.toCacheable(leaf.reader(), new DocIdSet() {
                @Override
                public DocIdSetIterator iterator() throws IOException {
                    return weight.scorer(leaf, true, false, null);
                }

                @Override
                public boolean isCacheable() {
                    return false;
                }
            });
            docIdSets.put(leaf.reader(), set);
        }
    } else {
        assert searcher == SearchContext.current().searcher();
    }
    final DocIdSet set = docIdSets.get(context.reader());
    if (set != null && acceptDocs != null) {
        return BitsFilteredDocIdSet.wrap(set, acceptDocs);
    }
    return set;
}

From source file: org.elasticsearch.search.aggregations.AggregatorTestCase.java

License: Apache License

/**
 * Divides the provided {@link IndexSearcher} in sub-searcher, one for each segment,
 * builds an aggregator for each sub-searcher filtered by the provided {@link Query} and
 * returns the reduced {@link InternalAggregation}.
 */
protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduce(IndexSearcher searcher,
        Query query, AggregationBuilder builder, MappedFieldType... fieldTypes) throws IOException {
    final IndexReaderContext ctx = searcher.getTopReaderContext();

    final ShardSearcher[] subSearchers;
    if (ctx instanceof LeafReaderContext) {
        subSearchers = new ShardSearcher[1];
        subSearchers[0] = new ShardSearcher((LeafReaderContext) ctx, ctx);
    } else {
        final CompositeReaderContext compCTX = (CompositeReaderContext) ctx;
        final int size = compCTX.leaves().size();
        subSearchers = new ShardSearcher[size];
        for (int searcherIDX = 0; searcherIDX < subSearchers.length; searcherIDX++) {
            final LeafReaderContext leave = compCTX.leaves().get(searcherIDX);
            subSearchers[searcherIDX] = new ShardSearcher(leave, compCTX);
        }
    }

    List<InternalAggregation> aggs = new ArrayList<>();
    Query rewritten = searcher.rewrite(query);
    Weight weight = searcher.createWeight(rewritten, true);
    C root = createAggregator(builder, searcher, fieldTypes);
    try {
        for (ShardSearcher subSearcher : subSearchers) {
            C a = createAggregator(builder, subSearcher, fieldTypes);
            a.preCollection();
            subSearcher.search(weight, a);
            a.postCollection();
            aggs.add(a.buildAggregation(0L));
        }
        if (aggs.isEmpty()) {
            return null;
        } else {
            if (randomBoolean()) {
                // sometimes do an incremental reduce
                List<InternalAggregation> internalAggregations = randomSubsetOf(
                        randomIntBetween(1, aggs.size()), aggs);
                A internalAgg = (A) aggs.get(0).doReduce(internalAggregations,
                        new InternalAggregation.ReduceContext(root.context().bigArrays(), null, false));
                aggs.removeAll(internalAggregations);
                aggs.add(internalAgg);
            }
            // now do the final reduce
            @SuppressWarnings("unchecked")
            A internalAgg = (A) aggs.get(0).doReduce(aggs,
                    new InternalAggregation.ReduceContext(root.context().bigArrays(), null, true));
            return internalAgg;
        }
    } finally {
        Releasables.close(releasables);
        releasables.clear();
    }
}

From source file: org.elasticsearch.test.engine.MockRobinEngine.java

License: Apache License

@Override
protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager)
        throws EngineException {
    // this executes basic query checks and asserts that weights are normalized only once etc.
    final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(random,
            searcher.getTopReaderContext());
    assertingIndexSearcher.setSimilarity(searcher.getSimilarity());
    return new AssertingSearcher(super.newSearcher(source, assertingIndexSearcher, manager), shardId);
}