Example usage for org.apache.lucene.search Weight scorer

Introduction

This page collects example usages of org.apache.lucene.search.Weight.scorer(LeafReaderContext) drawn from open-source projects.

Prototype

public abstract Scorer scorer(LeafReaderContext context) throws IOException;

Document

Returns a Scorer which can iterate in order over all matching documents and assign them a score.
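
Before the project examples, here is a minimal sketch of the canonical calling pattern, assuming the Lucene 5/6-era API used throughout this page (IndexSearcher.createNormalizedWeight was removed in Lucene 8 in favour of createWeight(query, ScoreMode, boost)). Two conventions matter: scorer(context) may return null when nothing in that segment matches, and the doc ids it produces are segment-local.

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

static void scoreAllMatches(IndexSearcher searcher, Query query) throws IOException {
    final Weight weight = searcher.createNormalizedWeight(query, true); // true: scores are needed
    for (final LeafReaderContext context : searcher.getIndexReader().leaves()) {
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            continue; // no matching documents in this segment
        }
        final DocIdSetIterator it = scorer.iterator();
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            // doc is segment-local; add context.docBase for the index-wide id
            System.out.println((context.docBase + doc) + " => " + scorer.score());
        }
    }
}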

Usage

From source file:com.floragunn.searchguard.configuration.DlsFlsFilterLeafReader.java

License:Open Source License

DlsFlsFilterLeafReader(final LeafReader delegate, final Set<String> includes, final Query dlsQuery) {
    super(delegate);

    flsEnabled = includes != null && !includes.isEmpty();
    dlsEnabled = dlsQuery != null;

    if (flsEnabled) {
        this.includes = includes.toArray(new String[0]);
        final FieldInfos infos = delegate.getFieldInfos();

        final List<FieldInfo> fi = new ArrayList<FieldInfo>(infos.size());
        for (final FieldInfo info : infos) {
            final String fname = info.name;
            if ((!WildcardMatcher.containsWildcard(fname) && includes.contains(fname))
                    || WildcardMatcher.matchAny(this.includes, fname)) {
                fi.add(info);
            }
        }

        this.flsFieldInfos = new FieldInfos(fi.toArray(new FieldInfo[0]));
    } else {
        this.includes = null;
        this.flsFieldInfos = null;
    }

    if (dlsEnabled) {
        try {

            //borrowed from Apache Lucene (Copyright Apache Software Foundation (ASF))
            final IndexSearcher searcher = new IndexSearcher(this);
            searcher.setQueryCache(null);
            final boolean needsScores = false;
            final Weight preserveWeight = searcher.createNormalizedWeight(dlsQuery, needsScores);

            final int maxDoc = in.maxDoc();
            final FixedBitSet bits = new FixedBitSet(maxDoc);
            final Scorer preserveScorer = preserveWeight.scorer(this.getContext());
            if (preserveScorer != null) {
                bits.or(preserveScorer.iterator());
            }

            if (in.hasDeletions()) {
                final Bits oldLiveDocs = in.getLiveDocs();
                assert oldLiveDocs != null;
                final DocIdSetIterator it = new BitSetIterator(bits, 0L);
                for (int i = it.nextDoc(); i != DocIdSetIterator.NO_MORE_DOCS; i = it.nextDoc()) {
                    if (!oldLiveDocs.get(i)) {
                        bits.clear(i);
                    }
                }
            }

            this.liveDocs = bits;
            this.numDocs = bits.cardinality();

        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } else {
        this.liveDocs = null;
        this.numDocs = -1;
    }
}
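
Two details of this constructor are worth calling out: weight.scorer(this.getContext()) may return null when the DLS query matches nothing in this reader, and the resulting FixedBitSet is intersected with the reader's existing liveDocs so that already-deleted documents stay invisible.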

From source file:com.o19s.es.ltr.logging.LoggingFetchSubPhase.java

License:Apache License

void doLog(Query query, List<HitLogConsumer> loggers, IndexSearcher searcher, SearchHit[] hits)
        throws IOException {
    // Reorder hits by id so we can scan all the docs belonging to the same
    // segment by reusing the same scorer.
    SearchHit[] reordered = new SearchHit[hits.length];
    System.arraycopy(hits, 0, reordered, 0, hits.length);
    Arrays.sort(reordered, Comparator.comparingInt(SearchHit::docId));

    int hitUpto = 0;
    int readerUpto = -1;
    int endDoc = 0;
    int docBase = 0;
    Scorer scorer = null;
    Weight weight = searcher.createNormalizedWeight(query, true);
    // Loop logic borrowed from lucene QueryRescorer
    while (hitUpto < reordered.length) {
        SearchHit hit = reordered[hitUpto];
        int docID = hit.docId();
        loggers.forEach((l) -> l.nextDoc(hit));
        LeafReaderContext readerContext = null;
        while (docID >= endDoc) {
            readerUpto++;
            readerContext = searcher.getTopReaderContext().leaves().get(readerUpto);
            endDoc = readerContext.docBase + readerContext.reader().maxDoc();
        }

        if (readerContext != null) {
            // We advanced to another segment:
            docBase = readerContext.docBase;
            scorer = weight.scorer(readerContext);
        }

        if (scorer != null) {
            int targetDoc = docID - docBase;
            int actualDoc = scorer.docID();
            if (actualDoc < targetDoc) {
                actualDoc = scorer.iterator().advance(targetDoc);
            }
            if (actualDoc == targetDoc) {
                // Scoring will trigger log collection
                scorer.score();
            }
        }

        hitUpto++;
    }
}
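
The loop structure (borrowed from Lucene's QueryRescorer) works because the hits were sorted by doc id first: each segment's Scorer is created exactly once via weight.scorer(readerContext) and then only advanced forward, which matters since scorer iterators cannot move backwards.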

From source file:org.apache.solr.ltr.TestLTRScoringQuery.java

License:Apache License

private LTRScoringQuery.ModelWeight performQuery(TopDocs hits, IndexSearcher searcher, int docid,
        LTRScoringQuery model) throws IOException, ModelException {
    final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext().leaves();
    final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
    final LeafReaderContext context = leafContexts.get(n);
    final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

    final Weight weight = searcher.createNormalizedWeight(model, true);
    final Scorer scorer = weight.scorer(context);

    // rerank using the field final-score
    scorer.iterator().advance(deBasedDoc);
    scorer.score();

    // assertEquals(42.0f, score, 0.0001);
    // assertTrue(weight instanceof AssertingWeight);
    // (AssertingIndexSearcher)
    assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
    final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
    return modelWeight;

}

From source file:org.apache.solr.ltr.TestSelectiveWeightCreation.java

License:Apache License

private LTRScoringQuery.ModelWeight performQuery(TopDocs hits, IndexSearcher searcher, int docid,
        LTRScoringQuery model) throws IOException, ModelException {
    final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext().leaves();
    final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
    final LeafReaderContext context = leafContexts.get(n);
    final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

    final Weight weight = searcher.createNormalizedWeight(model, true);
    final Scorer scorer = weight.scorer(context);

    // rerank using the field final-score
    scorer.iterator().advance(deBasedDoc);
    scorer.score();
    assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
    final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
    return modelWeight;

}
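
Both Solr LTR tests follow the same recipe: locate the leaf containing the top hit with ReaderUtil.subIndex, convert the global doc id to a segment-local one by subtracting context.docBase, advance the scorer's iterator to that document, and only then call score().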

From source file:org.apache.solr.search.QueryWrapperFilter.java

License:Apache License

@Override
public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) throws IOException {
    // get a private context that is used to rewrite, createWeight and score eventually
    final LeafReaderContext privateContext = context.reader().getContext();
    final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query, false);

    DocIdSet set = new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() throws IOException {
            Scorer scorer = weight.scorer(privateContext);
            return scorer == null ? null : scorer.iterator();
        }

        @Override
        public long ramBytesUsed() {
            return 0L;
        }
    };
    return BitsFilteredDocIdSet.wrap(set, acceptDocs);
}
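
Note how a null Scorer is translated into a null DocIdSetIterator: the DocIdSet contract allows iterator() to return null as shorthand for an empty set, so no sentinel iterator needs to be allocated.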

From source file:org.apache.solr.search.TestQueryWrapperFilter.java

License:Apache License

public void testQueryWrapperFilterPropagatesApproximations() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new StringField("foo", "bar", Store.NO));
    writer.addDocument(doc);
    writer.commit();
    final IndexReader reader = writer.getReader();
    writer.close();
    final IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null); // to still have approximations
    final Query query = new QueryWrapperFilter(
            new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()));
    final Weight weight = searcher.createNormalizedWeight(query, random().nextBoolean());
    final Scorer scorer = weight.scorer(reader.leaves().get(0));
    assertNotNull(scorer.twoPhaseIterator());
    reader.close();
    dir.close();
}
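
The assertion holds because QueryWrapperFilter propagates the wrapped query's TwoPhaseIterator rather than eagerly verifying matches (the query cache is disabled above precisely so the approximation is not collapsed into a cached bit set). As a hedged sketch of how such a two-phase scorer is normally consumed, with the scorer parameter below standing in for the one obtained from weight.scorer(...):

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;

static void collectTwoPhase(Scorer scorer) throws IOException {
    final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
    if (twoPhase == null) {
        return; // no cheap approximation; iterate scorer.iterator() directly instead
    }
    final DocIdSetIterator approximation = twoPhase.approximation();
    for (int doc = approximation.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = approximation.nextDoc()) {
        if (twoPhase.matches()) { // run the exact, possibly expensive, per-document check
            // doc is a confirmed match
        }
    }
}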

From source file:org.codelibs.elasticsearch.common.lucene.index.FilterableTermsEnum.java

License:Apache License

public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter)
        throws IOException {
    if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
        throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
    }
    this.docsEnumFlag = docsEnumFlag;
    List<LeafReaderContext> leaves = reader.leaves();
    List<Holder> enums = new ArrayList<>(leaves.size());
    final Weight weight;
    if (filter == null) {
        weight = null;
    } else {
        final IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        weight = searcher.createNormalizedWeight(filter, false);
    }
    for (LeafReaderContext context : leaves) {
        Terms terms = context.reader().terms(field);
        if (terms == null) {
            continue;
        }
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum == null) {
            continue;
        }
        BitSet bits = null;
        if (weight != null) {
            Scorer scorer = weight.scorer(context);
            if (scorer == null) {
                // fully filtered, none matching, no need to iterate on this
                continue;
            }
            DocIdSetIterator docs = scorer.iterator();

            // we want to force apply deleted docs
            final Bits liveDocs = context.reader().getLiveDocs();
            if (liveDocs != null) {
                docs = new FilteredDocIdSetIterator(docs) {
                    @Override
                    protected boolean match(int doc) {
                        return liveDocs.get(doc);
                    }
                };
            }

            bits = BitSet.of(docs, context.reader().maxDoc());
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
}
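
Returning null from weight.scorer(context) lets the constructor skip the segment entirely (the filter matches nothing there). Also note the FilteredDocIdSetIterator wrapper: scorers iterate over deleted documents too, so live docs must be applied explicitly.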

From source file:org.codelibs.elasticsearch.common.lucene.Lucene.java

License:Apache License

/**
 * Check whether there is one or more documents matching the provided query.
 */
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
    final Weight weight = searcher.createNormalizedWeight(query, false);
    // the scorer API should be more efficient at stopping after the first
    // match than the bulk scorer API
    for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            continue;
        }
        final Bits liveDocs = context.reader().getLiveDocs();
        final DocIdSetIterator iterator = scorer.iterator();
        for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                return true;
            }
        }
    }
    return false;
}
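
A hypothetical call site for this helper, as a sketch only (the directory variable and the "foo"/"bar" term are invented for illustration):

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;

try (IndexReader reader = DirectoryReader.open(directory)) {
    final IndexSearcher searcher = new IndexSearcher(reader);
    // returns as soon as the first live matching document is seen
    final boolean hasBar = Lucene.exists(searcher, new TermQuery(new Term("foo", "bar")));
}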

From source file:org.codelibs.elasticsearch.search.aggregations.bucket.nested.NestedAggregator.java

License:Apache License

@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub)
        throws IOException {
    IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);

    final BitSet parentDocs = parentFilter.getBitSet(ctx);
    final DocIdSetIterator childDocs = childDocsScorer != null ? childDocsScorer.iterator() : null;
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // if parentDoc is 0 then this parent has no child docs, because child docs
            // are always indexed before their parent doc, so we can skip:
            if (parentDoc == 0 || parentDocs == null || childDocs == null) {
                return;
            }

            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }

            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
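
Because nested (block-join) indexing writes child documents immediately before their parent, the collector only has to advance the child scorer's iterator into the window between the previous parent (parentDocs.prevSetBit(parentDoc - 1)) and the current parentDoc.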

From source file:org.elasticsearch.common.lucene.index.FilterableTermsEnum.java

License:Apache License

public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter)
        throws IOException {
    if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
        throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
    }
    this.docsEnumFlag = docsEnumFlag;
    if (filter == null) {
        // Important - need to use the doc count that includes deleted docs
        // or we have this issue: https://github.com/elasticsearch/elasticsearch/issues/7951
        numDocs = reader.maxDoc();
    }
    List<LeafReaderContext> leaves = reader.leaves();
    List<Holder> enums = new ArrayList<>(leaves.size());
    final Weight weight;
    if (filter == null) {
        weight = null;
    } else {
        final IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        weight = searcher.createNormalizedWeight(filter, false);
    }
    for (LeafReaderContext context : leaves) {
        Terms terms = context.reader().terms(field);
        if (terms == null) {
            continue;
        }
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum == null) {
            continue;
        }
        BitSet bits = null;
        if (weight != null) {
            Scorer scorer = weight.scorer(context);
            if (scorer == null) {
                // fully filtered, none matching, no need to iterate on this
                continue;
            }
            DocIdSetIterator docs = scorer.iterator();

            // we want to force apply deleted docs
            final Bits liveDocs = context.reader().getLiveDocs();
            if (liveDocs != null) {
                docs = new FilteredDocIdSetIterator(docs) {
                    @Override
                    protected boolean match(int doc) {
                        return liveDocs.get(doc);
                    }
                };
            }

            BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
            builder.or(docs);
            bits = builder.build().bits();

            // Count how many docs are in our filtered set
            // TODO make this lazy-loaded only for those that need it?
            numDocs += bits.cardinality();
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
}
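
This is the upstream Elasticsearch version of the codelibs variant shown earlier. The differences are that it tracks numDocs (deliberately starting from maxDoc() when unfiltered, so deleted documents are counted) and that it materializes the filtered matches through BitDocIdSet.Builder rather than BitSet.of.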