Example usage for org.apache.lucene.search Scorer twoPhaseIterator

List of usage examples for org.apache.lucene.search Scorer twoPhaseIterator

Introduction

In this page you can find the example usage for org.apache.lucene.search Scorer twoPhaseIterator.

Prototype

public TwoPhaseIterator twoPhaseIterator() 

Source Link

Document

Optional method: Return a TwoPhaseIterator view of this Scorer.

Usage

From source file:io.crate.lucene.GenericFunctionQuery.java

License: Apache License

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    // Constant-score weight: a document either satisfies the generic function or it
    // doesn't, and every match scores 0, so normalization is a no-op.
    return new Weight(this) {

        @Override
        public void extractTerms(Set<Term> terms) {
            // Nothing to extract: matching is driven by the function, not index terms.
        }

        /**
         * Explains whether {@code doc} matches by advancing the scorer onto it,
         * preferring the two-phase iterator so the expensive verification only
         * runs on documents the approximation already accepts.
         */
        @Override
        public Explanation explain(LeafReaderContext context, int doc) throws IOException {
            final Scorer s = scorer(context);
            final boolean match;
            final TwoPhaseIterator twoPhase = s.twoPhaseIterator();
            if (twoPhase == null) {
                match = s.iterator().advance(doc) == doc;
            } else {
                // Cheap approximation first, then the full match check.
                match = twoPhase.approximation().advance(doc) == doc && twoPhase.matches();
            }
            if (match) {
                assert s.score() == 0f : "score must be 0";
                return Explanation.match(0f, "Match on id " + doc);
            } else {
                // BUG FIX: previously returned Explanation.match(0f, "No match on id ..."),
                // i.e. an Explanation whose isMatch() is true while its description says
                // the document did not match. A non-match must be reported as noMatch.
                return Explanation.noMatch("No match on id " + doc);
            }
        }

        @Override
        public float getValueForNormalization() throws IOException {
            return 0; // constant-score: contributes nothing to the query norm
        }

        @Override
        public void normalize(float norm, float boost) {
            // Intentionally empty: the score is a constant 0, normalization has no effect.
        }

        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            return new ConstantScoreScorer(this, 0f, getTwoPhaseIterator(context));
        }
    };
}

From source file:org.apache.solr.search.TestQueryWrapperFilter.java

License: Apache License

public void testQueryWrapperFilterPropagatesApproximations() throws IOException {
    Directory directory = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);

    // Index a single document so the wrapped term query has something to match.
    Document document = new Document();
    document.add(new StringField("foo", "bar", Store.NO));
    indexWriter.addDocument(document);
    indexWriter.commit();

    final IndexReader indexReader = indexWriter.getReader();
    indexWriter.close();

    final IndexSearcher indexSearcher = new IndexSearcher(indexReader);
    // Disable the query cache: a cached DocIdSet would hide the approximation.
    indexSearcher.setQueryCache(null);

    final Query filtered = new QueryWrapperFilter(
            new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()));
    final Weight filterWeight = indexSearcher.createNormalizedWeight(filtered, random().nextBoolean());
    final Scorer filterScorer = filterWeight.scorer(indexReader.leaves().get(0));

    // The wrapper must propagate the inner query's two-phase iterator, not swallow it.
    assertNotNull(filterScorer.twoPhaseIterator());

    indexReader.close();
    directory.close();
}

From source file:org.codelibs.elasticsearch.common.lucene.Lucene.java

License: Apache License

/**
 * Given a {@link Scorer}, return a {@link Bits} instance that will match
 * all documents contained in the set.
 * <p>
 * The returned {@link Bits} MUST be consumed in order (non-decreasing indices):
 * it advances a single underlying iterator and cannot rewind.
 *
 * @param maxDoc number of documents in the segment; defines the valid index range
 * @param scorer the scorer to expose as bits, or {@code null} for "matches nothing"
 */
public static Bits asSequentialAccessBits(final int maxDoc, @Nullable Scorer scorer) throws IOException {
    if (scorer == null) {
        return new Bits.MatchNoBits(maxDoc);
    }
    // If a two-phase view exists, iterate its cheap approximation and only run the
    // confirmation step (twoPhase.matches()) on candidate documents.
    final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
    final DocIdSetIterator iterator;
    if (twoPhase == null) {
        iterator = scorer.iterator();
    } else {
        iterator = twoPhase.approximation();
    }

    return new Bits() {

        // Last index passed to get(); used to enforce in-order consumption.
        int previous = -1;
        // Cached answer for `previous` — matches() must not run twice per doc.
        boolean previousMatched = false;

        @Override
        public boolean get(int index) {
            if (index < 0 || index >= maxDoc) {
                throw new IndexOutOfBoundsException(index + " is out of bounds: [" + 0 + "-" + maxDoc + "[");
            }
            if (index < previous) {
                throw new IllegalArgumentException("This Bits instance can only be consumed in order. "
                        + "Got called on [" + index + "] while previously called on [" + previous + "]");
            }
            if (index == previous) {
                // we cache whether it matched because it is illegal to call
                // twoPhase.matches() twice
                return previousMatched;
            }
            previous = index;

            int doc = iterator.docID();
            if (doc < index) {
                try {
                    doc = iterator.advance(index);
                } catch (IOException e) {
                    throw new IllegalStateException("Cannot advance iterator", e);
                }
            }
            if (index == doc) {
                try {
                    // No two-phase view means the iterator itself is exact.
                    return previousMatched = twoPhase == null || twoPhase.matches();
                } catch (IOException e) {
                    throw new IllegalStateException("Cannot validate match", e);
                }
            }
            return previousMatched = false;
        }

        @Override
        public int length() {
            return maxDoc;
        }
    };
}

From source file:org.elasticsearch.index.query.functionscore.FunctionScoreTests.java

License: Apache License

public void testPropagatesApproximations() throws IOException {
    Query inner = new RandomApproximationQuery(new MatchAllDocsQuery(), random());
    IndexSearcher searcher = newSearcher(reader);
    // A cached entry would not expose approximations, so disable the cache.
    searcher.setQueryCache(null);

    // FunctionScoreQuery must surface the inner query's two-phase iterator
    // whether or not scores are needed.
    FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(inner, null, null);
    for (boolean needsScores : new boolean[] { true, false }) {
        Weight w = searcher.createWeight(functionScoreQuery, needsScores);
        Scorer s = w.scorer(reader.leaves().get(0));
        assertNotNull(s.twoPhaseIterator());
    }

    // Same expectation for the filters variant.
    FiltersFunctionScoreQuery filtersQuery = new FiltersFunctionScoreQuery(inner, ScoreMode.Sum,
            new FilterFunction[0], Float.POSITIVE_INFINITY, null);
    for (boolean needsScores : new boolean[] { true, false }) {
        Weight w = searcher.createWeight(filtersQuery, needsScores);
        Scorer s = w.scorer(reader.leaves().get(0));
        assertNotNull(s.twoPhaseIterator());
    }
}

From source file:org.elasticsearch.index.query.PercolateQuery.java

License: Apache License

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    final Weight innerWeight = percolatorQueriesQuery.createWeight(searcher, needsScores);
    return new Weight(this) {

        @Override
        public void extractTerms(Set<Term> set) {
            // Nothing to extract: candidate selection is delegated to the inner weight.
        }

        /**
         * Explains a hit by re-running the two-phase match for {@code docId}; when
         * scores are requested, the stored query is re-explained against the
         * percolator's in-memory searcher for a detailed breakdown.
         */
        @Override
        public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException {
            Scorer scorer = scorer(leafReaderContext);
            if (scorer != null) {
                // scorer(...) always returns a BaseScorer here, which exposes a
                // two-phase view (approximation + memory-index verification).
                TwoPhaseIterator twoPhaseIterator = scorer.twoPhaseIterator();
                int result = twoPhaseIterator.approximation().advance(docId);
                if (result == docId) {
                    if (twoPhaseIterator.matches()) {
                        if (needsScores) {
                            QueryRegistry.Leaf percolatorQueries = queryRegistry.getQueries(leafReaderContext);
                            Query query = percolatorQueries.getQuery(docId);
                            Explanation detail = percolatorIndexSearcher.explain(query, 0);
                            return Explanation.match(scorer.score(), "PercolateQuery", detail);
                        } else {
                            return Explanation.match(scorer.score(), "PercolateQuery");
                        }
                    }
                }
            }
            return Explanation.noMatch("PercolateQuery");
        }

        @Override
        public float getValueForNormalization() throws IOException {
            return innerWeight.getValueForNormalization();
        }

        @Override
        public void normalize(float v, float v1) {
            innerWeight.normalize(v, v1);
        }

        @Override
        public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException {
            // The inner weight supplies the cheap candidate (approximation) iterator.
            final Scorer approximation = innerWeight.scorer(leafReaderContext);
            if (approximation == null) {
                return null;
            }

            final QueryRegistry.Leaf percolatorQueries = queryRegistry.getQueries(leafReaderContext);
            if (needsScores) {
                return new BaseScorer(this, approximation, percolatorQueries, percolatorIndexSearcher) {

                    // Score of the last verified candidate, reported by score().
                    float score;

                    @Override
                    boolean matchDocId(int docId) throws IOException {
                        Query query = percolatorQueries.getQuery(docId);
                        if (query != null) {
                            // Scored top-1 search so the match's score can be reported.
                            TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
                            if (topDocs.totalHits > 0) {
                                score = topDocs.scoreDocs[0].score;
                                return true;
                            } else {
                                return false;
                            }
                        } else {
                            return false;
                        }
                    }

                    @Override
                    public float score() throws IOException {
                        return score;
                    }
                };
            } else {
                return new BaseScorer(this, approximation, percolatorQueries, percolatorIndexSearcher) {

                    @Override
                    public float score() throws IOException {
                        return 0f; // scores not requested: every verified match scores 0
                    }

                    @Override // FIX: was missing — matchDocId overrides BaseScorer's hook
                    boolean matchDocId(int docId) throws IOException {
                        // Existence check is cheaper than a scored top-1 search.
                        Query query = percolatorQueries.getQuery(docId);
                        return query != null && Lucene.exists(percolatorIndexSearcher, query);
                    }
                };
            }
        }
    };
}

From source file:org.elasticsearch.percolator.PercolateQuery.java

License: Apache License

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    // Neither sub-weight needs scores: they only drive iteration/verification.
    final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, false);
    final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, false);
    return new Weight(this) {

        @Override
        public void extractTerms(Set<Term> set) {
            // Nothing to extract: candidate selection is delegated to the sub-weights.
        }

        /**
         * Explains a hit by re-running the two-phase match for {@code docId}; when
         * scores are requested, the stored query is re-explained against the
         * percolator's in-memory searcher for a detailed breakdown.
         */
        @Override
        public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException {
            Scorer scorer = scorer(leafReaderContext);
            if (scorer != null) {
                // scorer(...) always returns a BaseScorer here, which exposes a
                // two-phase view (approximation + memory-index verification).
                TwoPhaseIterator twoPhaseIterator = scorer.twoPhaseIterator();
                int result = twoPhaseIterator.approximation().advance(docId);
                if (result == docId) {
                    if (twoPhaseIterator.matches()) {
                        if (needsScores) {
                            QueryStore.Leaf percolatorQueries = queryStore.getQueries(leafReaderContext);
                            Query query = percolatorQueries.getQuery(docId);
                            Explanation detail = percolatorIndexSearcher.explain(query, 0);
                            return Explanation.match(scorer.score(), "PercolateQuery", detail);
                        } else {
                            return Explanation.match(scorer.score(), "PercolateQuery");
                        }
                    }
                }
            }
            return Explanation.noMatch("PercolateQuery");
        }

        @Override
        public float getValueForNormalization() throws IOException {
            return candidateMatchesWeight.getValueForNormalization();
        }

        @Override
        public void normalize(float v, float v1) {
            candidateMatchesWeight.normalize(v, v1);
        }

        @Override
        public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException {
            // Candidate matches form the cheap approximation phase.
            final Scorer approximation = candidateMatchesWeight.scorer(leafReaderContext);
            if (approximation == null) {
                return null;
            }

            final QueryStore.Leaf queries = queryStore.getQueries(leafReaderContext);
            if (needsScores) {
                return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {

                    // Score of the last verified candidate, reported by score().
                    float score;

                    @Override
                    boolean matchDocId(int docId) throws IOException {
                        // NOTE(review): percolatorQueries here is the field BaseScorer
                        // stores from the `queries` constructor arg — confirm in BaseScorer.
                        Query query = percolatorQueries.getQuery(docId);
                        if (query != null) {
                            // Scored top-1 search so the match's score can be reported.
                            TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
                            if (topDocs.totalHits > 0) {
                                score = topDocs.scoreDocs[0].score;
                                return true;
                            } else {
                                return false;
                            }
                        } else {
                            return false;
                        }
                    }

                    @Override
                    public float score() throws IOException {
                        return score;
                    }
                };
            } else {
                Scorer verifiedDocsScorer = verifiedMatchesWeight.scorer(leafReaderContext);
                Bits verifiedDocsBits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(),
                        verifiedDocsScorer);
                return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {

                    @Override
                    public float score() throws IOException {
                        return 0f; // scores not requested: every verified match scores 0
                    }

                    @Override // FIX: was missing — matchDocId overrides BaseScorer's hook
                    boolean matchDocId(int docId) throws IOException {
                        // We use the verifiedDocsBits to skip the expensive MemoryIndex verification.
                        // If docId also appears in the verifiedDocsBits then that means during indexing
                        // we were able to extract all query terms for this candidate match
                        // and we determined based on the nature of the query that it is safe to skip
                        // the MemoryIndex verification.
                        if (verifiedDocsBits.get(docId)) {
                            return true;
                        }
                        Query query = percolatorQueries.getQuery(docId);
                        return query != null && Lucene.exists(percolatorIndexSearcher, query);
                    }
                };
            }
        }
    };
}

From source file:org.hibernate.search.spatial.impl.ConstantScoreWeight.java

License: LGPL

@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
    // A document "exists" for this constant-score weight when the scorer can
    // land exactly on it.
    final Scorer docScorer = scorer(context);
    boolean exists = false;
    if (docScorer != null) {
        final TwoPhaseIterator twoPhase = docScorer.twoPhaseIterator();
        if (twoPhase != null) {
            // Advance the cheap approximation first, then confirm with matches().
            exists = twoPhase.approximation().advance(doc) == doc && twoPhase.matches();
        } else {
            exists = docScorer.iterator().advance(doc) == doc;
        }
    }

    if (!exists) {
        return Explanation.noMatch(getQuery().toString() + " doesn't match id " + doc);
    }
    return Explanation.match(queryWeight, getQuery().toString() + ", product of:",
            Explanation.match(boost, "boost"), Explanation.match(queryNorm, "queryNorm"));
}

From source file:org.opengrok.suggest.query.customized.CustomSloppyPhraseScorerTest.java

License: Open Source License

@SuppressWarnings("unchecked") // for contains()
public static void test(final int slop, final int offset, final String[] terms,
        final Integer[] expectedPositions) throws IOException {
    Directory dir = new ByteBuffersDirectory();

    // Index a single document with known token positions (zero .. ten).
    try (IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig())) {
        Document doc = new Document();
        doc.add(new TextField("test", "zero one two three four five six seven eight nine ten", Field.Store.NO));
        iw.addDocument(doc);
    }

    CustomPhraseQuery query = new CustomPhraseQuery(slop, "test", terms);
    query.offset = offset;

    try (IndexReader ir = DirectoryReader.open(dir)) {
        IndexSearcher is = new IndexSearcher(ir);
        Weight w = query.createWeight(is, false, 1);
        LeafReaderContext context = ir.getContext().leaves().get(0);
        Scorer scorer = w.scorer(context);

        // Drive the two-phase iterator by hand: walk the approximation and
        // remember the last document that the confirmation step accepts.
        TwoPhaseIterator it = scorer.twoPhaseIterator();
        int correctDoc = -1;
        for (int docId = it.approximation().nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS;
                docId = it.approximation().nextDoc()) {
            if (it.matches()) {
                correctDoc = docId;
            }
        }

        // Verify the recorded phrase positions for the matching document.
        BitIntsHolder bs = (BitIntsHolder) ((PhraseScorer) scorer).getPositions(correctDoc);
        assertThat(toSet(bs), contains(expectedPositions));
    }
}