Example usage for org.apache.lucene.search DocIdSetIterator NO_MORE_DOCS

Introduction

This page collects example usages of org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS.

Prototype

public static final int NO_MORE_DOCS = Integer.MAX_VALUE;

Document

When returned by nextDoc(), advance(int), or docID(), it means there are no more docs in the iterator.
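
As a quick orientation, here is a minimal sketch of the standard consumption idiom; the iterator variable it is illustrative and is assumed to come from, for example, Scorer.iterator():

int doc;
// nextDoc() returns the sentinel once the iterator is exhausted.
while ((doc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    // process doc...
}
// After exhaustion, docID() also reports the sentinel.
assert it.docID() == DocIdSetIterator.NO_MORE_DOCS;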

Usage

From source file:org.elasticsearch.index.search.child.AbstractChildTestCase.java

License:Apache License
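
This test helper builds a human-readable report of an expected versus actual BitDocIdSet: each set is walked with nextDoc() until NO_MORE_DOCS, printing the uid of every document it contains.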

static String reason(BitDocIdSet actual, BitDocIdSet expected, IndexSearcher indexSearcher) throws IOException {
    StringBuilder builder = new StringBuilder();
    builder.append("expected cardinality:").append(expected.bits().cardinality()).append('\n');
    DocIdSetIterator iterator = expected.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
        builder.append("Expected doc[").append(doc).append("] with id value ")
                .append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
    }
    builder.append("actual cardinality: ").append(actual.bits().cardinality()).append('\n');
    iterator = actual.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
        builder.append("Actual doc[").append(doc).append("] with id value ")
                .append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
    }
    return builder.toString();
}

From source file:org.elasticsearch.index.search.child.AbstractChildTests.java

License:Apache License
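
The same diagnostic helper in a variant that operates on FixedBitSet directly; both loops again terminate on NO_MORE_DOCS.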

static String reason(FixedBitSet actual, FixedBitSet expected, IndexSearcher indexSearcher) throws IOException {
    StringBuilder builder = new StringBuilder();
    builder.append("expected cardinality:").append(expected.cardinality()).append('\n');
    DocIdSetIterator iterator = expected.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
        builder.append("Expected doc[").append(doc).append("] with id value ")
                .append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
    }
    builder.append("actual cardinality: ").append(actual.cardinality()).append('\n');
    iterator = actual.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
        builder.append("Actual doc[").append(doc).append("] with id value ")
                .append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
    }
    return builder.toString();
}

From source file:org.elasticsearch.index.search.child.ParentIdsFilter.java

License:Apache License
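
Here the sentinel is checked right after the first nextDoc() call: a parent uid term with no matching live document is skipped, and an assert uses advance() returning NO_MORE_DOCS to verify that each uid matches exactly one document.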

@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    Terms terms = context.reader().terms(UidFieldMapper.NAME);
    if (terms == null) {
        return null;
    }

    TermsEnum termsEnum = terms.iterator(null);
    BytesRef uidSpare = new BytesRef();
    BytesRef idSpare = new BytesRef();

    if (acceptDocs == null) {
        acceptDocs = context.reader().getLiveDocs();
    }

    FixedBitSet nonNestedDocs = null;
    if (nonNestedDocsFilter != null) {
        nonNestedDocs = (FixedBitSet) nonNestedDocsFilter.getDocIdSet(context, acceptDocs);
    }

    DocsEnum docsEnum = null;
    FixedBitSet result = null;
    long size = parentIds.size();
    for (int i = 0; i < size; i++) {
        parentIds.get(i, idSpare);
        Uid.createUidAsBytes(parentTypeBr, idSpare, uidSpare);
        if (termsEnum.seekExact(uidSpare)) {
            int docId;
            docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
            if (result == null) {
                docId = docsEnum.nextDoc();
                if (docId != DocIdSetIterator.NO_MORE_DOCS) {
                    result = new FixedBitSet(context.reader().maxDoc());
                } else {
                    continue;
                }
            } else {
                docId = docsEnum.nextDoc();
                if (docId == DocIdSetIterator.NO_MORE_DOCS) {
                    continue;
                }
            }
            if (nonNestedDocs != null && !nonNestedDocs.get(docId)) {
                docId = nonNestedDocs.nextSetBit(docId);
            }
            result.set(docId);
            assert docsEnum.advance(docId + 1) == DocIdSetIterator.NO_MORE_DOCS : "DocId " + docId
                    + " should have been the last one but docId " + docsEnum.docID() + " exists.";
        }
    }
    return result;
}

From source file:org.elasticsearch.index.shard.ShardSplittingQuery.java

License:Apache License
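
BitSet.nextSetBit() returns NO_MORE_DOCS once no set bit remains, so the same sentinel terminates this scan, which marks the child documents that precede each matching parent.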

private void markChildDocs(BitSet parentDocs, BitSet matchingDocs) {
    int currentDeleted = 0;
    while (currentDeleted < matchingDocs.length()
            && (currentDeleted = matchingDocs.nextSetBit(currentDeleted)) != DocIdSetIterator.NO_MORE_DOCS) {
        int previousParent = parentDocs.prevSetBit(Math.max(0, currentDeleted - 1));
        for (int i = previousParent + 1; i < currentDeleted; i++) {
            matchingDocs.set(i);
        }
        currentDeleted++;
    }
}

From source file:org.elasticsearch.index.shard.ShardSplittingQuery.java

License:Apache License
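
For every id term that does not belong in the target shard, the postings are drained with nextDoc() until NO_MORE_DOCS and each matching doc id is handed to the consumer.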

private static void findSplitDocs(String idField, Predicate<BytesRef> includeInShard, LeafReader leafReader,
        IntConsumer consumer) throws IOException {
    Terms terms = leafReader.terms(idField);
    TermsEnum iterator = terms.iterator();
    BytesRef idTerm;
    PostingsEnum postingsEnum = null;
    while ((idTerm = iterator.next()) != null) {
        if (includeInShard.test(idTerm) == false) {
            postingsEnum = iterator.postings(postingsEnum);
            int doc;
            while ((doc = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                consumer.accept(doc);
            }
        }
    }
}

From source file:org.elasticsearch.index.shard.ShardSplittingQueryTests.java

License:Apache License
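
This test exhausts both the query's iterator and a doc-values iterator; NO_MORE_DOCS serves as the loop terminator and, via assertEquals, as a way to assert that an iterator matches no documents at all.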

void assertSplit(Directory dir, IndexMetaData metaData, int targetShardId, boolean hasNested)
        throws IOException {
    try (IndexReader reader = DirectoryReader.open(dir)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        final boolean needsScores = false;
        final Weight splitWeight = searcher.createNormalizedWeight(
                new ShardSplittingQuery(metaData, targetShardId, hasNested), needsScores);
        final List<LeafReaderContext> leaves = reader.leaves();
        for (final LeafReaderContext ctx : leaves) {
            Scorer scorer = splitWeight.scorer(ctx);
            DocIdSetIterator iterator = scorer.iterator();
            SortedNumericDocValues shard_id = ctx.reader().getSortedNumericDocValues("shard_id");
            int numExpected = 0;
            while (shard_id.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                if (targetShardId == shard_id.nextValue()) {
                    numExpected++;
                }
            }
            if (numExpected == ctx.reader().maxDoc()) {
                // all docs belong in this shard
                assertEquals(DocIdSetIterator.NO_MORE_DOCS, iterator.nextDoc());
            } else {
                shard_id = ctx.reader().getSortedNumericDocValues("shard_id");
                int doc;
                int numActual = 0;
                int lastDoc = 0;
                while ((doc = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                    lastDoc = doc;
                    while (shard_id.nextDoc() < doc) {
                        long shardID = shard_id.nextValue();
                        assertEquals(shardID, targetShardId);
                        numActual++;
                    }
                    assertEquals(shard_id.docID(), doc);
                    long shardID = shard_id.nextValue();
                    BytesRef id = reader.document(doc).getBinaryValue("_id");
                    String actualId = Uid.decodeId(id.bytes, id.offset, id.length);
                    assertNotEquals(ctx.reader() + " docID: " + doc + " actualID: " + actualId, shardID,
                            targetShardId);
                }
                if (lastDoc < ctx.reader().maxDoc()) {
                    // check the last docs in the segment and make sure they all have the right shard id
                    while (shard_id.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                        long shardID = shard_id.nextValue();
                        assertEquals(shardID, targetShardId);
                        numActual++;
                    }
                }

                assertEquals(numExpected, numActual);
            }
        }
    }
}

From source file:org.elasticsearch.join.aggregations.ParentToChildrenAggregator.java

License:Apache License
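
During post-collection the child filter's iterator is replayed to NO_MORE_DOCS, forwarding every live child document to the sub-aggregators under its parent's bucket.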

@Override
protected void doPostCollection() throws IOException {
    IndexReader indexReader = context().searcher().getIndexReader();
    for (LeafReaderContext ctx : indexReader.leaves()) {
        Scorer childDocsScorer = childFilter.scorer(ctx);
        if (childDocsScorer == null) {
            continue;
        }
        DocIdSetIterator childDocsIter = childDocsScorer.iterator();

        final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx);

        final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx);
        // Set the scorer, since we now replay only the child docIds
        sub.setScorer(new ConstantScoreScorer(null, 1f, childDocsIter));

        final Bits liveDocs = ctx.reader().getLiveDocs();
        for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter
                .nextDoc()) {
            if (liveDocs != null && liveDocs.get(docId) == false) {
                continue;
            }
            if (globalOrdinals.advanceExact(docId)) {
                long globalOrdinal = globalOrdinals.nextOrd();
                assert globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS;
                long bucketOrd = parentOrdToBuckets.get(globalOrdinal);
                if (bucketOrd != -1) {
                    collectBucket(sub, docId, bucketOrd);
                    if (multipleBucketsPerParentOrd) {
                        long[] otherBucketOrds = parentOrdToOtherBuckets.get(globalOrdinal);
                        if (otherBucketOrds != null) {
                            for (long otherBucketOrd : otherBucketOrds) {
                                collectBucket(sub, docId, otherBucketOrd);
                            }
                        }
                    }
                }
            }
        }
    }
}

From source file:org.elasticsearch.search.aggregations.bucket.children.ParentToChildrenAggregator.java

License:Apache License
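
An earlier variant of the same post-collection replay, based on SortedDocValues ordinals; the NO_MORE_DOCS loop is identical.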

@Override
protected void doPostCollection() throws IOException {
    IndexReader indexReader = context().searchContext().searcher().getIndexReader();
    for (LeafReaderContext ctx : indexReader.leaves()) {
        Scorer childDocsScorer = childFilter.scorer(ctx);
        if (childDocsScorer == null) {
            continue;
        }
        DocIdSetIterator childDocsIter = childDocsScorer.iterator();

        final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx);
        final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);

        // Set the scorer, since we now replay only the child docIds
        sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f));

        final Bits liveDocs = ctx.reader().getLiveDocs();
        for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter
                .nextDoc()) {
            if (liveDocs != null && liveDocs.get(docId) == false) {
                continue;
            }
            long globalOrdinal = globalOrdinals.getOrd(docId);
            if (globalOrdinal != -1) {
                long bucketOrd = parentOrdToBuckets.get(globalOrdinal);
                if (bucketOrd != -1) {
                    collectBucket(sub, docId, bucketOrd);
                    if (multipleBucketsPerParentOrd) {
                        long[] otherBucketOrds = parentOrdToOtherBuckets.get(globalOrdinal);
                        if (otherBucketOrds != null) {
                            for (long otherBucketOrd : otherBucketOrds) {
                                collectBucket(sub, docId, otherBucketOrd);
                            }
                        }
                    }
                }
            }
        }
    }
}

From source file:org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregator.java

License:Apache License
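
Deferred collection replays each cached DocIdSet to NO_MORE_DOCS, advancing a parallel scorer iterator to the same document whenever scores are required.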

/**
 * Replay the documents that might contain a top bucket and pass top buckets to
 * the {@link this#deferredCollectors}.
 */
private void runDeferredCollections() throws IOException {
    final boolean needsScores = needsScores();
    Weight weight = null;
    if (needsScores) {
        Query query = context.query();
        weight = context.searcher().createNormalizedWeight(query, true);
    }
    deferredCollectors.preCollection();
    for (Entry entry : entries) {
        DocIdSetIterator docIdSetIterator = entry.docIdSet.iterator();
        if (docIdSetIterator == null) {
            continue;
        }
        final LeafBucketCollector subCollector = deferredCollectors.getLeafCollector(entry.context);
        final LeafBucketCollector collector = queue.getLeafCollector(entry.context,
                getSecondPassCollector(subCollector));
        DocIdSetIterator scorerIt = null;
        if (needsScores) {
            Scorer scorer = weight.scorer(entry.context);
            if (scorer != null) {
                scorerIt = scorer.iterator();
                subCollector.setScorer(scorer);
            }
        }
        int docID;
        while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
            if (needsScores) {
                assert scorerIt != null && scorerIt.docID() < docID;
                scorerIt.advance(docID);
                // aggregations should only be replayed on matching documents
                assert scorerIt.docID() == docID;
            }
            collector.collect(docID);
        }
    }
    deferredCollectors.postCollection();
}

From source file:org.elasticsearch.search.aggregations.bucket.composite.SortedDocsProducer.java

License:Apache License
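
processBucket() drains the given iterator to NO_MORE_DOCS, filtering out deleted documents through the live-docs Bits before collecting.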

/**
 * Visits all non-deleted documents in <code>iterator</code> and fills the provided <code>queue</code>
 * with the top composite buckets extracted from the collection.
 * Documents that contain a top composite bucket are added in the provided <code>builder</code> if it is not null.
 *
 * Returns true if the queue is full and the current <code>leadSourceBucket</code> did not produce any competitive
 * composite buckets.
 */
protected boolean processBucket(CompositeValuesCollectorQueue queue, LeafReaderContext context,
        DocIdSetIterator iterator, Comparable<?> leadSourceBucket, @Nullable DocIdSetBuilder builder)
        throws IOException {
    final int[] topCompositeCollected = new int[1];
    final boolean[] hasCollected = new boolean[1];
    final LeafBucketCollector queueCollector = new LeafBucketCollector() {
        int lastDoc = -1;

        // we need to add the matching document in the builder
        // so we build a bulk adder from the approximate cost of the iterator
        // and rebuild the adder during the collection if needed
        int remainingBits = (int) Math.min(iterator.cost(), Integer.MAX_VALUE);
        DocIdSetBuilder.BulkAdder adder = builder == null ? null : builder.grow(remainingBits);

        @Override
        public void collect(int doc, long bucket) throws IOException {
            hasCollected[0] = true;
            int slot = queue.addIfCompetitive();
            if (slot != -1) {
                topCompositeCollected[0]++;
                if (adder != null && doc != lastDoc) {
                    if (remainingBits == 0) {
                        // the cost approximation was lower than the real size, we need to grow the adder
                        // by some numbers (128) to ensure that we can add the extra documents
                        adder = builder.grow(128);
                        remainingBits = 128;
                    }
                    adder.add(doc);
                    remainingBits--;
                    lastDoc = doc;
                }
            }
        }
    };
    final Bits liveDocs = context.reader().getLiveDocs();
    final LeafBucketCollector collector = queue.getLeafCollector(leadSourceBucket, context, queueCollector);
    while (iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        if (liveDocs == null || liveDocs.get(iterator.docID())) {
            collector.collect(iterator.docID());
        }
    }
    if (queue.isFull() && hasCollected[0] && topCompositeCollected[0] == 0) {
        return true;
    }
    return false;
}