Example usage for org.apache.lucene.search DocIdSetIterator nextDoc

List of usage examples for org.apache.lucene.search DocIdSetIterator nextDoc

Introduction

On this page you can find example usages of org.apache.lucene.search DocIdSetIterator nextDoc.

Prototype

public abstract int nextDoc() throws IOException;

Document

Advances to the next document in the set and returns the doc it is currently on, or NO_MORE_DOCS if there are no more docs in the set.
NOTE: once the iterator has been exhausted, you should not call this method again, as it may result in unpredictable behavior.
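
A minimal sketch of the standard iteration idiom follows; docIdSet stands in for any DocIdSet obtained elsewhere (it is a hypothetical name, not from the examples below), and a null iterator conventionally signals an empty set:

DocIdSetIterator it = docIdSet.iterator(); // docIdSet: any DocIdSet (hypothetical here)
if (it != null) {
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        // process doc; never call nextDoc() again once NO_MORE_DOCS has been returned
    }
}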

Usage

From source file: org.elasticsearch.common.lucene.search.AndDocIdSetTests.java

License: Apache License

public void testDuel() throws IOException {
    for (int iter = 0; iter < 1000; ++iter) {
        final int numSets = 1 + random().nextInt(5);
        final int numDocs = 1 + random().nextInt(1000);
        FixedBitSet anded = new FixedBitSet(numDocs);
        anded.set(0, numDocs);
        final DocIdSet[] sets = new DocIdSet[numSets];
        for (int i = 0; i < numSets; ++i) {
            final FixedBitSet randomSet = randomBitSet(numDocs);

            anded.and(randomSet);

            if (random().nextBoolean()) {
                // will be considered 'fast' by AndDocIdSet
                sets[i] = new BitDocIdSet(randomSet);
            } else {
                // will be considered 'slow' by AndDocIdSet
                sets[i] = new DocValuesDocIdSet(numDocs, null) {
                    @Override
                    protected boolean matchDoc(int doc) {
                        return randomSet.get(doc);
                    }
                };
            }
        }
        AndDocIdSet andSet = new AndDocIdSet(sets);
        Bits andBits = andSet.bits();
        if (andBits != null) {
            for (int i = 0; i < numDocs; ++i) {
                assertEquals(anded.get(i), andBits.get(i));
            }
        }
        DocIdSetIterator andIt = andSet.iterator();
        if (andIt == null) {
            assertEquals(0, anded.cardinality());
        } else {
            int previous = -1;
            for (int doc = andIt.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = andIt.nextDoc()) {
                for (int j = previous + 1; j < doc; ++j) {
                    assertFalse(anded.get(j));
                }
                assertTrue(anded.get(doc));
                previous = doc;
            }
            for (int j = previous + 1; j < numDocs; ++j) {
                assertFalse(anded.get(j));
            }
        }
    }
}

From source file: org.elasticsearch.common.lucene.search.RedisFilter.java

License: Apache License

@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    logger.info("redis key:" + term.field() + ",redis value:" + term.text());

    // TODO: look up the key in Redis

    final AllDocIdSet result = new AllDocIdSet(context.reader().maxDoc());
    DocIdSetIterator docsEnum = result.iterator();
    int docId = docsEnum.nextDoc();
    if (docId == DocIdSetIterator.NO_MORE_DOCS) {
        return null;
    }

    final FixedBitSet finalResult = new FixedBitSet(context.reader().maxDoc());
    for (; docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
        Document doc = context.reader().document(docId);
        String uid = doc.getField("_uid").stringValue();
        Uid id = Uid.createUid(uid);
        logger.info("type:" + id.type() + ",uid:" + id.id());

        boolean hitPermission = false;

        // NOTE: placeholder permission check (the doc with id "1" is denied)
        if (id.id().equals("1")) {
            hitPermission = false;
            logger.info("permission denied");
        } else {
            logger.info("permission granted");
            hitPermission = true;
        }

        if (hitPermission) {
            finalResult.set(docId);
        }
    }
    return finalResult;
}

From source file: org.elasticsearch.common.lucene.search.XBooleanFilter.java

License: Apache License

/**
 * Returns a DocIdSet representing the Boolean composition
 * of the filters that have been added.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    FixedBitSet res = null;
    final AtomicReader reader = context.reader();

    // optimize single case...
    if (clauses.size() == 1) {
        FilterClause clause = clauses.get(0);
        DocIdSet set = clause.getFilter().getDocIdSet(context, acceptDocs);
        if (clause.getOccur() == Occur.MUST_NOT) {
            if (DocIdSets.isEmpty(set)) {
                return new AllDocIdSet(reader.maxDoc());
            } else {
                return new NotDocIdSet(set, reader.maxDoc());
            }
        }
        // SHOULD or MUST, just return the set...
        if (DocIdSets.isEmpty(set)) {
            return null;
        }
        return set;
    }

    // first, go over and see if we can shortcut the execution
    // and gather Bits if we need to
    List<ResultClause> results = new ArrayList<ResultClause>(clauses.size());
    boolean hasShouldClauses = false;
    boolean hasNonEmptyShouldClause = false;
    boolean hasMustClauses = false;
    boolean hasMustNotClauses = false;
    for (int i = 0; i < clauses.size(); i++) {
        FilterClause clause = clauses.get(i);
        DocIdSet set = clause.getFilter().getDocIdSet(context, acceptDocs);
        if (clause.getOccur() == Occur.MUST) {
            hasMustClauses = true;
            if (DocIdSets.isEmpty(set)) {
                return null;
            }
        } else if (clause.getOccur() == Occur.SHOULD) {
            hasShouldClauses = true;
            if (DocIdSets.isEmpty(set)) {
                continue;
            }
            hasNonEmptyShouldClause = true;
        } else if (clause.getOccur() == Occur.MUST_NOT) {
            hasMustNotClauses = true;
            if (DocIdSets.isEmpty(set)) {
                // we mark empty ones as null for must_not, handle it in the next run...
                results.add(new ResultClause(null, null, clause));
                continue;
            }
        }
        Bits bits = null;
        if (!DocIdSets.isFastIterator(set)) {
            bits = set.bits();
        }
        results.add(new ResultClause(set, bits, clause));
    }

    if (hasShouldClauses && !hasNonEmptyShouldClause) {
        return null;
    }

    // now, go over the clauses and apply the "fast" ones first...
    hasNonEmptyShouldClause = false;
    boolean hasBits = false;
    // But first we need to handle the "fast" should clauses, otherwise a should clause can unset docs
    // that don't match with a must or must_not clause.
    List<ResultClause> fastOrClauses = new ArrayList<ResultClause>();
    for (int i = 0; i < results.size(); i++) {
        ResultClause clause = results.get(i);
        // we apply bits-based ones (slow) in the second run
        if (clause.bits != null) {
            hasBits = true;
            continue;
        }
        if (clause.clause.getOccur() == Occur.SHOULD) {
            if (hasMustClauses || hasMustNotClauses) {
                fastOrClauses.add(clause);
            } else if (res == null) {
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it != null) {
                    hasNonEmptyShouldClause = true;
                    res = new FixedBitSet(reader.maxDoc());
                    res.or(it);
                }
            } else {
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it != null) {
                    hasNonEmptyShouldClause = true;
                    res.or(it);
                }
            }
        }
    }

    // Now we safely handle the "fast" must and must_not clauses.
    for (int i = 0; i < results.size(); i++) {
        ResultClause clause = results.get(i);
        // we apply bits-based ones (slow) in the second run
        if (clause.bits != null) {
            hasBits = true;
            continue;
        }
        if (clause.clause.getOccur() == Occur.MUST) {
            DocIdSetIterator it = clause.docIdSet.iterator();
            if (it == null) {
                return null;
            }
            if (res == null) {
                res = new FixedBitSet(reader.maxDoc());
                res.or(it);
            } else {
                res.and(it);
            }
        } else if (clause.clause.getOccur() == Occur.MUST_NOT) {
            if (res == null) {
                res = new FixedBitSet(reader.maxDoc());
                res.set(0, reader.maxDoc()); // NOTE: may set bits on deleted docs
            }
            if (clause.docIdSet != null) {
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it != null) {
                    res.andNot(it);
                }
            }
        }
    }

    if (!hasBits) {
        if (!fastOrClauses.isEmpty()) {
            DocIdSetIterator it = res.iterator();
            at_least_one_should_clause_iter:
            for (int setDoc = it.nextDoc(); setDoc != DocIdSetIterator.NO_MORE_DOCS; setDoc = it.nextDoc()) {
                for (ResultClause fastOrClause : fastOrClauses) {
                    DocIdSetIterator clauseIterator = fastOrClause.iterator();
                    if (clauseIterator == null) {
                        continue;
                    }
                    if (iteratorMatch(clauseIterator, setDoc)) {
                        hasNonEmptyShouldClause = true;
                        continue at_least_one_should_clause_iter;
                    }
                }
                res.clear(setDoc);
            }
        }

        if (hasShouldClauses && !hasNonEmptyShouldClause) {
            return null;
        } else {
            return res;
        }
    }

    // we have some clauses with bits, apply them...
    // we let the "res" drive the computation, and check Bits for that
    List<ResultClause> slowOrClauses = new ArrayList<ResultClause>();
    for (int i = 0; i < results.size(); i++) {
        ResultClause clause = results.get(i);
        if (clause.bits == null) {
            continue;
        }
        if (clause.clause.getOccur() == Occur.SHOULD) {
            if (hasMustClauses || hasMustNotClauses) {
                slowOrClauses.add(clause);
            } else {
                if (res == null) {
                    DocIdSetIterator it = clause.docIdSet.iterator();
                    if (it == null) {
                        continue;
                    }
                    hasNonEmptyShouldClause = true;
                    res = new FixedBitSet(reader.maxDoc());
                    res.or(it);
                } else {
                    for (int doc = 0; doc < reader.maxDoc(); doc++) {
                        if (!res.get(doc) && clause.bits.get(doc)) {
                            hasNonEmptyShouldClause = true;
                            res.set(doc);
                        }
                    }
                }
            }
        } else if (clause.clause.getOccur() == Occur.MUST) {
            if (res == null) {
                // nothing we can do, just or it...
                res = new FixedBitSet(reader.maxDoc());
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it == null) {
                    return null;
                }
                res.or(it);
            } else {
                Bits bits = clause.bits;
                // use the "res" to drive the iteration
                DocIdSetIterator it = res.iterator();
                for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                    if (!bits.get(doc)) {
                        res.clear(doc);
                    }
                }
            }
        } else if (clause.clause.getOccur() == Occur.MUST_NOT) {
            if (res == null) {
                res = new FixedBitSet(reader.maxDoc());
                res.set(0, reader.maxDoc()); // NOTE: may set bits on deleted docs
                DocIdSetIterator it = clause.docIdSet.iterator();
                if (it != null) {
                    res.andNot(it);
                }
            } else {
                Bits bits = clause.bits;
                // let res drive the iteration
                DocIdSetIterator it = res.iterator();
                for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                    if (bits.get(doc)) {
                        res.clear(doc);
                    }
                }
            }
        }
    }

    // From a boolean-logic point of view a should clause has no impact on a bool filter if there
    // is already a must or must_not clause. However, in the current ES bool filter behaviour at least one should
    // clause must match in order for a doc to be a match. What we do here is check whether matched docs match
    // any should filter. TODO: Add an option to disable the minimum_should_match=1 behaviour
    if (!slowOrClauses.isEmpty() || !fastOrClauses.isEmpty()) {
        DocIdSetIterator it = res.iterator();
        at_least_one_should_clause_iter:
        for (int setDoc = it.nextDoc(); setDoc != DocIdSetIterator.NO_MORE_DOCS; setDoc = it.nextDoc()) {
            for (ResultClause fastOrClause : fastOrClauses) {
                DocIdSetIterator clauseIterator = fastOrClause.iterator();
                if (clauseIterator == null) {
                    continue;
                }
                if (iteratorMatch(clauseIterator, setDoc)) {
                    hasNonEmptyShouldClause = true;
                    continue at_least_one_should_clause_iter;
                }
            }
            for (ResultClause slowOrClause : slowOrClauses) {
                if (slowOrClause.bits.get(setDoc)) {
                    hasNonEmptyShouldClause = true;
                    continue at_least_one_should_clause_iter;
                }
            }
            res.clear(setDoc);
        }
    }

    if (hasShouldClauses && !hasNonEmptyShouldClause) {
        return null;
    } else {
        return res;
    }

}

From source file: org.elasticsearch.common.lucene.search.XBooleanFilterLuceneTests.java

License: Apache License

private void tstFilterCard(String mes, int expected, Filter filt) throws Exception {
    int actual = 0;
    DocIdSet docIdSet = filt.getDocIdSet(reader.getContext(), reader.getLiveDocs());
    if (docIdSet != null) {
        DocIdSetIterator disi = docIdSet.iterator();
        if (disi != null) {
            while (disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                actual++;
            }
        }
    }
    assertThat(mes, actual, equalTo(expected));
}

From source file: org.elasticsearch.index.cache.filter.support.FilterCacheValue.java

License: Apache License

public static DocSet cacheable(IndexReader reader, @Nullable LongsLAB longsLAB, DocIdSet set)
        throws IOException {
    if (set == null) {
        return DocSet.EMPTY_DOC_SET;
    }
    if (set == DocIdSet.EMPTY_DOCIDSET) {
        return DocSet.EMPTY_DOC_SET;
    }

    DocIdSetIterator it = set.iterator();
    if (it == null) {
        return DocSet.EMPTY_DOC_SET;
    }
    int doc = it.nextDoc();
    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
        return DocSet.EMPTY_DOC_SET;
    }

    // we have a LAB, check if it can be used...
    if (longsLAB == null) {
        return DocSets.cacheable(reader, set);
    }

    int numOfWords = OpenBitSet.bits2words(reader.maxDoc());
    LongsLAB.Allocation allocation = longsLAB.allocateLongs(numOfWords);
    if (allocation == null) {
        return DocSets.cacheable(reader, set);
    }
    // we have an allocation, use it to create SlicedOpenBitSet
    if (set instanceof OpenBitSet) {
        return new SlicedOpenBitSet(allocation.getData(), allocation.getOffset(), (OpenBitSet) set);
    } else if (set instanceof OpenBitDocSet) {
        return new SlicedOpenBitSet(allocation.getData(), allocation.getOffset(), ((OpenBitDocSet) set).set());
    } else {
        SlicedOpenBitSet slicedSet = new SlicedOpenBitSet(allocation.getData(), numOfWords,
                allocation.getOffset());
        slicedSet.fastSet(doc); // we already have an open iterator, so use it, and don't forget to set the initial one
        while ((doc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
            slicedSet.fastSet(doc);
        }
        return slicedSet;
    }
}

From source file: org.elasticsearch.index.cache.fixedbitset.FixedBitSetFilterCache.java

License: Apache License

private FixedBitSet getAndLoadIfNotPresent(final Filter filter, final AtomicReaderContext context)
        throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    Cache<Filter, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Filter, Value>>() {
        @Override
        public Cache<Filter, Value> call() throws Exception {
            SegmentReaderUtils.registerCoreListener(context.reader(), FixedBitSetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(filter, new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            DocIdSet docIdSet = filter.getDocIdSet(context, null);
            final FixedBitSet fixedBitSet;
            if (docIdSet instanceof FixedBitSet) {
                fixedBitSet = (FixedBitSet) docIdSet;
            } else {
                fixedBitSet = new FixedBitSet(context.reader().maxDoc());
                if (docIdSet != null && docIdSet != DocIdSet.EMPTY) {
                    DocIdSetIterator iterator = docIdSet.iterator();
                    if (iterator != null) {
                        int doc = iterator.nextDoc();
                        if (doc != DocIdSetIterator.NO_MORE_DOCS) {
                            do {
                                fixedBitSet.set(doc);
                                doc = iterator.nextDoc();
                            } while (doc != DocIdSetIterator.NO_MORE_DOCS);
                        }
                    }
                }
            }

            Value value = new Value(fixedBitSet, shardId);
            if (shardId != null) {
                IndexShard shard = indexService.shard(shardId.id());
                if (shard != null) {
                    shard.shardFixedBitSetFilterCache().onCached(value.fixedBitSet.ramBytesUsed());
                }
            }
            return value;
        }
    }).fixedBitSet;
}

From source file: org.elasticsearch.index.percolator.PercolatorQueryCache.java

License: Apache License

QueriesLeaf loadQueries(LeafReaderContext context, IndexShard indexShard) throws IOException {
    Version indexVersionCreated = indexShard.indexSettings().getIndexVersionCreated();
    MapperService mapperService = indexShard.mapperService();
    LeafReader leafReader = context.reader();
    ShardId shardId = ShardUtils.extractShardId(leafReader);
    if (shardId == null) {
        throw new IllegalStateException("can't resolve shard id");
    }
    if (indexSettings.getIndex().equals(shardId.getIndex()) == false) {
        // percolator cache insanity
        String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index "
                + indexSettings.getIndex();
        throw new IllegalStateException(message);
    }

    IntObjectHashMap<Query> queries = new IntObjectHashMap<>();
    boolean legacyLoading = indexVersionCreated.before(Version.V_5_0_0_alpha1);
    if (legacyLoading) {
        PostingsEnum postings = leafReader.postings(new Term(TypeFieldMapper.NAME, LEGACY_TYPE_NAME),
                PostingsEnum.NONE);
        if (postings != null) {
            LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor();
            for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) {
                leafReader.document(docId, visitor);
                queries.put(docId, parseLegacyPercolatorDocument(docId, visitor.source));
                visitor.source = null; // reset
            }
        }
    } else {
        // Each type can have one percolator field mapper,
        // So for each type we check if there is a percolator field mapper
        // and parse all the queries for the documents of that type.
        IndexSearcher indexSearcher = new IndexSearcher(leafReader);
        for (DocumentMapper documentMapper : mapperService.docMappers(false)) {
            Weight queryWeight = indexSearcher.createNormalizedWeight(documentMapper.typeFilter(), false);
            for (FieldMapper fieldMapper : documentMapper.mappers()) {
                if (fieldMapper instanceof PercolatorFieldMapper) {
                    PercolatorFieldType fieldType = (PercolatorFieldType) fieldMapper.fieldType();
                    BinaryDocValues binaryDocValues = leafReader
                            .getBinaryDocValues(fieldType.getQueryBuilderFieldName());
                    if (binaryDocValues != null) {
                        // use the same leaf reader context the indexSearcher is using too:
                        Scorer scorer = queryWeight.scorer(leafReader.getContext());
                        if (scorer != null) {
                            DocIdSetIterator iterator = scorer.iterator();
                            for (int docId = iterator.nextDoc();
                                    docId != DocIdSetIterator.NO_MORE_DOCS;
                                    docId = iterator.nextDoc()) {
                                BytesRef qbSource = binaryDocValues.get(docId);
                                if (qbSource.length > 0) {
                                    queries.put(docId, parseQueryBuilder(docId, qbSource));
                                }
                            }
                        }
                    }
                    break;
                }
            }
        }
    }
    leafReader.addCoreClosedListener(this);
    return new QueriesLeaf(shardId, queries);
}

From source file: org.elasticsearch.index.search.child.AbstractChildTestCase.java

License: Apache License

static String reason(BitDocIdSet actual, BitDocIdSet expected, IndexSearcher indexSearcher) throws IOException {
    StringBuilder builder = new StringBuilder();
    builder.append("expected cardinality:").append(expected.bits().cardinality()).append('\n');
    DocIdSetIterator iterator = expected.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
        builder.append("Expected doc[").append(doc).append("] with id value ")
                .append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
    }
    builder.append("actual cardinality: ").append(actual.bits().cardinality()).append('\n');
    iterator = actual.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
        builder.append("Actual doc[").append(doc).append("] with id value ")
                .append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
    }
    return builder.toString();
}

From source file: org.elasticsearch.index.search.child.AbstractChildTests.java

License: Apache License

static String reason(FixedBitSet actual, FixedBitSet expected, IndexSearcher indexSearcher) throws IOException {
    StringBuilder builder = new StringBuilder();
    builder.append("expected cardinality:").append(expected.cardinality()).append('\n');
    DocIdSetIterator iterator = expected.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
        builder.append("Expected doc[").append(doc).append("] with id value ")
                .append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
    }
    builder.append("actual cardinality: ").append(actual.cardinality()).append('\n');
    iterator = actual.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
        builder.append("Actual doc[").append(doc).append("] with id value ")
                .append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
    }
    return builder.toString();
}

From source file: org.elasticsearch.index.shard.ShardSplittingQueryTests.java

License: Apache License

void assertSplit(Directory dir, IndexMetaData metaData, int targetShardId, boolean hasNested)
        throws IOException {
    try (IndexReader reader = DirectoryReader.open(dir)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        final boolean needsScores = false;
        final Weight splitWeight = searcher.createNormalizedWeight(
                new ShardSplittingQuery(metaData, targetShardId, hasNested), needsScores);
        final List<LeafReaderContext> leaves = reader.leaves();
        for (final LeafReaderContext ctx : leaves) {
            Scorer scorer = splitWeight.scorer(ctx);
            DocIdSetIterator iterator = scorer.iterator();
            SortedNumericDocValues shard_id = ctx.reader().getSortedNumericDocValues("shard_id");
            int numExpected = 0;
            while (shard_id.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                if (targetShardId == shard_id.nextValue()) {
                    numExpected++;
                }
            }
            if (numExpected == ctx.reader().maxDoc()) {
                // all docs belong in this shard
                assertEquals(DocIdSetIterator.NO_MORE_DOCS, iterator.nextDoc());
            } else {
                shard_id = ctx.reader().getSortedNumericDocValues("shard_id");
                int doc;
                int numActual = 0;
                int lastDoc = 0;
                while ((doc = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                    lastDoc = doc;
                    while (shard_id.nextDoc() < doc) {
                        long shardID = shard_id.nextValue();
                        assertEquals(shardID, targetShardId);
                        numActual++;
                    }
                    assertEquals(shard_id.docID(), doc);
                    long shardID = shard_id.nextValue();
                    BytesRef id = reader.document(doc).getBinaryValue("_id");
                    String actualId = Uid.decodeId(id.bytes, id.offset, id.length);
                    assertNotEquals(ctx.reader() + " docID: " + doc + " actualID: " + actualId, shardID,
                            targetShardId);
                }
                if (lastDoc < ctx.reader().maxDoc()) {
                    // check the last docs in the segment and make sure they all have the right shard id
                    while (shard_id.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                        long shardID = shard_id.nextValue();
                        assertEquals(shardID, targetShardId);
                        numActual++;
                    }
                }

                assertEquals(numExpected, numActual);
            }
        }
    }
}