Example usage for org.apache.lucene.search DocIdSetIterator NO_MORE_DOCS

Introduction

This page collects example usages of org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS.

Prototype

public static final int NO_MORE_DOCS

Document

When returned by #nextDoc(), #advance(int) or #docID(), it means there are no more docs in the iterator.
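
The examples below all follow the same loop shape. As a minimal, stand-alone sketch of that convention (the class and helper method names here are illustrative, not part of Lucene or of any example on this page):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;

class NoMoreDocsSketch {

    // Exhaust an iterator with nextDoc(): every value returned before
    // NO_MORE_DOCS is a valid segment-local doc ID.
    static int countDocs(DocIdSetIterator it) throws IOException {
        int count = 0;
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            count++;
        }
        // Once the iterator is exhausted, docID() also reports NO_MORE_DOCS.
        assert it.docID() == DocIdSetIterator.NO_MORE_DOCS;
        return count;
    }

    // advance(int) follows the same convention: it positions the iterator on the
    // first doc >= target, or returns NO_MORE_DOCS when no such doc exists.
    static boolean hasDocAtOrAfter(DocIdSetIterator it, int target) throws IOException {
        return it.advance(target) != DocIdSetIterator.NO_MORE_DOCS;
    }
}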

Usage

From source file:org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder.java

License:Apache License

DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException {
    final DateTimeZone tz = timeZone();
    if (field() != null && tz != null && tz.isFixed() == false && script() == null) {
        final MappedFieldType ft = context.fieldMapper(field());
        final IndexReader reader = context.getIndexReader();
        if (ft != null && reader != null) {
            Long anyInstant = null;
            final IndexNumericFieldData fieldData = context.getForField(ft);
            for (LeafReaderContext ctx : reader.leaves()) {
                AtomicNumericFieldData leafFD = fieldData.load(ctx);
                SortedNumericDocValues values = leafFD.getLongValues();
                if (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                    anyInstant = values.nextValue();
                    break;
                }
            }

            if (anyInstant != null) {
                final long prevTransition = tz.previousTransition(anyInstant);
                final long nextTransition = tz.nextTransition(anyInstant);

                // We need all not only values but also rounded values to be within
                // [prevTransition, nextTransition].
                final long low;
                DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit();
                if (intervalAsUnit != null) {
                    final DateTimeField dateTimeField = intervalAsUnit.field(tz);
                    low = dateTimeField.roundCeiling(prevTransition);
                } else {
                    final TimeValue intervalAsMillis = getIntervalAsTimeValue();
                    low = Math.addExact(prevTransition, intervalAsMillis.millis());
                }
                // rounding rounds down, so 'nextTransition' is a good upper bound
                final long high = nextTransition;

                if (ft.isFieldWithinQuery(reader, low, high, true, false, DateTimeZone.UTC, EPOCH_MILLIS_PARSER,
                        context) == Relation.WITHIN) {
                    // All values in this reader have the same offset despite daylight saving times.
                    // This is very common for location-based timezones such as Europe/Paris in
                    // combination with time-based indices.
                    return DateTimeZone.forOffsetMillis(tz.getOffset(anyInstant));
                }
            }
        }
    }
    return tz;
}

From source file:org.elasticsearch.search.fetch.innerhits.NestedChildrenFilterTest.java

License:Apache License

@Test
public void testNestedChildrenFilter() throws Exception {
    int numParentDocs = scaledRandomIntBetween(0, 32);
    int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numParentDocs; i++) {
        int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
        List<Document> docs = new ArrayList<>(numChildDocs + 1);
        for (int j = 0; j < numChildDocs; j++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            docs.add(childDoc);
        }

        Document parentDoc = new Document();
        parentDoc.add(new StringField("type", "parent", Field.Store.NO));
        parentDoc.add(new IntField("num_child_docs", numChildDocs, Field.Store.YES));
        docs.add(parentDoc);
        writer.addDocuments(docs);
    }

    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    FixedBitSetFilterCache fixedBitSetFilterCache = new FixedBitSetFilterCache(new Index("test"),
            ImmutableSettings.EMPTY);
    FixedBitSetFilter parentFilter = fixedBitSetFilterCache
            .getFixedBitSetFilter(new TermFilter(new Term("type", "parent")));
    Filter childFilter = new TermFilter(new Term("type", "child"));
    int checkedParents = 0;
    for (AtomicReaderContext leaf : reader.leaves()) {
        DocIdSetIterator parents = parentFilter.getDocIdSet(leaf, null).iterator();
        for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS; parentDoc = parents
                .nextDoc()) {
            int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue()
                    .intValue();
            hitContext.reset(null, leaf, parentDoc, reader);
            NestedChildrenFilter nestedChildrenFilter = new NestedChildrenFilter(parentFilter, childFilter,
                    hitContext);
            TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
            assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
            checkedParents++;
        }
    }
    assertThat(checkedParents, equalTo(numParentDocs));
    reader.close();
    dir.close();
}

From source file:org.elasticsearch.search.fetch.innerhits.NestedChildrenFilterTests.java

License:Apache License

@Test
public void testNestedChildrenFilter() throws Exception {
    int numParentDocs = scaledRandomIntBetween(0, 32);
    int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numParentDocs; i++) {
        int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
        List<Document> docs = new ArrayList<>(numChildDocs + 1);
        for (int j = 0; j < numChildDocs; j++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            docs.add(childDoc);
        }

        Document parentDoc = new Document();
        parentDoc.add(new StringField("type", "parent", Field.Store.NO));
        parentDoc.add(new IntField("num_child_docs", numChildDocs, Field.Store.YES));
        docs.add(parentDoc);
        writer.addDocuments(docs);
    }

    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
    Query childFilter = new TermQuery(new Term("type", "child"));
    int checkedParents = 0;
    final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")),
            false);
    for (LeafReaderContext leaf : reader.leaves()) {
        DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator();
        for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS; parentDoc = parents
                .nextDoc()) {
            int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue()
                    .intValue();
            hitContext.reset(null, leaf, parentDoc, searcher);
            NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter,
                    hitContext);
            TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
            assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
            checkedParents++;
        }
    }
    assertThat(checkedParents, equalTo(numParentDocs));
    reader.close();
    dir.close();
}

From source file:org.elasticsearch.search.fetch.subphase.NestedChildrenFilterTests.java

License:Apache License

public void testNestedChildrenFilter() throws Exception {
    int numParentDocs = scaledRandomIntBetween(0, 32);
    int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);

    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numParentDocs; i++) {
        int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
        List<Document> docs = new ArrayList<>(numChildDocs + 1);
        for (int j = 0; j < numChildDocs; j++) {
            Document childDoc = new Document();
            childDoc.add(new StringField("type", "child", Field.Store.NO));
            docs.add(childDoc);
        }

        Document parentDoc = new Document();
        parentDoc.add(new StringField("type", "parent", Field.Store.NO));
        parentDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES));
        docs.add(parentDoc);
        writer.addDocuments(docs);
    }

    IndexReader reader = writer.getReader();
    writer.close();

    IndexSearcher searcher = new IndexSearcher(reader);
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent")));
    Query childFilter = new TermQuery(new Term("type", "child"));
    int checkedParents = 0;
    final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")),
            false);
    for (LeafReaderContext leaf : reader.leaves()) {
        DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator();
        for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS; parentDoc = parents
                .nextDoc()) {
            int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue()
                    .intValue();
            hitContext.reset(null, leaf, parentDoc, searcher);
            NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter,
                    hitContext);
            TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
            searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
            assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
            checkedParents++;
        }
    }
    assertThat(checkedParents, equalTo(numParentDocs));
    reader.close();
    dir.close();
}

From source file:org.elasticsearch.search.suggest.completion.old.AnalyzingCompletionLookupProvider.java

License:Apache License

@Override
public FieldsConsumer consumer(final IndexOutput output) throws IOException {
    CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION_LATEST);
    return new FieldsConsumer() {
        private Map<String, Long> fieldOffsets = new HashMap<>();

        @Override
        public void close() throws IOException {
            try {
                /*
                 * write the offsets per field such that we know where
                 * we need to load the FSTs from
                 */
                long pointer = output.getFilePointer();
                output.writeVInt(fieldOffsets.size());
                for (Map.Entry<String, Long> entry : fieldOffsets.entrySet()) {
                    output.writeString(entry.getKey());
                    output.writeVLong(entry.getValue());
                }
                output.writeLong(pointer);
                CodecUtil.writeFooter(output);
            } finally {
                IOUtils.close(output);
            }
        }

        @Override
        public void write(Fields fields) throws IOException {
            for (String field : fields) {
                Terms terms = fields.terms(field);
                if (terms == null) {
                    continue;
                }
                TermsEnum termsEnum = terms.iterator();
                PostingsEnum docsEnum = null;
                final SuggestPayload spare = new SuggestPayload();
                int maxAnalyzedPathsForOneInput = 0;
                final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(
                        maxSurfaceFormsPerAnalyzedForm, hasPayloads, XAnalyzingSuggester.PAYLOAD_SEP);
                int docCount = 0;
                while (true) {
                    BytesRef term = termsEnum.next();
                    if (term == null) {
                        break;
                    }
                    docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.PAYLOADS);
                    builder.startTerm(term);
                    int docFreq = 0;
                    while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                        for (int i = 0; i < docsEnum.freq(); i++) {
                            final int position = docsEnum.nextPosition();
                            AnalyzingCompletionLookupProvider.this.parsePayload(docsEnum.getPayload(), spare);
                            builder.addSurface(spare.surfaceForm.get(), spare.payload.get(), spare.weight);
                            // multi fields have the same surface form so we sum up here
                            maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1);
                        }
                        docFreq++;
                        docCount = Math.max(docCount, docsEnum.docID() + 1);
                    }
                    builder.finishTerm(docFreq);
                }
                /*
                 * Here we are done processing the field and we can
                 * build the FST and write it to disk.
                 */
                FST<Pair<Long, BytesRef>> build = builder.build();
                assert build != null || docCount == 0 : "the FST is null but docCount is != 0 actual value: ["
                        + docCount + "]";
                /*
                 * it's possible that the FST is null if we have 2 segments that get merged
                 * and all docs that have a value in this field are deleted. This will cause
                 * a consumer to be created but it doesn't consume any values causing the FSTBuilder
                 * to return null.
                 */
                if (build != null) {
                    fieldOffsets.put(field, output.getFilePointer());
                    build.save(output);
                    /* write some more meta-info */
                    output.writeVInt(maxAnalyzedPathsForOneInput);
                    output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
                    output.writeInt(maxGraphExpansions); // can be negative
                    int options = 0;
                    options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0;
                    options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
                    options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
                    output.writeVInt(options);
                    output.writeVInt(XAnalyzingSuggester.SEP_LABEL);
                    output.writeVInt(XAnalyzingSuggester.END_BYTE);
                    output.writeVInt(XAnalyzingSuggester.PAYLOAD_SEP);
                    output.writeVInt(XAnalyzingSuggester.HOLE_CHARACTER);
                }
            }
        }
    };
}
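
The NO_MORE_DOCS loop inside write(Fields) above boils down to walking a term's postings until exhaustion. A minimal sketch of just that step, assuming the TermsEnum is already positioned on a term (the class and method names are illustrative, and positions/payloads are skipped here):

import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;

class PostingsIterationSketch {

    // Count the documents containing the term the TermsEnum is positioned on.
    // PostingsEnum extends DocIdSetIterator, so the same sentinel applies.
    static int docFreqByIteration(TermsEnum termsEnum) throws IOException {
        PostingsEnum postings = termsEnum.postings(null, PostingsEnum.NONE);
        int docFreq = 0;
        while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            docFreq++;
        }
        return docFreq;
    }
}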

From source file:org.elasticsearch.search.suggest.completion.old.AnalyzingCompletionLookupProviderV1.java

License:Apache License

@Override
public FieldsConsumer consumer(final IndexOutput output) throws IOException {
    // TODO write index header?
    CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION);
    return new FieldsConsumer() {
        private Map<String, Long> fieldOffsets = new HashMap<>();

        @Override
        public void close() throws IOException {
            try { /*
                   * write the offsets per field such that we know where
                   * we need to load the FSTs from
                   */
                long pointer = output.getFilePointer();
                output.writeVInt(fieldOffsets.size());
                for (Map.Entry<String, Long> entry : fieldOffsets.entrySet()) {
                    output.writeString(entry.getKey());
                    output.writeVLong(entry.getValue());
                }
                output.writeLong(pointer);
            } finally {
                IOUtils.close(output);
            }
        }

        @Override
        public void write(Fields fields) throws IOException {
            for (String field : fields) {
                Terms terms = fields.terms(field);
                if (terms == null) {
                    continue;
                }
                TermsEnum termsEnum = terms.iterator();
                PostingsEnum docsEnum = null;
                final SuggestPayload spare = new SuggestPayload();
                int maxAnalyzedPathsForOneInput = 0;
                final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(
                        maxSurfaceFormsPerAnalyzedForm, hasPayloads, XAnalyzingSuggester.PAYLOAD_SEP);
                int docCount = 0;
                while (true) {
                    BytesRef term = termsEnum.next();
                    if (term == null) {
                        break;
                    }
                    docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.PAYLOADS);
                    builder.startTerm(term);
                    int docFreq = 0;
                    while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                        for (int i = 0; i < docsEnum.freq(); i++) {
                            final int position = docsEnum.nextPosition();
                            AnalyzingCompletionLookupProviderV1.this.parsePayload(docsEnum.getPayload(), spare);
                            builder.addSurface(spare.surfaceForm.get(), spare.payload.get(), spare.weight);
                            // multi fields have the same surface form so we sum up here
                            maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1);
                        }
                        docFreq++;
                        docCount = Math.max(docCount, docsEnum.docID() + 1);
                    }
                    builder.finishTerm(docFreq);
                }
                /*
                 * Here we are done processing the field and we can
                 * build the FST and write it to disk.
                 */
                FST<Pair<Long, BytesRef>> build = builder.build();
                assert build != null || docCount == 0 : "the FST is null but docCount is != 0 actual value: ["
                        + docCount + "]";
                /*
                 * it's possible that the FST is null if we have 2 segments that get merged
                 * and all docs that have a value in this field are deleted. This will cause
                 * a consumer to be created but it doesn't consume any values causing the FSTBuilder
                 * to return null.
                 */
                if (build != null) {
                    fieldOffsets.put(field, output.getFilePointer());
                    build.save(output);
                    /* write some more meta-info */
                    output.writeVInt(maxAnalyzedPathsForOneInput);
                    output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
                    output.writeInt(maxGraphExpansions); // can be negative
                    int options = 0;
                    options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0;
                    options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
                    options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
                    output.writeVInt(options);
                }
            }
        }
    };
}

From source file:org.elasticsearch.search.suggest.completion2x.AnalyzingCompletionLookupProvider.java

License:Apache License

@Override
public FieldsConsumer consumer(final IndexOutput output) throws IOException {
    CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION_LATEST);
    return new FieldsConsumer() {
        private Map<String, Long> fieldOffsets = new HashMap<>();

        @Override
        public void close() throws IOException {
            try {
                /*
                 * write the offsets per field such that we know where
                 * we need to load the FSTs from
                 */
                long pointer = output.getFilePointer();
                output.writeVInt(fieldOffsets.size());
                for (Map.Entry<String, Long> entry : fieldOffsets.entrySet()) {
                    output.writeString(entry.getKey());
                    output.writeVLong(entry.getValue());
                }
                output.writeLong(pointer);
                CodecUtil.writeFooter(output);
            } finally {
                IOUtils.close(output);
            }
        }

        @Override
        public void write(Fields fields) throws IOException {
            for (String field : fields) {
                Terms terms = fields.terms(field);
                if (terms == null) {
                    continue;
                }
                TermsEnum termsEnum = terms.iterator();
                PostingsEnum docsEnum = null;
                final SuggestPayload spare = new SuggestPayload();
                int maxAnalyzedPathsForOneInput = 0;
                final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(
                        maxSurfaceFormsPerAnalyzedForm, hasPayloads, XAnalyzingSuggester.PAYLOAD_SEP);
                int docCount = 0;
                while (true) {
                    BytesRef term = termsEnum.next();
                    if (term == null) {
                        break;
                    }
                    docsEnum = termsEnum.postings(docsEnum, PostingsEnum.PAYLOADS);
                    builder.startTerm(term);
                    int docFreq = 0;
                    while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                        for (int i = 0; i < docsEnum.freq(); i++) {
                            final int position = docsEnum.nextPosition();
                            AnalyzingCompletionLookupProvider.this.parsePayload(docsEnum.getPayload(), spare);
                            builder.addSurface(spare.surfaceForm.get(), spare.payload.get(), spare.weight);
                            // multi fields have the same surface form so we sum up here
                            maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1);
                        }
                        docFreq++;
                        docCount = Math.max(docCount, docsEnum.docID() + 1);
                    }
                    builder.finishTerm(docFreq);
                }
                /*
                 * Here we are done processing the field and we can
                 * build the FST and write it to disk.
                 */
                FST<Pair<Long, BytesRef>> build = builder.build();
                assert build != null || docCount == 0 : "the FST is null but docCount is != 0 actual value: ["
                        + docCount + "]";
                /*
                 * it's possible that the FST is null if we have 2 segments that get merged
                 * and all docs that have a value in this field are deleted. This will cause
                 * a consumer to be created but it doesn't consume any values causing the FSTBuilder
                 * to return null.
                 */
                if (build != null) {
                    fieldOffsets.put(field, output.getFilePointer());
                    build.save(output);
                    /* write some more meta-info */
                    output.writeVInt(maxAnalyzedPathsForOneInput);
                    output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
                    output.writeInt(maxGraphExpansions); // can be negative
                    int options = 0;
                    options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0;
                    options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
                    options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
                    output.writeVInt(options);
                    output.writeVInt(XAnalyzingSuggester.SEP_LABEL);
                    output.writeVInt(XAnalyzingSuggester.END_BYTE);
                    output.writeVInt(XAnalyzingSuggester.PAYLOAD_SEP);
                    output.writeVInt(XAnalyzingSuggester.HOLE_CHARACTER);
                }
            }
        }
    };
}

From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.java

License:Open Source License

/**
 * Compute the number of live documents. This method is SLOW.
 */
private static int computeNumDocs(LeafReader reader, Query roleQuery, BitSet roleQueryBits) {
    final Bits liveDocs = reader.getLiveDocs();
    if (roleQueryBits == null) {
        return 0;
    } else if (liveDocs == null) {
        // slow
        return roleQueryBits.cardinality();
    } else {
        // very slow, but necessary in order to be correct
        int numDocs = 0;
        DocIdSetIterator it = new BitSetIterator(roleQueryBits, 0L); // we don't use the cost
        try {
            for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                if (liveDocs.get(doc)) {
                    numDocs++;
                }
            }
            return numDocs;
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}

From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapper.java

License:Open Source License

static void intersectScorerAndRoleBits(Scorer scorer, SparseFixedBitSet roleBits, LeafCollector collector,
        Bits acceptDocs) throws IOException {
    // ConjunctionDISI uses the DocIdSetIterator#cost() to order the iterators, so if roleBits has the lowest cardinality it should
    // be used first:
    DocIdSetIterator iterator = ConjunctionDISI.intersectIterators(
            Arrays.asList(new BitSetIterator(roleBits, roleBits.approximateCardinality()), scorer.iterator()));
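    // NO_MORE_DOCS is Integer.MAX_VALUE, so 'docId < NO_MORE_DOCS' below is equivalent to the usual 'docId != NO_MORE_DOCS' check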
    for (int docId = iterator.nextDoc(); docId < DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) {
        if (acceptDocs == null || acceptDocs.get(docId)) {
            collector.collect(docId);
        }
    }
}

From source file:org.eu.bitzone.Leia.java

License:Apache License

public void showFirstTermDoc(final Object fText) {
    final Term t = (Term) getProperty(fText, "term");
    if (t == null) {
        return;
    }
    if (ir == null) {
        showStatus(MSG_NOINDEX);
        return;
    }
    if (ar == null) {
        errorMsg(MSG_LUCENE3828);
        return;
    }
    final SlowThread st = new SlowThread(this) {

        @Override
        public void execute() {
            try {

                final DocsAndPositionsEnum td = ar.termPositionsEnum(new Term(t.field(), t.bytes()));
                if (td == null) {
                    showStatus("No such term: " + t);
                    return;
                }
                if (td.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
                    showStatus("No documents with this term: " + t + " (NO_MORE_DOCS)");
                    return;
                }
                setString(find("tdNum"), "text", "1");
                putProperty(fText, "td", td);
                _showTermDoc(fText, td);
            } catch (final Exception e) {
                e.printStackTrace();
                showStatus(e.getMessage());
            }
        }
    };
    if (slowAccess) {
        st.start();
    } else {
        st.execute();
    }
}