Example usage for org.apache.lucene.index PostingsEnum NONE

List of usage examples for org.apache.lucene.index PostingsEnum NONE

Introduction

On this page you can find example usage for org.apache.lucene.index PostingsEnum NONE.

Prototype

short NONE

Document

Flag to pass to TermsEnum#postings(PostingsEnum,int) if you don't require per-document postings in the returned enum.
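
Below is a minimal sketch (not taken from any of the projects listed on this page) showing PostingsEnum.NONE passed to TermsEnum#postings when only document IDs are needed. The index path "/tmp/index", the field name "body" and the term "lucene" are placeholder assumptions.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

public class PostingsNoneSketch {
    public static void main(String[] args) throws IOException {
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/tmp/index")))) {
            for (LeafReaderContext ctx : reader.leaves()) {
                Terms terms = ctx.reader().terms("body");
                if (terms == null) {
                    continue; // field not present in this segment
                }
                TermsEnum te = terms.iterator();
                if (!te.seekExact(new BytesRef("lucene"))) {
                    continue; // term not present in this segment
                }
                // NONE: request document IDs only; no freqs, positions, offsets or payloads
                PostingsEnum pe = te.postings(null, PostingsEnum.NONE);
                int docId;
                while ((docId = pe.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                    System.out.println("segment " + ctx.ord + " doc " + docId);
                }
            }
        }
    }
}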

Usage

From source file:com.github.flaxsearch.resources.PostingsResource.java

License:Apache License

@GET
public TermData getPostings(@QueryParam("segment") Integer segment, @PathParam("field") String field,
        @PathParam("term") String term, @QueryParam("count") @DefaultValue("2147483647") int count)
        throws IOException {

    TermsEnum te = readerManager.findTermPostings(segment, field, term);
    Bits liveDocs = readerManager.getLiveDocs(segment);
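    // NONE: only document IDs are needed here, so frequencies, positions, offsets and payloads are skipped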
    PostingsEnum pe = te.postings(null, PostingsEnum.NONE);

    int docFreq = te.docFreq();
    long totalTermFreq = te.totalTermFreq();

    int size = (docFreq < count) ? docFreq : count;
    int[] postings = new int[size];
    int docId;
    int i = 0;
    while ((docId = pe.nextDoc()) != PostingsEnum.NO_MORE_DOCS && i < count) {
        if (liveDocs != null && liveDocs.get(docId) == false)
            continue;
        postings[i] = docId;
        i++;
    }
    return new TermData(term, docFreq, totalTermFreq, postings);
}

From source file:com.rocana.lucene.codec.v1.TestBlockPostingsFormat3.java

License:Apache License

/**
 * checks the terms enum sequentially
 * if deep is false, it does a 'shallow' test that doesn't go down to the docs enums
 */
public void assertTermsEnum(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep,
        boolean hasPositions) throws Exception {
    BytesRef term;
    PostingsEnum leftPositions = null;
    PostingsEnum rightPositions = null;
    PostingsEnum leftDocs = null;
    PostingsEnum rightDocs = null;

    while ((term = leftTermsEnum.next()) != null) {
        assertEquals(term, rightTermsEnum.next());
        assertTermStats(leftTermsEnum, rightTermsEnum);
        if (deep) {
            if (hasPositions) {
                // with payloads + off
                assertDocsAndPositionsEnum(
                        leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.ALL),
                        rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.ALL));

                assertPositionsSkipping(leftTermsEnum.docFreq(),
                        leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.ALL),
                        rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.ALL));
                // with payloads only
                assertDocsAndPositionsEnum(
                        leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.PAYLOADS),
                        rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.PAYLOADS));

                assertPositionsSkipping(leftTermsEnum.docFreq(),
                        leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.PAYLOADS),
                        rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.PAYLOADS));

                // with offsets only
                assertDocsAndPositionsEnum(
                        leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.OFFSETS),
                        rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.OFFSETS));

                assertPositionsSkipping(leftTermsEnum.docFreq(),
                        leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.OFFSETS),
                        rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.OFFSETS));

                // with positions only
                assertDocsAndPositionsEnum(
                        leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.POSITIONS),
                        rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.POSITIONS));

                assertPositionsSkipping(leftTermsEnum.docFreq(),
                        leftPositions = leftTermsEnum.postings(leftPositions, PostingsEnum.POSITIONS),
                        rightPositions = rightTermsEnum.postings(rightPositions, PostingsEnum.POSITIONS));
            }

            // with freqs:
            assertDocsEnum(leftDocs = leftTermsEnum.postings(leftDocs),
                    rightDocs = rightTermsEnum.postings(rightDocs));

            // w/o freqs:
            assertDocsEnum(leftDocs = leftTermsEnum.postings(leftDocs, PostingsEnum.NONE),
                    rightDocs = rightTermsEnum.postings(rightDocs, PostingsEnum.NONE));

            // with freqs:
            assertDocsSkipping(leftTermsEnum.docFreq(), leftDocs = leftTermsEnum.postings(leftDocs),
                    rightDocs = rightTermsEnum.postings(rightDocs));

            // w/o freqs:
            assertDocsSkipping(leftTermsEnum.docFreq(),
                    leftDocs = leftTermsEnum.postings(leftDocs, PostingsEnum.NONE),
                    rightDocs = rightTermsEnum.postings(rightDocs, PostingsEnum.NONE));
        }
    }
    assertNull(rightTermsEnum.next());
}

From source file:de.unihildesheim.iw.lucene.search.EmptyFieldFilter.java

License:Open Source License

@Override
public DocIdSet getDocIdSet(@NotNull final LeafReaderContext context, @Nullable final Bits acceptDocs)
        throws IOException {
    FixedBitSet checkBits;
    final LeafReader reader = context.reader();
    final int maxDoc = reader.maxDoc();

    BitSet finalBits = new SparseFixedBitSet(maxDoc);
    if (acceptDocs == null) {
        checkBits = BitsUtils.bits2FixedBitSet(reader.getLiveDocs());
        if (checkBits == null) {
            // all live
            checkBits = new FixedBitSet(maxDoc);
            checkBits.set(0, checkBits.length());
        }
    } else {
        checkBits = BitsUtils.bits2FixedBitSet(acceptDocs);
    }

    @Nullable
    final Terms terms = reader.terms(this.field);
    if (terms != null) {
        final int termsDocCount = terms.getDocCount();

        if (termsDocCount != 0) {
            if (termsDocCount == maxDoc) {
                // all matching
                finalBits = checkBits;
            } else {
                @Nullable
                final Terms t = reader.terms(this.field);
                if (t != null) {
                    PostingsEnum pe = null;
                    final TermsEnum te = t.iterator(null);
                    int docId;
                    while (te.next() != null) {
                        pe = te.postings(checkBits, pe, (int) PostingsEnum.NONE);
                        while ((docId = pe.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                            if (checkBits.getAndClear(docId)) {
                                finalBits.set(docId);
                            }
                        }
                    }
                }
            }
        }
    }
    return new BitDocIdSet(finalBits);
}

From source file:org.apache.solr.handler.component.AlfrescoLukeRequestHandler.java

License:Open Source License

protected static Document getFirstLiveDoc(Terms terms, LeafReader reader) throws IOException {
    TermsEnum termsEnum = terms.iterator();
    if (termsEnum.next() == null) { // Ran off the end of the terms enum without finding any live docs with that field in them.
        return null;
    }
    PostingsEnum postingsEnum = termsEnum.postings(null, PostingsEnum.NONE);
    final Bits liveDocs = reader.getLiveDocs();
    if (postingsEnum.nextDoc() == DocIdSetIterator.NO_MORE_DOCS
            || (liveDocs != null && liveDocs.get(postingsEnum.docID()))) {
        return null;
    }
    return reader.document(postingsEnum.docID());
}

From source file:org.apache.solr.handler.component.AlfrescoLukeRequestHandlerTest.java

License:Open Source License

@Before
public void setUp() throws IOException {
    initMocks(this);

    // Link up the mocks.
    when(mockTerms.iterator()).thenReturn(mockTermsEnum);
    when(mockTermsEnum.postings(null, PostingsEnum.NONE)).thenReturn(mockPostingsEnum);
}

From source file:org.apache.solr.search.DocSetBuilder.java

License:Apache License

/** Returns the number of terms visited */
public int add(TermsEnum te, int base) throws IOException {
    PostingsEnum postings = null;

    int termCount = 0;
    for (;;) {
        BytesRef term = te.next();
        if (term == null)
            break;
        termCount++;
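        // PostingsEnum.NONE: doc IDs only; term frequencies are not needed to build the doc set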
        postings = te.postings(postings, PostingsEnum.NONE);
        add(postings, base);
    }

    return termCount;
}

From source file:org.apache.solr.search.DocSetUtil.java

License:Apache License

public static DocSet createDocSet(SolrIndexSearcher searcher, Term term) throws IOException {
    DirectoryReader reader = searcher.getRawReader(); // raw reader to avoid extra wrapping overhead
    int maxDoc = searcher.getIndexReader().maxDoc();
    int smallSetSize = smallSetSize(maxDoc);

    String field = term.field();
    BytesRef termVal = term.bytes();

    int maxCount = 0;
    int firstReader = -1;
    List<LeafReaderContext> leaves = reader.leaves();
    PostingsEnum[] postList = new PostingsEnum[leaves.size()]; // use array for slightly higher scanning cost, but fewer memory allocations
    for (LeafReaderContext ctx : leaves) {
        assert leaves.get(ctx.ord) == ctx;
        LeafReader r = ctx.reader();
        Fields f = r.fields();
        Terms t = f.terms(field);
        if (t == null)
            continue; // field is missing
        TermsEnum te = t.iterator();
        if (te.seekExact(termVal)) {
            maxCount += te.docFreq();
            postList[ctx.ord] = te.postings(null, PostingsEnum.NONE);
            if (firstReader < 0)
                firstReader = ctx.ord;
        }
    }

    DocSet answer = null;
    if (maxCount == 0) {
        answer = DocSet.EMPTY;
    } else if (maxCount <= smallSetSize) {
        answer = createSmallSet(leaves, postList, maxCount, firstReader);
    } else {
        answer = createBigSet(leaves, postList, maxDoc, firstReader);
    }

    return DocSetUtil.getDocSet(answer, searcher);
}

From source file:org.apache.solr.search.facet.FacetFieldProcessorByEnumTermsStream.java

License:Apache License

private SimpleOrderedMap<Object> _nextBucket() throws IOException {
    DocSet termSet = null;

    try {
        while (term != null) {

            if (startTermBytes != null && !StringHelper.startsWith(term, startTermBytes)) {
                break;
            }

            int df = termsEnum.docFreq();
            if (df < effectiveMincount) {
                term = termsEnum.next();
                continue;
            }

            if (termSet != null) {
                // termSet.decref(); // OFF-HEAP
                termSet = null;
            }

            int c = 0;

            if (hasSubFacets || df >= minDfFilterCache) {
                // use the filter cache

                if (deState == null) {
                    deState = new SolrIndexSearcher.DocsEnumState();
                    deState.fieldName = sf.getName();
                    deState.liveDocs = fcontext.searcher.getSlowAtomicReader().getLiveDocs();
                    deState.termsEnum = termsEnum;
                    deState.postingsEnum = postingsEnum;
                    deState.minSetSizeCached = minDfFilterCache;
                }

                if (hasSubFacets || !countOnly) {
                    DocSet termsAll = fcontext.searcher.getDocSet(deState);
                    termSet = docs.intersection(termsAll);
                    // termsAll.decref(); // OFF-HEAP
                    c = termSet.size();
                } else {
                    c = fcontext.searcher.numDocs(docs, deState);
                }
                postingsEnum = deState.postingsEnum;

                resetStats();

                if (!countOnly) {
                    collect(termSet, 0);
                }

            } else {
                // We don't need the docset here (meaning no sub-facets).
                // if countOnly, then we are calculating some other stats...
                resetStats();

                // lazy convert to fastForRandomSet
                if (fastForRandomSet == null) {
                    fastForRandomSet = docs;
                    if (docs instanceof SortedIntDocSet) { // OFF-HEAP todo: also check for native version
                        SortedIntDocSet sset = (SortedIntDocSet) docs;
                        fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
                    }
                }
                // iterate over TermDocs to calculate the intersection
                postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.NONE);

                if (postingsEnum instanceof MultiPostingsEnum) {
                    MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
                    int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
                    for (int subindex = 0; subindex < numSubs; subindex++) {
                        MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
                        if (sub.postingsEnum == null)
                            continue;
                        int base = sub.slice.start;
                        int docid;

                        if (countOnly) {
                            while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                                if (fastForRandomSet.exists(docid + base))
                                    c++;
                            }
                        } else {
                            setNextReader(leaves[sub.slice.readerIndex]);
                            while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                                if (fastForRandomSet.exists(docid + base)) {
                                    c++;
                                    collect(docid, 0);
                                }
                            }
                        }

                    }
                } else {
                    int docid;
                    if (countOnly) {
                        while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                            if (fastForRandomSet.exists(docid))
                                c++;
                        }
                    } else {
                        setNextReader(leaves[0]);
                        while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                            if (fastForRandomSet.exists(docid)) {
                                c++;
                                collect(docid, 0);
                            }
                        }
                    }
                }

            }

            if (c < effectiveMincount) {
                term = termsEnum.next();
                continue;
            }

            // handle offset and limit
            if (bucketsToSkip > 0) {
                bucketsToSkip--;
                term = termsEnum.next();
                continue;
            }

            if (freq.limit >= 0 && ++bucketsReturned > freq.limit) {
                return null;
            }

            // set count in case other stats depend on it
            countAcc.incrementCount(0, c);

            // OK, we have a good bucket to return... first get bucket value before moving to next term
            Object bucketVal = sf.getType().toObject(sf, term);
            TermQuery bucketQuery = hasSubFacets ? new TermQuery(new Term(freq.field, term)) : null;
            term = termsEnum.next();

            SimpleOrderedMap<Object> bucket = new SimpleOrderedMap<>();
            bucket.add("val", bucketVal);
            addStats(bucket, 0);
            if (hasSubFacets) {
                processSubs(bucket, bucketQuery, termSet);
            }

            // TODO... termSet needs to stick around for streaming sub-facets?

            return bucket;

        }

    } finally {
        if (termSet != null) {
            // termSet.decref();  // OFF-HEAP
            termSet = null;
        }
    }

    // end of the iteration
    return null;
}

From source file:org.apache.solr.uninverting.DocTermOrds.java

License:Apache License

/** Call this only once (if you subclass!) */
protected void uninvert(final LeafReader reader, Bits liveDocs, final BytesRef termPrefix) throws IOException {
    final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
    if (checkForDocValues && info != null && info.getDocValuesType() != DocValuesType.NONE) {
        throw new IllegalStateException(
                "Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
    }
    //System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix);
    final long startTime = System.nanoTime();
    prefix = termPrefix == null ? null : BytesRef.deepCopyOf(termPrefix);

    final int maxDoc = reader.maxDoc();
    final int[] index = new int[maxDoc]; // immediate term numbers, or the index into the byte[] representing the last number
    final int[] lastTerm = new int[maxDoc]; // last term we saw for this document
    final byte[][] bytes = new byte[maxDoc][]; // list of term numbers for the doc (delta encoded vInts)

    final Terms terms = reader.terms(field);
    if (terms == null) {
        // No terms
        return;
    }

    final TermsEnum te = terms.iterator();
    final BytesRef seekStart = termPrefix != null ? termPrefix : new BytesRef();
    //System.out.println("seekStart=" + seekStart.utf8ToString());
    if (te.seekCeil(seekStart) == TermsEnum.SeekStatus.END) {
        // No terms match
        return;
    }

    // For our "term index wrapper"
    final List<BytesRef> indexedTerms = new ArrayList<>();
    final PagedBytes indexedTermsBytes = new PagedBytes(15);

    // we need a minimum of 9 bytes, but round up to 12 since the space would
    // be wasted with most allocators anyway.
    byte[] tempArr = new byte[12];

    //
    // enumerate all terms, and build an intermediate form of the un-inverted field.
    //
    // During this intermediate form, every document has a (potential) byte[]
    // and the int[maxDoc()] array either contains the termNumber list directly
    // or the *end* offset of the termNumber list in its byte array (for faster
    // appending and faster creation of the final form).
    //
    // idea... if things are too large while building, we could do a range of docs
    // at a time (but it would be a fair amount slower to build)
    // could also do ranges in parallel to take advantage of multiple CPUs

    // OPTIONAL: remap the largest df terms to the lowest 128 (single byte)
    // values.  This requires going over the field first to find the most
    // frequent terms ahead of time.

    int termNum = 0;
    postingsEnum = null;

    // Loop begins with te positioned to first term (we call
    // seek above):
    for (;;) {
        final BytesRef t = te.term();
        if (t == null || (termPrefix != null && !StringHelper.startsWith(t, termPrefix))) {
            break;
        }
        //System.out.println("visit term=" + t.utf8ToString() + " " + t + " termNum=" + termNum);

        visitTerm(te, termNum);

        if ((termNum & indexIntervalMask) == 0) {
            // Index this term
            sizeOfIndexedStrings += t.length;
            BytesRef indexedTerm = new BytesRef();
            indexedTermsBytes.copy(t, indexedTerm);
            // TODO: really should 1) strip off useless suffix,
            // and 2) use FST not array/PagedBytes
            indexedTerms.add(indexedTerm);
        }

        final int df = te.docFreq();
        if (df <= maxTermDocFreq) {

            postingsEnum = te.postings(postingsEnum, PostingsEnum.NONE);

            // dF, but takes deletions into account
            int actualDF = 0;

            for (;;) {
                int doc = postingsEnum.nextDoc();
                if (doc == DocIdSetIterator.NO_MORE_DOCS) {
                    break;
                }
                //System.out.println("  chunk=" + chunk + " docs");

                actualDF++;
                termInstances++;

                //System.out.println("    docID=" + doc);
                // add TNUM_OFFSET to the term number to make room for special reserved values:
                // 0 (end term) and 1 (index into byte array follows)
                int delta = termNum - lastTerm[doc] + TNUM_OFFSET;
                lastTerm[doc] = termNum;
                int val = index[doc];

                if ((val & 0xff) == 1) {
                    // index into byte array (actually the end of
                    // the doc-specific byte[] when building)
                    int pos = val >>> 8;
                    int ilen = vIntSize(delta);
                    byte[] arr = bytes[doc];
                    int newend = pos + ilen;
                    if (newend > arr.length) {
                        // We avoid a doubling strategy to lower memory usage.
                        // this faceting method isn't for docs with many terms.
                        // In hotspot, objects have 2 words of overhead, then fields, rounded up to a 64-bit boundary.
                        // TODO: figure out what array lengths we can round up to w/o actually using more memory
                        // (how much space does a byte[] take up?  Is data preceded by a 32 bit length only?
                        // It should be safe to round up to the nearest 32 bits in any case.
                        int newLen = (newend + 3) & 0xfffffffc; // 4 byte alignment
                        byte[] newarr = new byte[newLen];
                        System.arraycopy(arr, 0, newarr, 0, pos);
                        arr = newarr;
                        bytes[doc] = newarr;
                    }
                    pos = writeInt(delta, arr, pos);
                    index[doc] = (pos << 8) | 1; // update pointer to end index in byte[]
                } else {
                    // OK, this int has data in it... find the end (a zero starting byte - not
                    // part of another number, hence not following a byte with the high bit set).
                    int ipos;
                    if (val == 0) {
                        ipos = 0;
                    } else if ((val & 0x0000ff80) == 0) {
                        ipos = 1;
                    } else if ((val & 0x00ff8000) == 0) {
                        ipos = 2;
                    } else if ((val & 0xff800000) == 0) {
                        ipos = 3;
                    } else {
                        ipos = 4;
                    }

                    //System.out.println("      ipos=" + ipos);

                    int endPos = writeInt(delta, tempArr, ipos);
                    //System.out.println("      endpos=" + endPos);
                    if (endPos <= 4) {
                        //System.out.println("      fits!");
                        // value will fit in the integer... move bytes back
                        for (int j = ipos; j < endPos; j++) {
                            val |= (tempArr[j] & 0xff) << (j << 3);
                        }
                        index[doc] = val;
                    } else {
                        // value won't fit... move integer into byte[]
                        for (int j = 0; j < ipos; j++) {
                            tempArr[j] = (byte) val;
                            val >>>= 8;
                        }
                        // point at the end index in the byte[]
                        index[doc] = (endPos << 8) | 1;
                        bytes[doc] = tempArr;
                        tempArr = new byte[12];
                    }
                }
            }
            setActualDocFreq(termNum, actualDF);
        }

        termNum++;
        if (te.next() == null) {
            break;
        }
    }

    numTermsInField = termNum;

    long midPoint = System.nanoTime();

    if (termInstances == 0) {
        // we didn't invert anything
        // lower memory consumption.
        tnums = null;
    } else {

        this.index = index;

        //
        // transform intermediate form into the final form, building a single byte[]
        // at a time, and releasing the intermediate byte[]s as we go to avoid
        // increasing the memory footprint.
        //

        for (int pass = 0; pass < 256; pass++) {
            byte[] target = tnums[pass];
            int pos = 0; // end in target;
            if (target != null) {
                pos = target.length;
            } else {
                target = new byte[4096];
            }

            // loop over documents, 0x00ppxxxx, 0x01ppxxxx, 0x02ppxxxx
            // where pp is the pass (which array we are building), and xx is all values.
            // each pass shares the same byte[] for termNumber lists.
            for (int docbase = pass << 16; docbase < maxDoc; docbase += (1 << 24)) {
                int lim = Math.min(docbase + (1 << 16), maxDoc);
                for (int doc = docbase; doc < lim; doc++) {
                    //System.out.println("  pass=" + pass + " process docID=" + doc);
                    int val = index[doc];
                    if ((val & 0xff) == 1) {
                        int len = val >>> 8;
                        //System.out.println("    ptr pos=" + pos);
                        index[doc] = (pos << 8) | 1; // change index to point to start of array
                        if ((pos & 0xff000000) != 0) {
                            // we only have 24 bits for the array index
                            throw new IllegalStateException(
                                    "Too many values for UnInvertedField faceting on field " + field);
                        }
                        byte[] arr = bytes[doc];
                        /*
                        for(byte b : arr) {
                          //System.out.println("      b=" + Integer.toHexString((int) b));
                        }
                        */
                        bytes[doc] = null; // IMPORTANT: allow GC to avoid OOM
                        if (target.length <= pos + len) {
                            int newlen = target.length;
                            /*** we don't have to worry about the array getting too large
                             * since the "pos" param will overflow first (only 24 bits available)
                            if ((newlen<<1) <= 0) {
                              // overflow...
                              newlen = Integer.MAX_VALUE;
                              if (newlen <= pos + len) {
                                throw new SolrException(400,"Too many terms to uninvert field!");
                              }
                            } else {
                              while (newlen <= pos + len) newlen<<=1;  // doubling strategy
                            }
                            ****/
                            while (newlen <= pos + len)
                                newlen <<= 1; // doubling strategy                 
                            byte[] newtarget = new byte[newlen];
                            System.arraycopy(target, 0, newtarget, 0, pos);
                            target = newtarget;
                        }
                        System.arraycopy(arr, 0, target, pos, len);
                        pos += len + 1; // skip single byte at end and leave it 0 for terminator
                    }
                }
            }

            // shrink array
            if (pos < target.length) {
                byte[] newtarget = new byte[pos];
                System.arraycopy(target, 0, newtarget, 0, pos);
                target = newtarget;
            }

            tnums[pass] = target;

            if ((pass << 16) > maxDoc)
                break;
        }

    }
    indexedTermsArray = indexedTerms.toArray(new BytesRef[indexedTerms.size()]);

    long endTime = System.nanoTime();

    total_time = (int) TimeUnit.MILLISECONDS.convert(endTime - startTime, TimeUnit.NANOSECONDS);
    phase1_time = (int) TimeUnit.MILLISECONDS.convert(midPoint - startTime, TimeUnit.NANOSECONDS);
}

From source file:org.codelibs.elasticsearch.common.lucene.index.FilterableTermsEnum.java

License:Apache License

public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter)
        throws IOException {
    if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
        throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
    }
    this.docsEnumFlag = docsEnumFlag;
    List<LeafReaderContext> leaves = reader.leaves();
    List<Holder> enums = new ArrayList<>(leaves.size());
    final Weight weight;
    if (filter == null) {
        weight = null;
    } else {
        final IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        weight = searcher.createNormalizedWeight(filter, false);
    }
    for (LeafReaderContext context : leaves) {
        Terms terms = context.reader().terms(field);
        if (terms == null) {
            continue;
        }
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum == null) {
            continue;
        }
        BitSet bits = null;
        if (weight != null) {
            Scorer scorer = weight.scorer(context);
            if (scorer == null) {
                // fully filtered, none matching, no need to iterate on this
                continue;
            }
            DocIdSetIterator docs = scorer.iterator();

            // we want to force apply deleted docs
            final Bits liveDocs = context.reader().getLiveDocs();
            if (liveDocs != null) {
                docs = new FilteredDocIdSetIterator(docs) {
                    @Override
                    protected boolean match(int doc) {
                        return liveDocs.get(doc);
                    }
                };
            }

            bits = BitSet.of(docs, context.reader().maxDoc());
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
}