Example usage for org.apache.lucene.util.packed PackedLongValues iterator

List of usage examples for org.apache.lucene.util.packed PackedLongValues iterator

Introduction

In this page you can find the example usage for org.apache.lucene.util.packed PackedLongValues iterator.

Prototype

public Iterator iterator() 

Source Link

Document

Return an iterator over the values of this array.

Usage

From source file:org.elasticsearch.search.aggregations.bucket.BestBucketsDeferringCollector.java

License:Apache License

/**
 * Replays the wrapped collector, restricted to the given selection of buckets.
 *
 * @param selectedBuckets ordinals of the buckets to replay
 * @throws IOException if reading from the index fails during replay
 * @throws IllegalStateException if collection has not finished yet, or if a
 *         replay has already been performed
 */
@Override
public void prepareSelectedBuckets(long... selectedBuckets) throws IOException {
    if (finished == false) {
        throw new IllegalStateException(
                "Cannot replay yet, collection is not finished: postCollect() has not been called");
    }
    if (this.selectedBuckets != null) {
        throw new IllegalStateException("Already been replayed");
    }

    // Hash the selected ordinals; LongHash assigns each one a dense rebased id.
    final LongHash rebasedBuckets = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE);
    for (int i = 0; i < selectedBuckets.length; ++i) {
        rebasedBuckets.add(selectedBuckets[i]);
    }
    this.selectedBuckets = rebasedBuckets;

    collector.preCollection();
    if (collector.needsScores()) {
        // Scores were never buffered alongside the doc ids, so they cannot be replayed.
        throw new IllegalStateException("Cannot defer if scores are needed");
    }

    for (Entry entry : entries) {
        final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
        leafCollector.setScorer(Lucene.illegalScorer("A limitation of the " + SubAggCollectionMode.BREADTH_FIRST
                + " collection mode is that scores cannot be buffered along with document IDs"));
        final PackedLongValues.Iterator deltas = entry.docDeltas.iterator();
        final PackedLongValues.Iterator bucketOrds = entry.buckets.iterator();
        final long total = entry.docDeltas.size();
        // Doc ids were buffered as deltas; accumulate to recover absolute ids.
        int doc = 0;
        for (long n = 0; n < total; ++n) {
            doc += deltas.next();
            // find() returns -1 for buckets that were not selected.
            final long rebased = rebasedBuckets.find(bucketOrds.next());
            if (rebased != -1) {
                leafCollector.collect(doc, rebased);
            }
        }
    }

    collector.postCollection();
}

From source file:org.elasticsearch.search.aggregations.bucket.MergingBucketsDeferringCollector.java

License:Apache License

/**
 * Remaps every buffered bucket ordinal {@code b} to {@code mergeMap[b]}.
 * Applies both to the frozen per-segment entries and, if present, to the
 * buckets collected in the current (still open) segment.
 *
 * @param mergeMap the new ordinal for each old bucket ordinal, indexed by old ordinal
 */
public void mergeBuckets(long[] mergeMap) {

    List<Entry> newEntries = new ArrayList<>(entries.size());
    for (Entry sourceEntry : entries) {
        // Doc deltas are unaffected by a bucket merge; only the ordinals change.
        newEntries.add(new Entry(sourceEntry.context, sourceEntry.docDeltas,
                remapBuckets(sourceEntry.buckets, mergeMap).build()));
    }
    entries = newEntries;

    // if there are buckets that have been collected in the current segment
    // we need to update the bucket ordinals there too
    if (buckets.size() > 0) {
        buckets = remapBuckets(buckets.build(), mergeMap);
    }
}

/**
 * Returns a builder holding {@code values} with each ordinal replaced by
 * {@code mergeMap[ordinal]}. Extracted to remove the duplicated remapping loop.
 */
private static PackedLongValues.Builder remapBuckets(PackedLongValues values, long[] mergeMap) {
    PackedLongValues.Builder remapped = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
    for (PackedLongValues.Iterator itr = values.iterator(); itr.hasNext();) {
        long bucket = itr.next();
        remapped.add(mergeMap[Math.toIntExact(bucket)]);
    }
    return remapped;
}

From source file:org.elasticsearch.search.aggregations.bucket.MergingBucketsDeferringCollector.java

License:Apache License

/**
 * Replay the wrapped collector, but only on a selection of buckets.
 * <p>
 * Rebases the selected bucket ordinals into a {@link LongHash} and replays
 * every buffered (doc, bucket) pair whose bucket was selected. If the wrapped
 * collector needs scores, a weight is re-created from the search context's
 * query so a scorer can be positioned on each replayed document.
 *
 * @param selectedBuckets ordinals of the buckets to replay
 * @throws IOException if reading from the index fails during replay
 * @throws IllegalStateException if collection has not finished yet, or if a
 *         replay has already been performed
 */
@Override
public void prepareSelectedBuckets(long... selectedBuckets) throws IOException {
    if (finished == false) {
        throw new IllegalStateException(
                "Cannot replay yet, collection is not finished: postCollect() has not been called");
    }
    if (this.selectedBuckets != null) {
        throw new IllegalStateException("Already been replayed");
    }

    // Hash the selected ordinals; LongHash assigns each one a dense rebased id.
    final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE);
    for (long bucket : selectedBuckets) {
        hash.add(bucket);
    }
    this.selectedBuckets = hash;

    boolean needsScores = collector.needsScores();
    Weight weight = null;
    if (needsScores) {
        // Scores were not buffered, so build a weight from the original query
        // to re-score the replayed documents.
        weight = searchContext.searcher().createNormalizedWeight(searchContext.query(), true);
    }
    for (Entry entry : entries) {
        final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
        DocIdSetIterator docIt = null;
        if (needsScores && entry.docDeltas.size() > 0) {
            Scorer scorer = weight.scorer(entry.context);
            // We don't need to check if the scorer is null
            // since we are sure that there are documents to replay
            // (entry.docDeltas it not empty).
            docIt = scorer.iterator();
            leafCollector.setScorer(scorer);
        }
        final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator();
        final PackedLongValues.Iterator buckets = entry.buckets.iterator();
        // Doc ids were buffered as deltas; accumulate to recover absolute ids.
        int doc = 0;
        for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) {
            doc += docDeltaIterator.next();
            final long bucket = buckets.next();
            // find() returns -1 for buckets that were not selected.
            final long rebasedBucket = hash.find(bucket);
            if (rebasedBucket != -1) {
                if (needsScores) {
                    if (docIt.docID() < doc) {
                        docIt.advance(doc);
                    }
                    // aggregations should only be replayed on matching
                    // documents
                    assert docIt.docID() == doc;
                }
                leafCollector.collect(doc, rebasedBucket);
            }
        }
    }

    collector.postCollection();
}