Example usage for com.google.common.collect Iterators peekingIterator

List of usage examples for com.google.common.collect Iterators peekingIterator

Introduction

On this page you can find example usage for com.google.common.collect Iterators peekingIterator.

Prototype

public static <T> PeekingIterator<T> peekingIterator(Iterator<? extends T> iterator)

Source Link

Document

Returns a PeekingIterator backed by the given iterator.
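
Before the project-level examples, here is a minimal, self-contained sketch of the basic peek/next pattern; the class name and word list are illustrative and not taken from the sources below:

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

import java.util.Arrays;
import java.util.List;

public class PeekingIteratorExample {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("alpha", "beta", "gamma");

        // Wrap a plain Iterator so the next element can be inspected without consuming it.
        PeekingIterator<String> it = Iterators.peekingIterator(words.iterator());

        while (it.hasNext()) {
            // peek() returns the upcoming element without advancing the iterator.
            String upcoming = it.peek();
            // next() returns that same element and advances.
            String consumed = it.next();
            System.out.println("peeked=" + upcoming + ", consumed=" + consumed);
        }
    }
}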

Usage

From source file:org.opendaylight.tsdr.datastorage.TSDRStorageServiceImpl.java

@Override
public Future<RpcResult<GetTSDRAggregatedMetricsOutput>> getTSDRAggregatedMetrics(
        final GetTSDRAggregatedMetricsInput input) {

    if (this.metricPersistenceService == null) {
        RpcResultBuilder<GetTSDRAggregatedMetricsOutput> builder = RpcResultBuilder.failed();
        return builder.buildFuture();
    }

    // Locate the appropriate aggregation function implementation
    final AggregationFunction aggregationFunction = Iterators.find(aggregationFunctions.iterator(),
            new Predicate<AggregationFunction>() {
                @Override
                public boolean apply(AggregationFunction candidate) {
                    return candidate.getType().equals(input.getAggregation());
                }
            }, null);
    if (aggregationFunction == null) {
        return RpcResultBuilder.<GetTSDRAggregatedMetricsOutput>failed()
                .withError(ErrorType.APPLICATION, String.format(
                        "No aggregation function implementation was found for '%s'.", input.getAggregation()))
                .buildFuture();
    }

    // Gather the metrics for the given time span
    final GetTSDRMetricsInput metricsInput = new GetTSDRMetricsInputBuilder()
            .setTSDRDataCategory(input.getTSDRDataCategory()).setStartTime(input.getStartTime())
            .setEndTime(input.getEndTime()).build();
    final Future<RpcResult<GetTSDRMetricsOutput>> result = getTSDRMetrics(metricsInput);

    //Fix for bug 5655 - Do not aggregate when # of points is less than requested
    long numberOfPoints = (input.getEndTime() - input.getStartTime()) / input.getInterval();
    try {
        //For a MEAN aggregation, if there are no more metrics than requested points, just return
        //the original result unaggregated.
        if (input.getAggregation() == AggregationType.MEAN
                && result.get().getResult().getMetrics().size() <= numberOfPoints) {
            final List<AggregatedMetrics> aggregatedMetrics = Lists.newLinkedList();
            for (Metrics m : result.get().getResult().getMetrics()) {
                // Aggregate the metrics in the interval
                aggregatedMetrics.add(new AggregatedMetricsBuilder().setTimeStamp(m.getTimeStamp())
                        .setMetricValue(m.getMetricValue()).build());
            }
            // We're done
            final GetTSDRAggregatedMetricsOutputBuilder outputBuilder = new GetTSDRAggregatedMetricsOutputBuilder()
                    .setAggregatedMetrics(aggregatedMetrics);
            return RpcResultBuilder.success(outputBuilder).buildFuture();

        }
    } catch (InterruptedException | ExecutionException e) {
        RpcResultBuilder<GetTSDRAggregatedMetricsOutput> builder = RpcResultBuilder.failed();
        builder.withError(ErrorType.APPLICATION, "Failed to extract data for aggregation");
        return builder.buildFuture();
    }

    // Aggregate the results
    return Futures.lazyTransform(result,
            new Function<RpcResult<GetTSDRMetricsOutput>, RpcResult<GetTSDRAggregatedMetricsOutput>>() {
                @Override
                public RpcResult<GetTSDRAggregatedMetricsOutput> apply(
                        RpcResult<GetTSDRMetricsOutput> metricsOutput) {
                    final List<AggregatedMetrics> aggregatedMetrics = Lists.newLinkedList();
                    final PeekingIterator<Metrics> metricIterator = Iterators
                            .peekingIterator(metricsOutput.getResult().getMetrics().iterator());
                    // Generate and iterate over all the intervals in the given range
                    for (Long intervalStartInclusive : new IntervalGenerator(input.getStartTime(),
                            input.getEndTime(), input.getInterval())) {
                        final Long intervalEndExclusive = intervalStartInclusive + input.getInterval();

                        // Gather the list of metrics that fall within the current interval
                        // We make the assumption that the list of metrics is already sorted by time-stamp
                        final List<Metrics> metricsInInterval = Lists.newLinkedList();
                        while (metricIterator.hasNext()) {
                            if (metricIterator.peek().getTimeStamp() >= intervalEndExclusive) {
                                break;
                            }
                            metricsInInterval.add(metricIterator.next());
                        }

                        // Aggregate the metrics in the interval
                        aggregatedMetrics.add(new AggregatedMetricsBuilder()
                                .setTimeStamp(intervalStartInclusive)
                                .setMetricValue(aggregationFunction.aggregate(metricsInInterval)).build());
                    }

                    // We're done
                    final GetTSDRAggregatedMetricsOutput output = new GetTSDRAggregatedMetricsOutputBuilder()
                            .setAggregatedMetrics(aggregatedMetrics).build();
                    return RpcResultBuilder.success(output).build();
                }
            });
}

From source file:com.digitalpetri.opcua.sdk.server.subscriptions.Subscription.java

private void returnNotifications(ServiceRequest<PublishRequest, PublishResponse> service) {
    LinkedHashSet<BaseMonitoredItem<?>> items = new LinkedHashSet<>();

    lastIterator.forEachRemaining(items::add);

    itemsById.values().stream().filter(item -> item.hasNotifications() || item.isTriggered())
            .forEach(items::add);

    PeekingIterator<BaseMonitoredItem<?>> iterator = Iterators.peekingIterator(items.iterator());

    gatherAndSend(iterator, Optional.of(service));

    lastIterator = iterator.hasNext() ? iterator : Iterators.emptyIterator();
}

From source file:org.eclipse.milo.opcua.sdk.server.subscriptions.Subscription.java

private void returnNotifications(ServiceRequest service) {
    LinkedHashSet<BaseMonitoredItem<?>> items = new LinkedHashSet<>();

    lastIterator.forEachRemaining(items::add);

    itemsById.values().stream().filter(item -> item.hasNotifications() || item.isTriggered())
            .forEach(items::add);

    PeekingIterator<BaseMonitoredItem<?>> iterator = Iterators.peekingIterator(items.iterator());

    gatherAndSend(iterator, service);

    lastIterator = iterator.hasNext() ? iterator : Collections.emptyIterator();
}

From source file:org.glowroot.ui.TraceCommonService.java

private static void writeEntries(JsonGenerator jg, List<Trace.Entry> entries) throws IOException {
    jg.writeStartArray();
    PeekingIterator<Trace.Entry> i = Iterators.peekingIterator(entries.iterator());
    while (i.hasNext()) {
        Trace.Entry entry = i.next();
        int depth = entry.getDepth();
        jg.writeStartObject();
        writeJson(entry, jg);
        int nextDepth = i.hasNext() ? i.peek().getDepth() : 0;
        if (nextDepth > depth) {
            jg.writeArrayFieldStart("childEntries");
        } else if (nextDepth < depth) {
            jg.writeEndObject();
            for (int j = depth; j > nextDepth; j--) {
                jg.writeEndArray();
                jg.writeEndObject();
            }
        } else {
            jg.writeEndObject();
        }
    }
    jg.writeEndArray();
}

From source file:com.google.cloud.genomics.localrepo.QueryEngine.java

private SearchReadsResponse searchReads(final QueryDescriptor descriptor,
        final Predicate<String> readsetFilter) {
    abstract class RecursiveProcessor<X, Y, Z> {

        abstract void close(Y value);

        abstract Y open(X key);

        final Z process(Iterable<X> keys) {
            return process(keys.iterator(), new HashMap<>());
        }

        private Z process(Iterator<X> iterator, Map<X, Y> map) {
            if (iterator.hasNext()) {
                X key = iterator.next();
                Y value = null;
                try {
                    map.put(key, value = open(key));
                    return process(iterator, map);
                } finally {
                    if (null != value) {
                        try {
                            close(value);
                        } catch (Exception e) {
                            LOGGER.warning(e.getMessage());
                        }
                    }
                }
            }
            return process(map);
        }

        abstract Z process(Map<X, Y> map);
    }
    return new RecursiveProcessor<Map.Entry<File, QueryDescriptor.Start>, SAMFileReader, SearchReadsResponse>() {

        @Override
        void close(SAMFileReader reader) {
            reader.close();
        }

        @Override
        SAMFileReader open(Map.Entry<File, QueryDescriptor.Start> entry) {
            return getBamFile.get(entry.getKey()).open();
        }

        @Override
        SearchReadsResponse process(Map<Map.Entry<File, QueryDescriptor.Start>, SAMFileReader> map) {
            final int end = descriptor.getEnd();
            return new RecursiveProcessor<Map.Entry<Map.Entry<File, QueryDescriptor.Start>, SAMFileReader>, SAMRecordIterator, SearchReadsResponse>() {

                @Override
                void close(SAMRecordIterator iterator) {
                    iterator.close();
                }

                @Override
                SAMRecordIterator open(Map.Entry<Map.Entry<File, QueryDescriptor.Start>, SAMFileReader> entry) {
                    QueryDescriptor.Start interval = entry.getKey().getValue();
                    SAMRecordIterator iterator = entry.getValue().queryOverlapping(interval.getSequence(),
                            interval.getStart(), end);
                    int skip = interval.getSkip();
                    for (int i = 0; iterator.hasNext() && i < skip; ++i) {
                        iterator.next();
                    }
                    return iterator;
                }

                private <Y, X extends Y> Stream<Stream<X>> partition(final Iterator<X> iterator,
                        final BiPredicate<Y, Y> equivalence) {
                    return StreamSupport.stream(new Spliterators.AbstractSpliterator<Stream<X>>(Long.MAX_VALUE,
                            Spliterator.IMMUTABLE) {

                        private final PeekingIterator<X> delegate = Iterators.peekingIterator(iterator);

                        @Override
                        public boolean tryAdvance(Consumer<? super Stream<X>> action) {
                            if (delegate.hasNext()) {
                                List<X> list = new ArrayList<>();
                                X first = delegate.next();
                                for (list.add(first); delegate.hasNext()
                                        && equivalence.test(first, delegate.peek()); list
                                                .add(delegate.next())) {
                                    ;
                                }
                                action.accept(list.stream());
                                return true;
                            }
                            return false;
                        }
                    }, false);
                }

                @Override
                SearchReadsResponse process(
                        Map<Map.Entry<Map.Entry<File, QueryDescriptor.Start>, SAMFileReader>, SAMRecordIterator> map) {
                    Map<File, PeekingIterator<SAMRecordWithSkip>> iterators = new HashMap<>();
                    for (Map.Entry<Map.Entry<Map.Entry<File, QueryDescriptor.Start>, SAMFileReader>, SAMRecordIterator> entry : map
                            .entrySet()) {
                        File file = entry.getKey().getKey().getKey();
                        iterators.put(file, Iterators.peekingIterator(partition(entry.getValue(),
                                (lhs, rhs) -> Objects.equals(lhs.getReferenceIndex(), rhs.getReferenceIndex())
                                        && Objects.equals(lhs.getAlignmentStart(), rhs.getAlignmentStart()))
                                                .map(stream -> stream
                                                        .map(new Function<SAMRecord, SAMRecordWithSkip>() {

                                                            private int skip = 0;

                                                            @Override
                                                            public SAMRecordWithSkip apply(SAMRecord record) {
                                                                return new SAMRecordWithSkip(file, record,
                                                                        skip++);
                                                            }
                                                        }))
                                                .flatMap(Function.identity()).iterator()));
                    }
                    return searchReads(iterators, end, readsetFilter);
                }
            }.process(map.entrySet());
        }
    }.process(descriptor.getStarts().entrySet());
}

From source file:org.opendaylight.openflowplugin.applications.frsync.impl.strategy.SyncPlanPushStrategyFlatBatchImpl.java

static Map<Range<Integer>, Batch> mapBatchesToRanges(final List<Batch> inputBatchBag,
        final int failureIndexLimit) {
    final Map<Range<Integer>, Batch> batchMap = new LinkedHashMap<>();
    final PeekingIterator<Batch> batchPeekingIterator = Iterators.peekingIterator(inputBatchBag.iterator());
    while (batchPeekingIterator.hasNext()) {
        final Batch batch = batchPeekingIterator.next();
        final int nextBatchOrder = batchPeekingIterator.hasNext() ? batchPeekingIterator.peek().getBatchOrder()
                : failureIndexLimit;
        batchMap.put(Range.closed(batch.getBatchOrder(), nextBatchOrder - 1), batch);
    }
    return batchMap;
}

From source file:org.geogit.repository.WorkingTree.java

/**
 * Inserts a collection of features into the working tree and updates the WORK_HEAD ref.
 *
 * @param treePath the path of the tree to insert the features into
 * @param features the features to insert
 * @param listener a {@link ProgressListener} for the current process
 * @param insertedTarget if provided, inserted features will be added to this list
 * @param collectionSize number of features to add
 * @throws Exception
 */
public void insert(final String treePath, Iterator<? extends Feature> features, final ProgressListener listener,
        @Nullable final List<Node> insertedTarget, @Nullable final Integer collectionSize) {

    checkArgument(collectionSize == null || collectionSize.intValue() > -1);

    final NodeRef treeRef;
    {
        Optional<NodeRef> typeTreeRef = commandLocator.command(FindTreeChild.class).setIndex(true)
                .setParent(getTree()).setChildPath(treePath).call();

        if (typeTreeRef.isPresent()) {
            treeRef = typeTreeRef.get();
        } else {
            Preconditions.checkArgument(features.hasNext(),
                    "Can't create new FeatureType tree %s as no features were provided, "
                            + "try using createTypeTree() first",
                    treePath);

            features = Iterators.peekingIterator(features);

            FeatureType featureType = ((PeekingIterator<Feature>) features).peek().getType();
            treeRef = createTypeTree(treePath, featureType);
        }
    }

    final ObjectId defaultMetadataId = treeRef.getMetadataId();
    final Map<Name, ObjectId> revFeatureTypes = Maps.newHashMap();

    final RevTreeBuilder typeTreeBuilder = commandLocator.command(FindOrCreateSubtree.class).setIndex(true)
            .setParent(Suppliers.ofInstance(Optional.of(getTree()))).setChildPath(treePath).call()
            .builder(indexDatabase);

    Iterator<RevObject> objects = Iterators.transform(features, new Function<Feature, RevObject>() {

        private RevFeatureBuilder builder = new RevFeatureBuilder();

        private int count;

        @Override
        public RevFeature apply(Feature feature) {
            final RevFeature revFeature = builder.build(feature);
            FeatureType featureType = feature.getType();
            ObjectId revFeatureTypeId = revFeatureTypes.get(featureType.getName());

            if (null == revFeatureTypeId) {
                RevFeatureType newFeatureType = RevFeatureType.build(featureType);
                revFeatureTypeId = newFeatureType.getId();
                indexDatabase.put(newFeatureType);
                revFeatureTypes.put(feature.getType().getName(), revFeatureTypeId);
            }

            ObjectId metadataId = defaultMetadataId.equals(revFeatureTypeId) ? ObjectId.NULL : revFeatureTypeId;
            Node node = createNode(metadataId, feature, revFeature);

            if (insertedTarget != null) {
                insertedTarget.add(node);
            }
            typeTreeBuilder.put(node);

            count++;
            if (collectionSize != null) {
                listener.progress((float) (count * 100) / collectionSize.intValue());
            }
            return revFeature;
        }

    });

    // System.err.println("\n inserting rev features...");
    // Stopwatch sw = new Stopwatch().start();
    listener.started();
    indexDatabase.putAll(objects);
    listener.complete();
    // sw.stop();
    // System.err.printf("\n%d features inserted in %s", collectionSize, sw);

    // System.err.println("\nBuilding final tree...");
    // sw.reset().start();
    RevTree newFeatureTree = typeTreeBuilder.build();
    indexDatabase.put(newFeatureTree);
    // sw.stop();
    // System.err.println("\nfinal tree built in " + sw);

    ObjectId newTree = commandLocator.command(WriteBack.class).setAncestor(getTreeSupplier())
            .setChildPath(treePath).setMetadataId(treeRef.getMetadataId()).setToIndex(true)
            .setTree(newFeatureTree).call();

    updateWorkHead(newTree);
}

From source file:org.jasig.portal.portlets.statistics.BaseStatisticsReportController.java

/**
 * Build the aggregation {@link DataTable}
 */
protected final DataTable buildAggregationReport(F form) throws TypeMismatchException {
    //Pull data out of form for per-group fetching
    final AggregationInterval interval = form.getInterval();
    final DateMidnight start = form.getStart();
    final DateMidnight end = form.getEnd();

    final DateTime startDateTime = start.toDateTime();
    //Use a query end of the end date at 23:59:59
    final DateTime endDateTime = end.plusDays(1).toDateTime().minusSeconds(1);

    //Get the list of DateTimes used on the X axis in the report
    final List<DateTime> reportTimes = this.intervalHelper.getIntervalStartDateTimesBetween(interval,
            startDateTime, endDateTime, maxIntervals);

    final Map<D, SortedSet<T>> groupedAggregations = createColumnDiscriminatorMap(form);

    //Determine the ValueType of the date/time column. Use the most specific column type possible
    final ValueType dateTimeColumnType;
    if (interval.isHasTimePart()) {
        //If start/end are the same day just display the time
        if (startDateTime.toDateMidnight().equals(endDateTime.toDateMidnight())) {
            dateTimeColumnType = ValueType.TIMEOFDAY;
        }
        //interval has time data and start/end are on different days, show full date time
        else {
            dateTimeColumnType = ValueType.DATETIME;
        }
    }
    //interval is date only
    else {
        dateTimeColumnType = ValueType.DATE;
    }

    //Setup the date/time column description
    final ColumnDescription dateTimeColumn;
    switch (dateTimeColumnType) {
    case TIMEOFDAY: {
        dateTimeColumn = new ColumnDescription("time", dateTimeColumnType, "Time");
        break;
    }
    default: {
        dateTimeColumn = new ColumnDescription("date", dateTimeColumnType, "Date");
    }
    }

    final DataTable table = new JsonDataTable();
    table.addColumn(dateTimeColumn);

    //Setup columns in the DataTable 
    final Set<D> columnGroups = groupedAggregations.keySet();
    for (final D columnMapping : columnGroups) {
        final Collection<ColumnDescription> columnDescriptions = this.getColumnDescriptions(columnMapping,
                form);
        table.addColumns(columnDescriptions);
    }

    //Query for all aggregation data in the time range for all groups.  Only the
    //interval and discriminator data is used from the keys.
    final Set<K> keys = createAggregationsQueryKeyset(columnGroups, form);
    final BaseAggregationDao<T, K> baseAggregationDao = this.getBaseAggregationDao();
    final Collection<T> aggregations = baseAggregationDao.getAggregations(startDateTime, endDateTime, keys,
            extractGroupsArray(columnGroups));

    //Organize the results by group and sort them chronologically by adding them to the sorted set
    for (final T aggregation : aggregations) {
        final D discriminator = aggregation.getAggregationDiscriminator();
        final SortedSet<T> results = groupedAggregations.get(discriminator);
        results.add(aggregation);
    }

    //Build Map from discriminator column mapping to result iterator to allow putting results into
    //the correct column AND the correct time slot in the column
    Comparator<? super D> comparator = getDiscriminatorComparator();
    final Map<D, PeekingIterator<T>> groupedAggregationIterators = new TreeMap<D, PeekingIterator<T>>(
            (comparator));
    for (final Entry<D, SortedSet<T>> groupedAggregationEntry : groupedAggregations.entrySet()) {
        groupedAggregationIterators.put(groupedAggregationEntry.getKey(),
                Iterators.peekingIterator(groupedAggregationEntry.getValue().iterator()));
    }

    /*
     * Populate the data, filling in blank spots. The full list of interval DateTimes is used to create every row in
     * the query range. Each group's iterator is then advanced only when its next aggregation matches the row's time
     * slot; any gap is filled in with a null aggregation.
     */
    for (final DateTime rowTime : reportTimes) {
        // create the row
        final TableRow row = new TableRow();

        // add the date to the first cell
        final Value dateTimeValue;
        switch (dateTimeColumnType) {
        case DATE: {
            dateTimeValue = new DateValue(rowTime.getYear(), rowTime.getMonthOfYear() - 1,
                    rowTime.getDayOfMonth());
            break;
        }
        case TIMEOFDAY: {
            dateTimeValue = new TimeOfDayValue(rowTime.getHourOfDay(), rowTime.getMinuteOfHour(), 0);
            break;
        }
        default: {
            dateTimeValue = new DateTimeValue(rowTime.getYear(), rowTime.getMonthOfYear() - 1,
                    rowTime.getDayOfMonth(), rowTime.getHourOfDay(), rowTime.getMinuteOfHour(), 0, 0);
            break;
        }
        }
        row.addCell(new TableCell(dateTimeValue));

        for (final PeekingIterator<T> groupedAggregationIteratorEntry : groupedAggregationIterators.values()) {
            List<Value> values = null;

            if (groupedAggregationIteratorEntry.hasNext()) {
                final T aggr = groupedAggregationIteratorEntry.peek();
                if (rowTime.equals(aggr.getDateTime())) {
                    //Data is for the correct time slot, advance the iterator
                    groupedAggregationIteratorEntry.next();

                    values = createRowValues(aggr, form);
                }
            }

            //Gap in the data, fill it in using a null aggregation
            if (values == null) {
                values = createRowValues(null, form);
            }

            //Add the values to the row
            for (final Value value : values) {
                row.addCell(value);
            }
        }

        table.addRow(row);
    }

    return table;
}

From source file:co.cask.cdap.data2.dataset2.lib.cube.DefaultCube.java

private Collection<TimeSeries> convertToQueryResult(CubeQuery query,
        Table<Map<String, String>, String, Map<Long, Long>> resultTable) {

    List<TimeSeries> result = Lists.newArrayList();
    // iterating each groupValue dimensions
    for (Map.Entry<Map<String, String>, Map<String, Map<Long, Long>>> row : resultTable.rowMap().entrySet()) {
        // iterating each measure
        for (Map.Entry<String, Map<Long, Long>> measureEntry : row.getValue().entrySet()) {
            // generating time series for a grouping and a measure
            int count = 0;
            List<TimeValue> timeValues = Lists.newArrayList();
            for (Map.Entry<Long, Long> timeValue : measureEntry.getValue().entrySet()) {
                timeValues.add(new TimeValue(timeValue.getKey(), timeValue.getValue()));
            }
            Collections.sort(timeValues);
            PeekingIterator<TimeValue> timeValueItor = Iterators.peekingIterator(
                    new TimeSeriesInterpolator(timeValues, query.getInterpolator(), query.getResolution())
                            .iterator());
            List<TimeValue> resultTimeValues = Lists.newArrayList();
            while (timeValueItor.hasNext()) {
                TimeValue timeValue = timeValueItor.next();
                resultTimeValues.add(new TimeValue(timeValue.getTimestamp(), timeValue.getValue()));
                if (++count >= query.getLimit()) {
                    break;
                }
            }
            result.add(new TimeSeries(measureEntry.getKey(), row.getKey(), resultTimeValues));
        }
    }
    return result;
}

From source file:org.diqube.data.types.dbl.dict.FpcDoubleDictionary.java

/**
 * Iterates over this dicts values and the given other dicts values and identifies IDs where there are <, > and ==
 * relations.
 * 
 * @param callback
 *          The callback will be called for _all_ found relations of every item in this dict: for each item in this
 *          dict, {@link IterationCallback#foundGreaterId(Long, Long)} and
 *          {@link IterationCallback#foundSmallerId(Long, Long)} will be called at least once (at least with the
 *          smallest greater ID and the greatest smaller ID, respectively). Additionally,
 *          {@link IterationCallback#foundEqualIds(Long, Long)} will be called if a valid pair is found.
 */
private void iterateOverValues(FpcDoubleDictionary fpcOther, IterationCallback callback) {
    PeekingIterator<Entry<Long, FpcPage>> otherIt = Iterators
            .peekingIterator(fpcOther.pages.entrySet().iterator());
    FpcPage otherFirstPage = otherIt.peek().getValue();
    double[] otherValues = otherFirstPage.get(0, otherFirstPage.getSize() - 1);

    int otherIdx = 0;
    for (Entry<Long, FpcPage> ourEntry : pages.entrySet()) {
        FpcPage ourPage = ourEntry.getValue();
        long ourFirstIdx = ourEntry.getKey();
        double[] ourValues = ourPage.get(0, ourPage.getSize() - 1);
        for (int i = 0; i < ourValues.length; i++) {
            double ourValue = ourValues[i];

            long otherFirstIdx = otherIt.peek().getKey();

            // move "otherIdx" to the right, until ourValue <= otherValues[otherNextIdx]
            while (ourValue > otherValues[otherIdx] && !DoubleUtil.equals(ourValue, otherValues[otherIdx])) {
                otherIdx++;

                if (otherIdx == otherValues.length) {
                    // end of other page, move to next page.
                    otherIt.next();
                    if (!otherIt.hasNext()) {
                        LongStream.rangeClosed(ourFirstIdx + i, highestId)
                                .forEach(ourId -> callback.foundSmallerId(ourId, fpcOther.highestId));
                        return;
                    }

                    otherFirstIdx = otherIt.peek().getKey();
                    otherIdx = 0;
                    FpcPage newOtherPage = otherIt.peek().getValue();
                    otherValues = newOtherPage.get(0, newOtherPage.getSize() - 1);
                }
            }

            if (DoubleUtil.equals(ourValue, otherValues[otherIdx])) {
                if (otherFirstIdx + otherIdx - 1 >= 0)
                    callback.foundSmallerId(ourFirstIdx + i, otherFirstIdx + otherIdx - 1);

                callback.foundEqualIds(ourFirstIdx + i, otherFirstIdx + otherIdx);

                if (otherFirstIdx + otherIdx + 1 <= fpcOther.highestId)
                    callback.foundGreaterId(ourFirstIdx + i, otherFirstIdx + otherIdx + 1);
            } else {
                // We know ourValue < otherValues[otherIdx]; in the previous iteration of the while loop above, ourValue was still greater than the other value.
                callback.foundGreaterId(ourFirstIdx + i, otherFirstIdx + otherIdx);

                if (otherFirstIdx + otherIdx - 1 >= 0)
                    callback.foundSmallerId(ourFirstIdx + i, otherFirstIdx + otherIdx - 1);
            }
        }
    }
}
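
A recurring idiom across these examples (writeEntries in TraceCommonService, mapBatchesToRanges in SyncPlanPushStrategyFlatBatchImpl, the partition helper in QueryEngine) is using peek() to decide where the next group starts before consuming an element. The following condensed sketch groups consecutive equal elements; the groupRuns name and the sample data are illustrative and not taken from the sources above:

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class RunGroupingSketch {

    // Groups consecutive equal elements into sub-lists by peeking at the upcoming element.
    static <T> List<List<T>> groupRuns(Iterator<T> source) {
        PeekingIterator<T> it = Iterators.peekingIterator(source);
        List<List<T>> runs = new ArrayList<>();
        while (it.hasNext()) {
            List<T> run = new ArrayList<>();
            T first = it.next();
            run.add(first);
            // Keep consuming while the next element (seen via peek) still belongs to the current run.
            while (it.hasNext() && it.peek().equals(first)) {
                run.add(it.next());
            }
            runs.add(run);
        }
        return runs;
    }

    public static void main(String[] args) {
        // Prints [[1, 1], [2], [3, 3, 3]]
        System.out.println(groupRuns(Arrays.asList(1, 1, 2, 3, 3, 3).iterator()));
    }
}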