Example usage for com.google.common.collect PeekingIterator hasNext

List of usage examples for com.google.common.collect PeekingIterator hasNext

Introduction

On this page you can find an example usage of com.google.common.collect PeekingIterator hasNext.

Prototype

boolean hasNext();

Source Link

Document

Returns true if the iteration has more elements.

Usage

From source file:org.geogit.api.plumbing.diff.DiffCounter.java

/**
 * Counts the differences between the children of two trees by merge-joining their
 * child iterators; assumes both iterators yield nodes sorted by {@link NodeStorageOrder}
 * (required for the pairwise head comparison below to line up matching paths).
 *
 * @param leftTree iterator over the left tree's child nodes, in storage order
 * @param rightTree iterator over the right tree's child nodes, in storage order
 * @return the accumulated count of differing features and trees
 */
private DiffObjectCount countChildrenDiffs(Iterator<Node> leftTree, Iterator<Node> rightTree) {

    final Ordering<Node> storageOrder = new NodeStorageOrder();

    DiffObjectCount count = new DiffObjectCount();

    // Peeking iterators let us compare both heads without consuming either side.
    PeekingIterator<Node> left = Iterators.peekingIterator(leftTree);
    PeekingIterator<Node> right = Iterators.peekingIterator(rightTree);

    while (left.hasNext() && right.hasNext()) {
        Node peekLeft = left.peek();
        Node peekRight = right.peek();

        if (0 == storageOrder.compare(peekLeft, peekRight)) {
            // same path, consume both
            peekLeft = left.next();
            peekRight = right.next();
            if (!peekLeft.getObjectId().equals(peekRight.getObjectId())) {
                // find the diffs between these two specific refs
                if (RevObject.TYPE.FEATURE.equals(peekLeft.getType())) {
                    // a feature may only change into another feature
                    checkState(RevObject.TYPE.FEATURE.equals(peekRight.getType()));
                    count.addFeatures(1);
                } else {
                    checkState(RevObject.TYPE.TREE.equals(peekLeft.getType()));
                    checkState(RevObject.TYPE.TREE.equals(peekRight.getType()));
                    ObjectId leftTreeId = peekLeft.getObjectId();
                    ObjectId rightTreeId = peekRight.getObjectId();
                    // recurse into the two differing subtrees
                    count.add(countDiffs(leftTreeId, rightTreeId));
                }
            }
        } else if (peekLeft == storageOrder.min(peekLeft, peekRight)) {
            peekLeft = left.next();// consume only the left value
            count.add(aggregateSize(ImmutableList.of(peekLeft)));
        } else {
            peekRight = right.next();// consume only the right value
            count.add(aggregateSize(ImmutableList.of(peekRight)));
        }
    }

    // At most one side can have leftovers; count them all as pure additions/removals.
    if (left.hasNext()) {
        count.add(countRemaining(left));
    } else if (right.hasNext()) {
        count.add(countRemaining(right));
    }
    Preconditions.checkState(!left.hasNext());
    Preconditions.checkState(!right.hasNext());
    return count;
}

From source file:google.registry.backup.RestoreCommitLogsAction.java

/**
 * Restore the contents of one transaction to datastore.
 *
 * <p>The objects to delete are listed in the {@link CommitLogManifest}, which will be the first
 * object in the iterable. The objects to save follow, each as a {@link CommitLogMutation}. We
 * restore by deleting the deletes and recreating the saves from their proto form. We also save
 * the commit logs themselves back to datastore, so that the commit log system itself is
 * transparently restored alongside the data.
 *
 * @return the manifest, for use in restoring the {@link CommitLogBucket}.
 */
private CommitLogManifest restoreOneTransaction(PeekingIterator<ImmutableObject> commitLogs) {
    // The manifest is always the first entry of a transaction's commit logs.
    final CommitLogManifest manifest = (CommitLogManifest) commitLogs.next();
    // Kick off the deletes asynchronously while we build up the list of saves.
    Result<?> deleteResult = deleteAsync(manifest.getDeletions());
    List<Entity> entitiesToSave = Lists.newArrayList(ofy().save().toEntity(manifest));
    // Consume mutations until the next manifest (or end of input); peek() leaves the
    // following transaction's manifest on the iterator for the caller to process.
    while (commitLogs.hasNext() && commitLogs.peek() instanceof CommitLogMutation) {
        CommitLogMutation mutation = (CommitLogMutation) commitLogs.next();
        // Save both the mutation record itself and the entity it encodes.
        entitiesToSave.add(ofy().save().toEntity(mutation));
        entitiesToSave.add(EntityTranslator.createFromPbBytes(mutation.getEntityProtoBytes()));
    }
    saveRaw(entitiesToSave);
    try {
        deleteResult.now();
    } catch (Exception e) {
        // If the async delete failed, retry it synchronously.
        // NOTE(review): 'e' is discarded here; consider logging it before retrying.
        retry(new Runnable() {
            @Override
            public void run() {
                deleteAsync(manifest.getDeletions()).now();
            }
        });
    }
    return manifest;
}

From source file:co.cask.cdap.metrics.query.MetricStoreRequestExecutor.java

/**
 * Executes the given metric query and returns the result rendered as JSON:
 * a time series when a real resolution is requested, or plain aggregates
 * when the resolution is the {@code Integer.MAX_VALUE} sentinel.
 *
 * @param query the metric query to execute
 * @return the query result as a JSON tree
 * @throws Exception if the underlying metric scan fails
 */
public JsonElement executeQuery(MetricDataQuery query) throws Exception {
    // Pretty ugly logic now. Need to refactor
    Object resultObj;
    // Resolution == Integer.MAX_VALUE is the sentinel for "totals only" (no time series).
    if (query.getResolution() != Integer.MAX_VALUE) {
        TimeSeriesResponse.Builder builder = TimeSeriesResponse.builder(query.getStartTs(), query.getEndTs());
        // Special metrics handle that requires computation from multiple time series.
        if (query.getMetrics().containsKey("system.process.busyness")) {
            computeProcessBusyness(query, builder);
        } else {
            PeekingIterator<TimeValue> timeValueItor = Iterators.peekingIterator(queryTimeSeries(query));

            // Align the first emitted timestamp to a resolution boundary.
            long resultTimeStamp = (query.getStartTs() / query.getResolution()) * query.getResolution();

            for (int i = 0; i < query.getLimit(); i++) {
                // peek() lets us test the head timestamp without consuming a value
                // that belongs to a later output slot.
                if (timeValueItor.hasNext() && timeValueItor.peek().getTimestamp() == resultTimeStamp) {
                    builder.addData(resultTimeStamp, timeValueItor.next().getValue());
                } else {
                    // If the scan result doesn't have value for a timestamp, we add 0 to the result-returned for that timestamp
                    builder.addData(resultTimeStamp, 0);
                }
                resultTimeStamp += query.getResolution();
            }
        }
        resultObj = builder.build();

    } else {
        // Special metrics handle that requires computation from multiple aggregates results.
        if (query.getMetrics().containsKey("system.process.events.pending")) {
            resultObj = computeFlowletPending(query);
        } else {
            resultObj = getAggregates(query);
        }
    }

    return GSON.toJsonTree(resultObj);
}

From source file:com.facebook.presto.raptor.systemtables.TableMetadataPageSource.java

/**
 * Builds the table-metadata pages for all tables matching the optional
 * schema/table constraints in {@code tupleDomain}.
 *
 * <p>Column rows are merge-joined against table rows by table ID, so both DAO
 * result sets are assumed to be ordered by table ID — TODO confirm against the
 * DAO queries.
 *
 * @param tupleDomain constraint on schema name and/or table name (single values only)
 * @return an iterator over the built pages
 */
private Iterator<Page> loadPages(TupleDomain<Integer> tupleDomain) {
    Map<Integer, Domain> domains = tupleDomain.getDomains().get();
    Domain schemaNameDomain = domains.get(getColumnIndex(tableMetadata, SCHEMA_NAME));
    Domain tableNameDomain = domains.get(getColumnIndex(tableMetadata, TABLE_NAME));

    // A null name means "unconstrained" when fetching rows from the DAO.
    String schemaName = schemaNameDomain == null ? null
            : getStringValue(schemaNameDomain.getSingleValue()).toLowerCase(ENGLISH);
    String tableName = tableNameDomain == null ? null
            : getStringValue(tableNameDomain.getSingleValue()).toLowerCase(ENGLISH);

    ImmutableList.Builder<Page> pages = ImmutableList.builder();
    PageBuilder pageBuilder = new PageBuilder(types);

    List<TableMetadataRow> tableRows = dao.getTableMetadataRows(schemaName, tableName);
    PeekingIterator<ColumnMetadataRow> columnRowIterator = peekingIterator(
            dao.getColumnMetadataRows(schemaName, tableName).iterator());

    for (TableMetadataRow tableRow : tableRows) {
        // Skip column rows belonging to tables that sort before this one.
        while (columnRowIterator.hasNext() && columnRowIterator.peek().getTableId() < tableRow.getTableId()) {
            columnRowIterator.next();
        }

        // Gather this table's temporal/sort/bucket column names from its column rows.
        String temporalColumnName = null;
        SortedMap<Integer, String> sortColumnNames = new TreeMap<>();
        SortedMap<Integer, String> bucketColumnNames = new TreeMap<>();
        OptionalLong temporalColumnId = tableRow.getTemporalColumnId();
        while (columnRowIterator.hasNext() && columnRowIterator.peek().getTableId() == tableRow.getTableId()) {
            ColumnMetadataRow columnRow = columnRowIterator.next();
            if (temporalColumnId.isPresent() && columnRow.getColumnId() == temporalColumnId.getAsLong()) {
                temporalColumnName = columnRow.getColumnName();
            }
            OptionalInt sortOrdinalPosition = columnRow.getSortOrdinalPosition();
            if (sortOrdinalPosition.isPresent()) {
                sortColumnNames.put(sortOrdinalPosition.getAsInt(), columnRow.getColumnName());
            }
            OptionalInt bucketOrdinalPosition = columnRow.getBucketOrdinalPosition();
            if (bucketOrdinalPosition.isPresent()) {
                bucketColumnNames.put(bucketOrdinalPosition.getAsInt(), columnRow.getColumnName());
            }
        }

        pageBuilder.declarePosition();

        // schema_name, table_name
        VARCHAR.writeSlice(pageBuilder.getBlockBuilder(0), utf8Slice(tableRow.getSchemaName()));
        VARCHAR.writeSlice(pageBuilder.getBlockBuilder(1), utf8Slice(tableRow.getTableName()));

        // temporal_column
        if (temporalColumnId.isPresent()) {
            if (temporalColumnName == null) {
                // A temporal column ID with no matching column row is corrupt metadata.
                throw new PrestoException(RAPTOR_CORRUPT_METADATA,
                        format("Table ID %s has corrupt metadata (invalid temporal column ID)",
                                tableRow.getTableId()));
            }
            VARCHAR.writeSlice(pageBuilder.getBlockBuilder(2), utf8Slice(temporalColumnName));
        } else {
            pageBuilder.getBlockBuilder(2).appendNull();
        }

        // ordering_columns
        if (!sortColumnNames.isEmpty()) {
            BlockBuilder orderingColumnsBlockBuilder = pageBuilder.getBlockBuilder(3).beginBlockEntry();
            for (String sortColumnName : sortColumnNames.values()) {
                VARCHAR.writeSlice(orderingColumnsBlockBuilder, utf8Slice(sortColumnName));
            }
            pageBuilder.getBlockBuilder(3).closeEntry();
        } else {
            pageBuilder.getBlockBuilder(3).appendNull();
        }

        // distribution_name
        Optional<String> distributionName = tableRow.getDistributionName();
        if (distributionName.isPresent()) {
            VARCHAR.writeSlice(pageBuilder.getBlockBuilder(4), utf8Slice(distributionName.get()));
        } else {
            pageBuilder.getBlockBuilder(4).appendNull();
        }

        // bucket_count
        OptionalInt bucketCount = tableRow.getBucketCount();
        if (bucketCount.isPresent()) {
            BIGINT.writeLong(pageBuilder.getBlockBuilder(5), bucketCount.getAsInt());
        } else {
            pageBuilder.getBlockBuilder(5).appendNull();
        }

        // bucketing_columns
        if (!bucketColumnNames.isEmpty()) {
            BlockBuilder bucketColumnsBlockBuilder = pageBuilder.getBlockBuilder(6).beginBlockEntry();
            for (String bucketColumnName : bucketColumnNames.values()) {
                VARCHAR.writeSlice(bucketColumnsBlockBuilder, utf8Slice(bucketColumnName));
            }
            pageBuilder.getBlockBuilder(6).closeEntry();
        } else {
            pageBuilder.getBlockBuilder(6).appendNull();
        }

        if (pageBuilder.isFull()) {
            flushPage(pageBuilder, pages);
        }
    }

    // Flush any trailing rows into a final page.
    flushPage(pageBuilder, pages);
    return pages.build().iterator();
}

From source file:org.apache.jackrabbit.oak.plugins.document.RevisionVector.java

/**
 * Advances {@code it} past revisions whose cluster id is below
 * {@code minClusterId} and returns the first remaining revision without
 * consuming it, or {@code null} if the iterator is exhausted.
 */
@CheckForNull
private Revision peekRevision(PeekingIterator<Revision> it, int minClusterId) {
    for (;;) {
        if (!it.hasNext()) {
            return null;
        }
        Revision candidate = it.peek();
        if (candidate.getClusterId() >= minClusterId) {
            // Leave the candidate on the iterator; callers only peek here.
            return candidate;
        }
        it.next();
    }
}

From source file:org.eclipse.milo.opcua.binaryschema.AbstractCodec.java

/**
 * Encodes {@code structure} to the binary stream by walking the schema's field
 * list in declaration order. Absent optional fields are skipped; "Bit" arrays in
 * the OPC UA namespaces are written bit-by-bit from a numeric member; all other
 * fields are dispatched to a built-in writer when one exists for the type, or
 * delegated to the serialization context otherwise.
 */
@Override
public void encode(SerializationContext context, StructureT structure, OpcUaBinaryStreamEncoder encoder)
        throws UaSerializationException {

    LinkedHashMap<String, MemberT> members = new LinkedHashMap<>(getMembers(structure));

    // NOTE(review): the peeking capability is unused in this method; a plain iterator would do.
    PeekingIterator<FieldType> fieldIterator = Iterators.peekingIterator(structuredType.getField().iterator());

    while (fieldIterator.hasNext()) {
        FieldType field = fieldIterator.next();

        // Optional fields may be absent from this particular instance; skip them.
        if (!fieldIsPresent(field, members)) {
            continue;
        }

        String typeName = field.getTypeName().getLocalPart();
        String typeNamespace = field.getTypeName().getNamespaceURI();

        MemberT member = members.get(field.getName());

        // Built-in writers apply only to types in the OPC UA namespaces.
        boolean typeNamespaceIsUa = Namespaces.OPC_UA.equals(typeNamespace)
                || Namespaces.OPC_UA_BSD.equals(typeNamespace);

        if (fieldIsScalar(field)) {
            Object scalarValue = memberTypeToOpcUaScalar(member, typeName);

            if (typeNamespaceIsUa && WRITERS.containsKey(typeName)) {
                WRITERS.get(typeName).accept(encoder, scalarValue);
            } else {
                // No built-in writer: let the serialization context resolve a codec.
                context.encode(typeNamespace, typeName, scalarValue, encoder);
            }
        } else {
            if (field.isIsLengthInBytes()) {
                throw new UaSerializationException(StatusCodes.Bad_EncodingError,
                        "IsLengthInBytes=true not supported");
            }

            int length = fieldLength(field, members);

            if ("Bit".equals(typeName) && typeNamespaceIsUa) {
                // Bit runs are packed from the low-order bits of a numeric member.
                Number number = (Number) memberTypeToOpcUaArray(member, typeName);
                BigInteger bi = BigInteger.valueOf(number.longValue());

                for (int i = 0; i < length; i++) {
                    encoder.writeBit(bi.shiftRight(i).and(BigInteger.ONE).intValue());
                }
            } else {
                Object[] valueArray = (Object[]) memberTypeToOpcUaArray(member, typeName);

                if (valueArray != null) {
                    if (typeNamespaceIsUa && WRITERS.containsKey(typeName)) {
                        for (int i = 0; i < length; i++) {
                            Object value = valueArray[i];

                            WRITERS.get(typeName).accept(encoder, value);
                        }
                    } else {
                        for (int i = 0; i < length; i++) {
                            Object value = valueArray[i];

                            context.encode(typeNamespace, typeName, value, encoder);
                        }
                    }
                }
            }
        }
    }
}

From source file:org.eclipse.milo.opcua.binaryschema.AbstractCodec.java

/**
 * Decodes a structure from the binary stream, mirroring {@code encode}: fields
 * are read in schema declaration order, absent optional fields are skipped,
 * "Bit" arrays in the OPC UA namespaces are accumulated bit-by-bit into an int,
 * and all other fields use a built-in reader when one exists or the
 * serialization context otherwise. Decoded members feed later presence/length
 * checks, since optional flags and array lengths refer to earlier fields.
 */
@Override
public StructureT decode(SerializationContext context, OpcUaBinaryStreamDecoder decoder)
        throws UaSerializationException {

    LinkedHashMap<String, MemberT> members = new LinkedHashMap<>();

    // NOTE(review): the peeking capability is unused in this method; a plain iterator would do.
    PeekingIterator<FieldType> fieldIterator = Iterators.peekingIterator(structuredType.getField().iterator());

    while (fieldIterator.hasNext()) {
        FieldType field = fieldIterator.next();
        String fieldName = field.getName();
        String typeName = field.getTypeName().getLocalPart();
        String typeNamespace = field.getTypeName().getNamespaceURI();

        // Optional fields may be absent, as signalled by previously decoded members.
        if (!fieldIsPresent(field, members)) {
            continue;
        }

        // Built-in readers apply only to types in the OPC UA namespaces.
        boolean typeNamespaceIsUa = Namespaces.OPC_UA.equals(typeNamespace)
                || Namespaces.OPC_UA_BSD.equals(typeNamespace);

        if (fieldIsScalar(field)) {
            if (typeNamespaceIsUa && READERS.containsKey(typeName)) {
                Object value = READERS.get(typeName).apply(decoder);

                members.put(fieldName, opcUaToMemberTypeScalar(fieldName, value, typeName));
            } else {
                // No built-in reader: let the serialization context resolve a codec.
                Object value = context.decode(typeNamespace, typeName, decoder);

                members.put(fieldName, opcUaToMemberTypeScalar(fieldName, value, typeName));
            }
        } else {
            if (field.isIsLengthInBytes()) {
                throw new UaSerializationException(StatusCodes.Bad_DecodingError,
                        "IsLengthInBytes=true not supported");
            }

            int length = fieldLength(field, members);

            if ("Bit".equals(typeName) && typeNamespaceIsUa) {
                // Accumulate the bit run into an integer, bit i in position i.
                BigInteger bitAccumulation = BigInteger.valueOf(0L);

                for (int i = 0; i < length; i++) {
                    BigInteger bitValue = BigInteger.valueOf(decoder.readBit());

                    bitAccumulation = bitAccumulation.or(bitValue.shiftLeft(i));
                }

                members.put(fieldName, opcUaToMemberTypeArray(fieldName, bitAccumulation.intValue(), typeName));
            } else {
                Object[] values = new Object[length];

                if (typeNamespaceIsUa && READERS.containsKey(typeName)) {
                    for (int i = 0; i < length; i++) {
                        Object value = READERS.get(typeName).apply(decoder);

                        values[i] = value;
                    }
                } else {
                    for (int i = 0; i < length; i++) {
                        Object value = context.decode(typeNamespace, typeName, decoder);

                        values[i] = value;
                    }
                }

                members.put(fieldName, opcUaToMemberTypeArray(fieldName, values, typeName));
            }
        }
    }

    return createStructure(structuredType.getName(), members);
}

From source file:com.digitalpetri.opcua.server.ctt.CttNamespace.java

/**
 * Ensures a chain of folder nodes for each element of {@code path} exists under
 * {@code root}, registering missing nodes and adding Organizes references from
 * each folder to the next.
 *
 * @param root the node under which the first folder is organized
 * @param path slash-separated folder path (an optional leading slash is ignored)
 * @return the deepest (last) folder node on the path
 */
private UaObjectNode addFoldersToRoot(UaNode root, String path) {
    // Normalize away a leading slash before splitting into path elements.
    if (path.startsWith("/"))
        path = path.substring(1, path.length());
    String[] elements = path.split("/");

    LinkedList<UaObjectNode> folderNodes = processPathElements(Lists.newArrayList(elements),
            Lists.newArrayList(), Lists.newLinkedList());

    UaObjectNode firstNode = folderNodes.getFirst();

    // Link the first folder beneath the root if it isn't registered yet.
    if (!nodes.containsKey(firstNode.getNodeId())) {
        nodes.put(firstNode.getNodeId(), firstNode);

        nodes.get(root.getNodeId()).addReference(new Reference(root.getNodeId(), Identifiers.Organizes,
                firstNode.getNodeId().expanded(), firstNode.getNodeClass(), true));

        logger.debug("Added reference: {} -> {}", root.getNodeId(), firstNode.getNodeId());
    }

    // Walk the chain, peeking ahead so each folder can reference its successor.
    PeekingIterator<UaObjectNode> iterator = Iterators.peekingIterator(folderNodes.iterator());

    while (iterator.hasNext()) {
        UaObjectNode node = iterator.next();

        nodes.putIfAbsent(node.getNodeId(), node);

        if (iterator.hasNext()) {
            UaObjectNode next = iterator.peek();

            // Only create the node and the Organizes reference on first sight.
            if (!nodes.containsKey(next.getNodeId())) {
                nodes.put(next.getNodeId(), next);

                nodes.get(node.getNodeId()).addReference(new Reference(node.getNodeId(), Identifiers.Organizes,
                        next.getNodeId().expanded(), next.getNodeClass(), true));

                logger.debug("Added reference: {} -> {}", node.getNodeId(), next.getNodeId());
            }
        }
    }

    return folderNodes.getLast();
}

From source file:com.github.rinde.logistics.pdptw.solver.optaplanner.ScoreCalculator.java

/**
 * Synchronizes the stored route of {@code v} with its freshly computed route,
 * removing every visit that was on the previous route after the first point of
 * divergence. Returns the first diverging visit of the new route, or
 * {@code null} if the new route has nothing past the shared prefix.
 */
@Nullable
ParcelVisit updateRouteRemovals(Vehicle v) {
    final List<ParcelVisit> previous = ImmutableList.copyOf(routes.get(v));
    final List<ParcelVisit> current = updateCurRoute(v);

    final PeekingIterator<ParcelVisit> oldIt = Iterators.peekingIterator(previous.iterator());
    final PeekingIterator<ParcelVisit> curIt = Iterators.peekingIterator(current.iterator());

    // Skip the shared prefix of both routes.
    boolean sameHead = oldIt.hasNext() && curIt.hasNext() && oldIt.peek().equals(curIt.peek());
    while (sameHead) {
        oldIt.next();
        curIt.next();
        sameHead = oldIt.hasNext() && curIt.hasNext() && oldIt.peek().equals(curIt.peek());
    }

    // Everything left on the previous route is no longer scheduled there.
    while (oldIt.hasNext()) {
        remove(oldIt.next());
    }

    return curIt.hasNext() ? curIt.peek() : null;
}

From source file:org.apache.accumulo.gc.GarbageCollectionAlgorithm.java

/**
 * Removes from the candidate set any deletion candidates that are still needed
 * for replication. Both iterators must yield entries sorted by file path for
 * the merge-join below to pair them up correctly.
 *
 * @param replicationNeededIterator sorted entries of file path to replication {@link Status}
 * @param candidateMapIterator sorted entries whose values are candidate file paths
 */
protected void confirmDeletesFromReplication(Iterator<Entry<String, Status>> replicationNeededIterator,
        Iterator<Entry<String, String>> candidateMapIterator) {
    PeekingIterator<Entry<String, Status>> pendingReplication = Iterators
            .peekingIterator(replicationNeededIterator);
    PeekingIterator<Entry<String, String>> candidates = Iterators.peekingIterator(candidateMapIterator);
    while (pendingReplication.hasNext() && candidates.hasNext()) {
        Entry<String, Status> pendingReplica = pendingReplication.peek();
        Entry<String, String> candidate = candidates.peek();

        String filePendingReplication = pendingReplica.getKey();
        String fullPathCandidate = candidate.getValue();

        int comparison = filePendingReplication.compareTo(fullPathCandidate);
        if (comparison < 0) {
            // Replication entry sorts before the candidate: no candidate matches it.
            pendingReplication.next();
        } else if (comparison > 0) {
            // BUG FIX: was "comparison > 1". String.compareTo only guarantees the sign,
            // and even an exact result of 1 fell through to the "equal" branch below,
            // wrongly consuming both iterators (and possibly removing a non-matching
            // candidate). Any positive value means the candidate sorts first.
            candidates.next();
        } else {
            // We want to advance both, and try to delete the candidate if we can
            candidates.next();
            pendingReplication.next();

            // We cannot delete a file if it is still needed for replication
            if (!StatusUtil.isSafeForRemoval(pendingReplica.getValue())) {
                // If it must be replicated, we must remove it from the candidate set to prevent deletion
                candidates.remove();
            }
        }
    }
}