Example usage for com.google.common.collect PeekingIterator peek

Introduction

This page collects example usages of com.google.common.collect PeekingIterator.peek from open-source projects.

Prototype

E peek();

Documentation

Returns the next element in the iteration, without advancing the iteration.
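
As a quick orientation before the project examples below, here is a minimal self-contained sketch of the peek()/next() contract (the class and variable names are illustrative, not from any of the projects below):

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;

import java.util.Arrays;

public class PeekExample {
    public static void main(String[] args) {
        PeekingIterator<String> it = Iterators.peekingIterator(Arrays.asList("a", "b", "c").iterator());
        while (it.hasNext()) {
            // peek() returns the upcoming element without advancing the iterator,
            // so the next() call below returns that same element.
            String upcoming = it.peek();
            System.out.println("peeked: " + upcoming + ", consumed: " + it.next());
        }
    }
}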

Usage

From source file:org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy.java
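
Here peek() lets the selection loop test a store file's timestamp against the current compaction window before deciding whether to consume the file or move to an earlier window.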

/**
 * We receive store files sorted in ascending order by seqId, then scan the list of files. If the
 * current file has a maxTimestamp older than the last known maximum, treat this file as if it
 * carried the last known maximum. This way both seqId and timestamp are in the same order. If
 * files carry the same maxTimestamp, they are ordered by seqId. We then reverse the list so they
 * are ordered by seqId and maxTimestamp in descending order, and build the time windows. All
 * out-of-order data falls into the same compaction window, guaranteeing contiguous compaction
 * based on sequence id.
 */
public CompactionRequest selectMinorCompaction(ArrayList<StoreFile> candidateSelection, boolean mayUseOffPeak,
        boolean mayBeStuck) throws IOException {
    long now = EnvironmentEdgeManager.currentTime();
    long oldestToCompact = getOldestToCompact(comConf.getDateTieredMaxStoreFileAgeMillis(), now);

    List<Pair<StoreFile, Long>> storefileMaxTimestampPairs = Lists
            .newArrayListWithCapacity(candidateSelection.size());
    long maxTimestampSeen = Long.MIN_VALUE;
    for (StoreFile storeFile : candidateSelection) {
        // if there is out-of-order data,
        // we put them in the same window as the last file in increasing order
        maxTimestampSeen = Math.max(maxTimestampSeen,
                storeFile.getMaximumTimestamp() == null ? Long.MIN_VALUE : storeFile.getMaximumTimestamp());
        storefileMaxTimestampPairs.add(new Pair<StoreFile, Long>(storeFile, maxTimestampSeen));
    }
    Collections.reverse(storefileMaxTimestampPairs);

    CompactionWindow window = getIncomingWindow(now);
    int minThreshold = comConf.getDateTieredIncomingWindowMin();
    PeekingIterator<Pair<StoreFile, Long>> it = Iterators
            .peekingIterator(storefileMaxTimestampPairs.iterator());
    while (it.hasNext()) {
        if (window.compareToTimestamp(oldestToCompact) < 0) {
            break;
        }
        int compResult = window.compareToTimestamp(it.peek().getSecond());
        if (compResult > 0) {
            // If the file is too old for the window, switch to the next window
            window = window.nextEarlierWindow();
            minThreshold = comConf.getMinFilesToCompact();
        } else {
            // The file is within the target window
            ArrayList<StoreFile> fileList = Lists.newArrayList();
            // Add all files in the same window. For incoming window
            // we tolerate files with future data although it is sub-optimal
            while (it.hasNext() && window.compareToTimestamp(it.peek().getSecond()) <= 0) {
                fileList.add(it.next().getFirst());
            }
            if (fileList.size() >= minThreshold) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Processing files: " + fileList + " for window: " + window);
                }
                DateTieredCompactionRequest request = generateCompactionRequest(fileList, window, mayUseOffPeak,
                        mayBeStuck, minThreshold);
                if (request != null) {
                    return request;
                }
            }
        }
    }
    // A non-null file list is expected by HStore
    return new CompactionRequest(Collections.<StoreFile>emptyList());
}

From source file:edu.sdsc.scigraph.neo4j.HierarchyVisitor.java
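
Here peek() looks ahead at the relationship that follows a node in a traversal path, so that hops representing OWL equivalence can be skipped while collecting the path's nodes.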

void traverse(Node... roots) {
    TraversalDescription description = graph.traversalDescription().uniqueness(Uniqueness.RELATIONSHIP_PATH)
            .depthFirst().expand(new PathExpander<Void>() {

                @Override
                public Iterable<Relationship> expand(Path path, BranchState<Void> state) {
                    Set<Relationship> relationships = new HashSet<>();
                    addAll(relationships, path.endNode().getRelationships(Direction.INCOMING,
                            OwlRelationships.RDFS_SUBCLASS_OF));
                    if (includeEquivalentClasses && null != path.lastRelationship()
                            && !path.lastRelationship().isType(OwlRelationships.OWL_EQUIVALENT_CLASS)) {
                        addAll(relationships,
                                path.endNode().getRelationships(OwlRelationships.OWL_EQUIVALENT_CLASS));
                    }
                    return relationships;
                }

                @Override
                public PathExpander<Void> reverse() {
                    return null;
                }
            });

    try (Transaction tx = graph.beginTx()) {
        for (Path position : description.traverse(roots)) {
            List<Node> path = new ArrayList<>();
            PeekingIterator<PropertyContainer> iter = Iterators.peekingIterator(position.iterator());
            while (iter.hasNext()) {
                PropertyContainer container = iter.next();
                if (container instanceof Node) {
                    if (((Node) container).hasLabel(OwlLabels.OWL_ANONYMOUS)) {
                        // Ignore paths with anonymous nodes
                    } else if (iter.hasNext()
                            && ((Relationship) iter.peek()).isType(OwlRelationships.OWL_EQUIVALENT_CLASS)) {
                        // Ignore the path hop representing the equivalence
                    } else {
                        path.add((Node) container);
                    }
                }
            }
            callback.processPath(path);
        }
    }
}

From source file:com.foundationdb.server.store.statistics.histograms.Sampler.java
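
Here peek() drives a merge of two sorted bucket streams: unpopular buckets are consumed and merged into the current popular bucket only while their values do not exceed it.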

private List<Bucket<T>> mergeUnpopularsIntoPopulars(PopularitySplit<T> split) {
    Deque<Bucket<T>> populars = split.popularBuckets;
    assert populars.size() >= maxSize : "failed populars.size[" + populars.size() + "] >= maxSize[" + maxSize
            + "]";
    PeekingIterator<Bucket<T>> unpopulars = Iterators.peekingIterator(split.regularBuckets.iterator());
    List<Bucket<T>> results = new ArrayList<>(populars.size());
    BucketSampler<T> sampler = new BucketSampler<>(maxSize, split.popularsCount, false);
    for (Bucket<T> popular : populars) {
        if (sampler.add(popular)) {
            // merge in all the unpopulars less than this one
            while (unpopulars.hasNext() && unpopulars.peek().value().compareTo(popular.value()) <= 0) {
                Bucket<T> mergeMe = unpopulars.next();
                mergeUp(mergeMe, popular);
            }
            results.add(popular);
        }
    }
    // now, create one last value which merges in all of the remaining unpopulars
    Bucket<T> last = null;
    while (unpopulars.hasNext()) {
        Bucket<T> unpopular = unpopulars.next();
        if (last != null)
            mergeUp(last, unpopular);
        last = unpopular;
    }
    if (last != null)
        results.add(last);

    return results;
}

From source file:org.apache.accumulo.gc.GarbageCollectionAlgorithm.java
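
Here peek() compares the heads of two sorted iterators so that only the lagging one is advanced, a classic sorted-merge walk over replication entries and deletion candidates.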

protected void confirmDeletesFromReplication(Iterator<Entry<String, Status>> replicationNeededIterator,
        Iterator<Entry<String, String>> candidateMapIterator) {
    PeekingIterator<Entry<String, Status>> pendingReplication = Iterators
            .peekingIterator(replicationNeededIterator);
    PeekingIterator<Entry<String, String>> candidates = Iterators.peekingIterator(candidateMapIterator);
    while (pendingReplication.hasNext() && candidates.hasNext()) {
        Entry<String, Status> pendingReplica = pendingReplication.peek();
        Entry<String, String> candidate = candidates.peek();

        String filePendingReplication = pendingReplica.getKey();
        String fullPathCandidate = candidate.getValue();

        int comparison = filePendingReplication.compareTo(fullPathCandidate);
        if (comparison < 0) {
            pendingReplication.next();
        } else if (comparison > 0) {
            candidates.next();
        } else {
            // We want to advance both, and try to delete the candidate if we can
            candidates.next();
            pendingReplication.next();

            // We cannot delete a file if it is still needed for replication
            if (!StatusUtil.isSafeForRemoval(pendingReplica.getValue())) {
                // If it must be replicated, we must remove it from the candidate set to prevent deletion
                candidates.remove();
            }
        }
    }
}

From source file:org.geogit.api.plumbing.diff.DiffTreeVisitor.java
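
Here peek() compares the front nodes of the left and right trees so each step can be classified as a removal, a change, or an addition without consuming both sides prematurely.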

/**
 * Traverse and compare the {@link RevTree#children() children} nodes of two leaf trees, calling
 * {@link #node(Consumer, Node, Node)} for each diff.
 */
private void traverseLeafLeaf(Consumer consumer, Iterator<Node> leftc, Iterator<Node> rightc) {
    PeekingIterator<Node> li = Iterators.peekingIterator(leftc);
    PeekingIterator<Node> ri = Iterators.peekingIterator(rightc);

    while (li.hasNext() && ri.hasNext()) {
        Node lpeek = li.peek();
        Node rpeek = ri.peek();
        int order = ORDER.compare(lpeek, rpeek);
        if (order < 0) {
            node(consumer, li.next(), null);// removal
        } else if (order == 0) {// change
            // same feature on both sides of the traversal; consume both and
            // check whether it actually changed
            Node l = li.next();
            Node r = ri.next();
            if (!l.equals(r)) {
                node(consumer, l, r);
            }
        } else {
            node(consumer, null, ri.next());// addition
        }
    }

    checkState(!li.hasNext() || !ri.hasNext(),
            "either the left or the right iterator should have been fully consumed");

    // right fully consumed, any remaining node in left is a removal
    while (li.hasNext()) {
        node(consumer, li.next(), null);
    }

    // left fully consumed, any remaining node in right is an add
    while (ri.hasNext()) {
        node(consumer, null, ri.next());
    }
}

From source file:fr.loria.parole.artimate.engine.Ardor3DWrapper.java
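
Here peek() fetches the next animation unit so that each steady state can be wired to transition immediately into its successor in the sequence.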

@Override
public void playAnimation(UnitSequence units) {
    final String layerName = "-ANIMATION_LAYER-";
    // remove the layer if it already exists
    AnimationLayer layer = animation.findAnimationLayer(layerName);
    if (layer != null) {
        animation.removeAnimationLayer(layer);
    }

    // create new layer
    layer = new AnimationLayer(layerName);
    animation.addAnimationLayer(layer);

    // iterate over units
    PeekingIterator<Unit> unitIterator = Iterators.peekingIterator(units.iterator());
    while (unitIterator.hasNext()) {
        // get animation state
        Unit unit = unitIterator.next();
        SteadyState state = (SteadyState) unit.getAnimation();

        // add end transition so that state jumps to next in sequence at end (except for last)
        if (unitIterator.hasNext()) {
            Unit nextUnit = unitIterator.peek();
            SteadyState nextState = (SteadyState) nextUnit.getAnimation();
            state.setEndTransition(new ImmediateTransitionState(nextState.getName()));
        }

        // add state to layer
        layer.addSteadyState(state);
    }

    layer.setCurrentState((SteadyState) units.get(0).getAnimation(), true);
}

From source file:com.google.cloud.storage.contrib.nio.UnixPath.java
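
Here peek() compares path components pairwise to find the common prefix of the two paths before building the relative remainder.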

/**
 * Returns {@code other} made relative to {@code path}.
 *
 * @see java.nio.file.Path#relativize(java.nio.file.Path)
 */
public UnixPath relativize(UnixPath other) {
    checkArgument(isAbsolute() == other.isAbsolute(), "'other' is different type of Path");
    if (path.isEmpty()) {
        return other;
    }
    PeekingIterator<String> left = Iterators.peekingIterator(split());
    PeekingIterator<String> right = Iterators.peekingIterator(other.split());
    while (left.hasNext() && right.hasNext()) {
        if (!left.peek().equals(right.peek())) {
            break;
        }
        left.next();
        right.next();
    }
    StringBuilder result = new StringBuilder(path.length() + other.path.length());
    while (left.hasNext()) {
        result.append(PARENT_DIR);
        result.append(SEPARATOR);
        left.next();
    }
    while (right.hasNext()) {
        result.append(right.next());
        result.append(SEPARATOR);
    }
    if (result.length() > 0 && !other.hasTrailingSeparator()) {
        result.deleteCharAt(result.length() - 1);
    }
    return new UnixPath(permitEmptyComponents, result.toString());
}

From source file:com.digitalpetri.opcua.server.ctt.CttNamespace.java
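
Here peek() looks at the next folder node in the chain so that an Organizes reference can be added from each node to its successor without consuming it.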

private UaObjectNode addFoldersToRoot(UaNode root, String path) {
    if (path.startsWith("/"))
        path = path.substring(1);
    String[] elements = path.split("/");

    LinkedList<UaObjectNode> folderNodes = processPathElements(Lists.newArrayList(elements),
            Lists.newArrayList(), Lists.newLinkedList());

    UaObjectNode firstNode = folderNodes.getFirst();

    if (!nodes.containsKey(firstNode.getNodeId())) {
        nodes.put(firstNode.getNodeId(), firstNode);

        nodes.get(root.getNodeId()).addReference(new Reference(root.getNodeId(), Identifiers.Organizes,
                firstNode.getNodeId().expanded(), firstNode.getNodeClass(), true));

        logger.debug("Added reference: {} -> {}", root.getNodeId(), firstNode.getNodeId());
    }

    PeekingIterator<UaObjectNode> iterator = Iterators.peekingIterator(folderNodes.iterator());

    while (iterator.hasNext()) {
        UaObjectNode node = iterator.next();

        nodes.putIfAbsent(node.getNodeId(), node);

        if (iterator.hasNext()) {
            UaObjectNode next = iterator.peek();

            if (!nodes.containsKey(next.getNodeId())) {
                nodes.put(next.getNodeId(), next);

                nodes.get(node.getNodeId()).addReference(new Reference(node.getNodeId(), Identifiers.Organizes,
                        next.getNodeId().expanded(), next.getNodeClass(), true));

                logger.debug("Added reference: {} -> {}", node.getNodeId(), next.getNodeId());
            }
        }
    }

    return folderNodes.getLast();
}

From source file:co.cask.cdap.metrics.query.MetricsRequestExecutor.java
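
Here peek() inspects the timestamp of the next time value so the series can be fast-forwarded past interpolation padding and aligned to the query window without dropping data points.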

public JsonElement executeQuery(MetricsRequest metricsRequest) throws IOException, OperationException {

    // Pretty ugly logic now. Need to refactor
    Object resultObj = null;
    if (metricsRequest.getType() == MetricsRequest.Type.TIME_SERIES) {
        TimeSeriesResponse.Builder builder = TimeSeriesResponse.builder(metricsRequest.getStartTime(),
                metricsRequest.getEndTime());
        // Special metric handling that requires computation from multiple time series.
        if ("process.busyness".equals(metricsRequest.getMetricPrefix())) {
            computeProcessBusyness(metricsRequest, builder);
        } else {
            MetricsScanQuery scanQuery = createScanQuery(metricsRequest);

            PeekingIterator<TimeValue> timeValueItor = Iterators.peekingIterator(
                    queryTimeSeries(metricsRequest.getScope(), scanQuery, metricsRequest.getInterpolator()));

            // if this is an interpolated timeseries, we might have extended the "start" in order to interpolate,
            // so fast-forward the iterator until we're inside the actual query time window.
            while (timeValueItor.hasNext()
                    && (timeValueItor.peek().getTime() < metricsRequest.getStartTime())) {
                timeValueItor.next();
            }

            for (int i = 0; i < metricsRequest.getCount(); i++) {
                long resultTime = metricsRequest.getStartTime() + i;

                if (timeValueItor.hasNext() && timeValueItor.peek().getTime() == resultTime) {
                    builder.addData(resultTime, timeValueItor.next().getValue());
                    continue;
                }
                builder.addData(resultTime, 0);
            }
        }
        resultObj = builder.build();

    } else if (metricsRequest.getType() == MetricsRequest.Type.AGGREGATE) {
        // Special metric handling that requires computation from multiple aggregate results.
        if ("process.events.pending".equals(metricsRequest.getMetricPrefix())) {
            resultObj = computeQueueLength(metricsRequest);
        } else {
            resultObj = getAggregates(metricsRequest);
        }
    }

    return GSON.toJsonTree(resultObj);
}

From source file:io.bazel.rules.closure.Webpath.java
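
Here peek() is used the same way as in UnixPath.relativize above: compare components pairwise to strip the common prefix before emitting parent-directory hops and the remaining components.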

/**
 * Returns {@code other} made relative to {@code path}.
 *
 * @see java.nio.file.Path#relativize(java.nio.file.Path)
 */
public Webpath relativize(Webpath other) {
    checkArgument(isAbsolute() == other.isAbsolute(), "'other' is different type of Path");
    if (path.isEmpty()) {
        return other;
    }
    PeekingIterator<String> left = Iterators.peekingIterator(split());
    PeekingIterator<String> right = Iterators.peekingIterator(other.split());
    while (left.hasNext() && right.hasNext()) {
        if (!left.peek().equals(right.peek())) {
            break;
        }
        left.next();
        right.next();
    }
    StringBuilder result = new StringBuilder(path.length() + other.path.length());
    while (left.hasNext()) {
        result.append(PARENT_DIR);
        result.append(SEPARATOR);
        left.next();
    }
    while (right.hasNext()) {
        result.append(right.next());
        result.append(SEPARATOR);
    }
    if (result.length() > 0 && !other.hasTrailingSeparator()) {
        result.deleteCharAt(result.length() - 1);
    }
    return new Webpath(result.toString());
}