Example usage for com.google.common.collect Iterators limit

Introduction

This page shows example usages of com.google.common.collect.Iterators.limit.

Prototype

public static <T> Iterator<T> limit(final Iterator<T> iterator, final int limitSize) 

Document

Creates an iterator returning the first limitSize elements of the given iterator.
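
Before the project examples below, a minimal self-contained sketch (with hypothetical list contents) of what the method does: the returned iterator is a lazy view that stops after limitSize elements, and the source iterator is consumed but never copied.

import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;

import java.util.Iterator;
import java.util.List;

public class IteratorsLimitExample {
    public static void main(String[] args) {
        List<String> names = Lists.newArrayList("a", "b", "c", "d", "e");
        // Keep at most the first three elements; asking for more than the
        // source holds is fine, the view simply ends when the source does.
        Iterator<String> firstThree = Iterators.limit(names.iterator(), 3);
        System.out.println(Lists.newArrayList(firstThree)); // [a, b, c]
    }
}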

Usage

From source file:io.crate.test.integration.CrateTestCluster.java

/**
 * Ensures that at most <code>n</code> nodes are up and running.
 * If fewer than <code>n</code> nodes are running, this method
 * will not start any additional nodes.
 */
public synchronized void ensureAtMostNumNodes(int n) {
    if (nodes.size() <= n) {
        return;
    }
    // prevent killing the master if possible
    final Iterator<NodeAndClient> values = n == 0 ? nodes.values().iterator()
            : Iterators.filter(nodes.values().iterator(),
                    Predicates.not(new MasterNodePredicate(getMasterName())));
    final Iterator<NodeAndClient> limit = Iterators.limit(values, nodes.size() - n);
    logger.info("reducing cluster size from {} to {}", nodes.size() - n, n);
    Set<NodeAndClient> nodesToRemove = new HashSet<NodeAndClient>();
    while (limit.hasNext()) {
        NodeAndClient next = limit.next();
        nodesToRemove.add(next);
        next.close();
    }
    for (NodeAndClient toRemove : nodesToRemove) {
        nodes.remove(toRemove.name);
    }
}

From source file:org.locationtech.geogig.porcelain.CheckoutOp.java

private CheckoutResult branchCheckout() throws CheckoutException {
    CheckoutResult result = new CheckoutResult();
    final ConflictsDatabase conflictsDatabase = conflictsDatabase();
    final boolean hasConflicts = conflictsDatabase.hasConflicts(null);
    if (hasConflicts && !force) {
        final long conflictCount = conflictsDatabase.getCountByPrefix(null, null);
        Iterator<Conflict> conflicts = Iterators.limit(conflictsDatabase.getByPrefix(branchOrCommit, null), 25);
        StringBuilder msg = new StringBuilder();
        while (conflicts.hasNext()) {
            Conflict conflict = conflicts.next();
            msg.append("error: " + conflict.getPath() + " needs merge.\n");
        }
        if (conflictCount > 25) {
            msg.append(String.format("and %,d more.\n", (conflictCount - 25)));
        }
        msg.append("You need to resolve your index first.\n");
        throw new CheckoutException(msg.toString(), StatusCode.UNMERGED_PATHS);
    }
    Optional<Ref> targetRef = Optional.absent();
    Optional<ObjectId> targetCommitId = Optional.absent();
    Optional<ObjectId> targetTreeId = Optional.absent();
    targetRef = command(RefParse.class).setName(branchOrCommit).call();
    if (targetRef.isPresent()) {
        ObjectId commitId = targetRef.get().getObjectId();
        if (targetRef.get().getName().startsWith(Ref.REMOTES_PREFIX)) {
            String remoteName = targetRef.get().getName();
            remoteName = remoteName.substring(Ref.REMOTES_PREFIX.length(),
                    targetRef.get().getName().lastIndexOf("/"));

            if (branchOrCommit.contains(remoteName + '/')) {
                RevCommit commit = command(RevObjectParse.class).setObjectId(commitId).call(RevCommit.class)
                        .get();

                targetTreeId = Optional.of(commit.getTreeId());
                targetCommitId = Optional.of(commit.getId());
                targetRef = Optional.absent();
            } else {

                Ref branch = command(BranchCreateOp.class).setName(targetRef.get().localName())
                        .setSource(commitId.toString()).call();

                command(ConfigOp.class).setAction(ConfigAction.CONFIG_SET).setScope(ConfigScope.LOCAL)
                        .setName("branches." + branch.localName() + ".remote").setValue(remoteName).call();

                command(ConfigOp.class).setAction(ConfigAction.CONFIG_SET).setScope(ConfigScope.LOCAL)
                        .setName("branches." + branch.localName() + ".merge")
                        .setValue(targetRef.get().getName()).call();

                targetRef = Optional.of(branch);
                result.setResult(CheckoutResult.Results.CHECKOUT_REMOTE_BRANCH);
                result.setRemoteName(remoteName);
            }
        }

        if (commitId.isNull()) {
            targetTreeId = Optional.of(ObjectId.NULL);
            targetCommitId = Optional.of(ObjectId.NULL);
        } else {
            Optional<RevCommit> parsed = command(RevObjectParse.class).setObjectId(commitId)
                    .call(RevCommit.class);
            checkState(parsed.isPresent());
            checkState(parsed.get() instanceof RevCommit);
            RevCommit commit = parsed.get();
            targetCommitId = Optional.of(commit.getId());
            targetTreeId = Optional.of(commit.getTreeId());
        }
    } else {
        final Optional<ObjectId> addressed = command(RevParse.class).setRefSpec(branchOrCommit).call();
        checkArgument(addressed.isPresent(), "source '" + branchOrCommit + "' not found in repository");

        RevCommit commit = command(RevObjectParse.class).setObjectId(addressed.get()).call(RevCommit.class)
                .get();

        targetTreeId = Optional.of(commit.getTreeId());
        targetCommitId = Optional.of(commit.getId());
    }
    if (targetTreeId.isPresent()) {
        if (!force) {
            if (!stagingArea().isClean() || !workingTree().isClean()) {
                throw new CheckoutException(StatusCode.LOCAL_CHANGES_NOT_COMMITTED);
            }
        }
        // update work tree
        ObjectId treeId = targetTreeId.get();
        workingTree().updateWorkHead(treeId);
        stagingArea().updateStageHead(treeId);
        result.setNewTree(treeId);
        if (targetRef.isPresent()) {
            // update HEAD
            Ref target = targetRef.get();
            String refName;
            if (target instanceof SymRef) {// beware of cyclic refs, peel symrefs
                refName = ((SymRef) target).getTarget();
            } else {
                refName = target.getName();
            }
            command(UpdateSymRef.class).setName(Ref.HEAD).setNewValue(refName).call();
            result.setNewRef(targetRef.get());
            result.setOid(targetCommitId.get());
            result.setResult(CheckoutResult.Results.CHECKOUT_LOCAL_BRANCH);
        } else {
            // set HEAD to a detached state
            ObjectId commitId = targetCommitId.get();
            command(UpdateRef.class).setName(Ref.HEAD).setNewValue(commitId).call();
            result.setOid(commitId);
            result.setResult(CheckoutResult.Results.DETACHED_HEAD);
        }
        Optional<Ref> ref = command(RefParse.class).setName(Ref.MERGE_HEAD).call();
        if (ref.isPresent()) {
            command(UpdateRef.class).setName(Ref.MERGE_HEAD).setDelete(true).call();
        }
    }
    return result;
}

From source file:zipkin.storage.cassandra3.CassandraSpanStore.java

/**
 * This fans out into a number of requests. The returned future will fail if any of the
 * inputs fail.
 *
 * <p>When {@link QueryRequest#serviceName service name} is unset, service names will be
 * fetched eagerly, implying an additional query.
 *
 * <p>The duration query is the most expensive query in cassandra, as it turns into 1 request per
 * hour of {@link QueryRequest#lookback lookback}. Because many times lookback is set to a day,
 * this means 24 requests to the backend!
 *
 * <p>See https://github.com/openzipkin/zipkin-java/issues/200
 */
@Override
public ListenableFuture<List<List<Span>>> getTraces(final QueryRequest request) {
    // Over fetch on indexes as they don't return distinct (trace id, timestamp) rows.
    final int traceIndexFetchSize = request.limit * indexFetchMultiplier;
    ListenableFuture<Map<TraceIdUDT, Long>> traceIdToTimestamp = getTraceIdsByServiceNames(request);
    List<String> annotationKeys = CassandraUtil.annotationKeys(request);
    ListenableFuture<Collection<TraceIdUDT>> traceIds;
    if (annotationKeys.isEmpty()) {
        // Simplest case is when there is no annotation query. Limit is valid since there's no AND
        // query that could reduce the results returned to less than the limit.
        traceIds = Futures.transform(traceIdToTimestamp, CassandraUtil.traceIdsSortedByDescTimestamp());
    } else {
        // While a valid port of the scala cassandra span store (from zipkin 1.35), there is a fault.
        // each annotation key is an intersection, meaning we likely return < traceIndexFetchSize.
        List<ListenableFuture<Map<TraceIdUDT, Long>>> futureKeySetsToIntersect = new ArrayList<>();
        futureKeySetsToIntersect.add(traceIdToTimestamp);
        for (String annotationKey : annotationKeys) {
            futureKeySetsToIntersect.add(getTraceIdsByAnnotation(annotationKey, request.endTs, request.lookback,
                    traceIndexFetchSize));
        }
        // We achieve the AND goal, by intersecting each of the key sets.
        traceIds = Futures.transform(allAsList(futureKeySetsToIntersect), CassandraUtil.intersectKeySets());
        // @xxx the sorting by timestamp desc is broken here^
    }
    return transform(traceIds, new AsyncFunction<Collection<TraceIdUDT>, List<List<Span>>>() {
        @Override
        public ListenableFuture<List<List<Span>>> apply(Collection<TraceIdUDT> traceIds) {
            ImmutableSet<TraceIdUDT> set = ImmutableSet
                    .copyOf(Iterators.limit(traceIds.iterator(), request.limit));
            return transform(getSpansByTraceIds(set, maxTraceCols),
                    new Function<List<Span>, List<List<Span>>>() {
                        @Override
                        public List<List<Span>> apply(List<Span> input) {
                            return GroupByTraceId.apply(input, strictTraceId, true);
                        }
                    });
        }

        @Override
        public String toString() {
            return "getSpansByTraceIds";
        }
    });
}
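
The store above over-fetches trace ids (request.limit * indexFetchMultiplier) and then uses Iterators.limit together with ImmutableSet.copyOf to cap the combined result at the caller's limit. A reduced sketch of that capping idiom, with hypothetical ids and limit:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Collection;

public class CapResults {
    // Keep at most `limit` elements of an over-fetched collection, in iteration order.
    static ImmutableSet<Long> cap(Collection<Long> traceIds, int limit) {
        return ImmutableSet.copyOf(Iterators.limit(traceIds.iterator(), limit));
    }

    public static void main(String[] args) {
        System.out.println(cap(Arrays.asList(1L, 2L, 3L, 4L), 2)); // [1, 2]
    }
}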

From source file:zipkin.storage.cassandra.CassandraSpanStore.java

/**
 * This fans out into a potentially large amount of requests related to the amount of annotations
 * queried. The returned future will fail if any of the inputs fail.
 *
 * <p>When {@link QueryRequest#serviceName service name} is unset, service names will be
 * fetched eagerly, implying an additional query.
 */
@Override
public ListenableFuture<List<List<Span>>> getTraces(final QueryRequest request) {
    // Over fetch on indexes as they don't return distinct (trace id, timestamp) rows.
    final int traceIndexFetchSize = request.limit * indexFetchMultiplier;
    ListenableFuture<Map<Long, Long>> traceIdToTimestamp;
    if (request.spanName != null) {
        traceIdToTimestamp = getTraceIdsBySpanName(request.serviceName, request.spanName, request.endTs * 1000,
                request.lookback * 1000, traceIndexFetchSize);
    } else if (request.serviceName != null) {
        traceIdToTimestamp = getTraceIdsByServiceNames(Collections.singletonList(request.serviceName),
                request.endTs * 1000, request.lookback * 1000, traceIndexFetchSize);
    } else {
        checkArgument(selectTraceIdsByServiceNames != null,
                "getTraces without serviceName requires Cassandra 2.2 or later");
        traceIdToTimestamp = transform(getServiceNames(), new AsyncFunction<List<String>, Map<Long, Long>>() {
            @Override
            public ListenableFuture<Map<Long, Long>> apply(List<String> serviceNames) {
                return getTraceIdsByServiceNames(serviceNames, request.endTs * 1000, request.lookback * 1000,
                        traceIndexFetchSize);
            }
        });
    }

    List<String> annotationKeys = CassandraUtil.annotationKeys(request);

    ListenableFuture<Set<Long>> traceIds;
    if (annotationKeys.isEmpty()) {
        // Simplest case is when there is no annotation query. Limit is valid since there's no AND
        // query that could reduce the results returned to less than the limit.
        traceIds = Futures.transform(traceIdToTimestamp, CassandraUtil.keyset());
    } else {
        // While a valid port of the scala cassandra span store (from zipkin 1.35), there is a fault.
        // each annotation key is an intersection, meaning we likely return < traceIndexFetchSize.
        List<ListenableFuture<Map<Long, Long>>> futureKeySetsToIntersect = new ArrayList<>();
        futureKeySetsToIntersect.add(traceIdToTimestamp);
        for (String annotationKey : annotationKeys) {
            futureKeySetsToIntersect.add(getTraceIdsByAnnotation(annotationKey, request.endTs * 1000,
                    request.lookback * 1000, traceIndexFetchSize));
        }
        // We achieve the AND goal, by intersecting each of the key sets.
        traceIds = Futures.transform(allAsList(futureKeySetsToIntersect), CassandraUtil.intersectKeySets());
    }
    return transform(traceIds, new AsyncFunction<Set<Long>, List<List<Span>>>() {
        @Override
        public ListenableFuture<List<List<Span>>> apply(Set<Long> traceIds) {
            traceIds = ImmutableSet.copyOf(Iterators.limit(traceIds.iterator(), request.limit));
            return transform(getSpansByTraceIds(traceIds, maxTraceCols),
                    new Function<List<Span>, List<List<Span>>>() {
                        @Override
                        public List<List<Span>> apply(List<Span> input) {
                            // Indexes only contain Span.traceId, so our matches are imprecise on Span.traceIdHigh
                            return FluentIterable.from(GroupByTraceId.apply(input, strictTraceId, true))
                                    .filter(new Predicate<List<Span>>() {
                                        @Override
                                        public boolean apply(List<Span> input) {
                                            return input.get(0).traceIdHigh == 0 || request.test(input);
                                        }
                                    }).toList();
                        }
                    });
        }

        @Override
        public String toString() {
            return "getSpansByTraceIds";
        }
    });
}

From source file:org.locationtech.geogig.rocksdb.RocksdbObjectStore.java

@Override
public void putAll(Iterator<? extends RevObject> objects, final BulkOpListener listener) {
    checkNotNull(objects, "objects is null");
    checkNotNull(listener, "listener is null");
    checkWritable();

    final boolean checkExists = !BulkOpListener.NOOP_LISTENER.equals(listener);

    ByteArrayOutputStream rawOut = new ByteArrayOutputStream(4096);
    byte[] keybuff = new byte[ObjectId.NUM_BYTES];

    Map<ObjectId, Integer> insertedIds = new HashMap<ObjectId, Integer>();
    try (RocksDBReference dbRef = dbhandle.getReference(); WriteOptions wo = new WriteOptions()) {
        wo.setDisableWAL(true);
        wo.setSync(false);
        try (ReadOptions ro = new ReadOptions()) {
            ro.setFillCache(false);
            ro.setVerifyChecksums(false);
            while (objects.hasNext()) {
                Iterator<? extends RevObject> partition = Iterators.limit(objects, 10_000);

                try (WriteBatch batch = new WriteBatch()) {
                    while (partition.hasNext()) {
                        RevObject object = partition.next();
                        rawOut.reset();
                        writeObject(object, rawOut);

                        object.getId().getRawValue(keybuff);
                        final byte[] value = rawOut.toByteArray();

                        boolean exists = checkExists ? exists(ro, keybuff) : false;
                        if (exists) {
                            listener.found(object.getId(), null);
                        } else {
                            batch.put(keybuff, value);
                            insertedIds.put(object.getId(), Integer.valueOf(value.length));
                        }

                    }
                    // Stopwatch sw = Stopwatch.createStarted();
                    dbRef.db().write(wo, batch);
                    for (Entry<ObjectId, Integer> entry : insertedIds.entrySet()) {
                        listener.inserted(entry.getKey(), entry.getValue());
                    }
                    insertedIds.clear();
                    // System.err.printf("--- synced writes in %s\n", sw.stop());
                }
            }
        }
        wo.sync();
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}
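
The putAll above exploits the fact that Iterators.limit returns a view over the shared source iterator: calling it repeatedly inside the outer while loop carves the incoming stream of objects into write batches of at most 10,000. A standalone sketch of that partitioning pattern, with a hypothetical batch size (Guava's Iterators.partition is an alternative when materialized lists are acceptable):

import com.google.common.collect.Iterators;

import java.util.Iterator;
import java.util.stream.IntStream;

public class BatchedConsumption {
    public static void main(String[] args) {
        Iterator<Integer> source = IntStream.range(0, 25).iterator();
        while (source.hasNext()) {
            // Each batch view advances the shared source iterator, so the
            // next loop iteration resumes where this one stopped.
            Iterator<Integer> batch = Iterators.limit(source, 10);
            int count = 0;
            while (batch.hasNext()) {
                batch.next();
                count++;
            }
            System.out.println("processed batch of " + count); // 10, 10, 5
        }
    }
}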

From source file:org.apache.marmotta.commons.sesame.repository.ResourceUtils.java

/**
 * List resources with the given prefix.
 *
 * @param prefix the prefix
 * @param offset number of leading results to skip
 * @param limit  maximum number of results to return; a non-positive value means no limit
 */
public static Iterable<URI> listResourcesByPrefix(final RepositoryConnection con, final String prefix,
        final int offset, final int limit) {
    final ResourceConnection rcon = getWrappedResourceConnection(con);

    if (rcon != null) {
        return new Iterable<URI>() {
            @Override
            public Iterator<URI> iterator() {
                try {
                    Iterator<URI> result = ResultUtils.unwrap(rcon.getResources(prefix));

                    Iterators.advance(result, offset);

                    if (limit > 0) {
                        return Iterators.limit(result, limit);
                    } else {
                        return result;
                    }
                } catch (RepositoryException e) {
                    ExceptionUtils.handleRepositoryException(e, ResourceUtils.class);
                    return Iterators.emptyIterator();
                }

            }
        };
    } else {
        // no direct prefix listing support, need to filter the listResources result
        return new Iterable<URI>() {
            @Override
            public Iterator<URI> iterator() {
                Iterator<URI> result = Iterators
                        .transform(Iterators.filter(listResources(con).iterator(), new Predicate<Resource>() {
                            @Override
                            public boolean apply(Resource input) {
                                return input instanceof URI && input.stringValue().startsWith(prefix);
                            }
                        }), new Function<Resource, URI>() {
                            @Override
                            public URI apply(Resource input) {
                                return (URI) input;
                            }
                        });

                Iterators.advance(result, offset);

                if (limit > 0) {
                    return Iterators.limit(result, limit);
                } else {
                    return result;
                }
            }
        };
    }
}
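
The helper above pages through results by combining Iterators.advance (skip the first offset elements) with Iterators.limit (keep at most limit of the rest). A minimal sketch of that offset/limit idiom, with hypothetical data:

import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;

import java.util.Iterator;
import java.util.List;

public class OffsetLimitPaging {
    // View of `source` that skips `offset` elements and yields at most `limit`;
    // a non-positive limit means "no limit", mirroring the method above.
    static <T> Iterator<T> page(Iterator<T> source, int offset, int limit) {
        Iterators.advance(source, offset);
        return limit > 0 ? Iterators.limit(source, limit) : source;
    }

    public static void main(String[] args) {
        List<Integer> data = Lists.newArrayList(0, 1, 2, 3, 4, 5, 6);
        System.out.println(Lists.newArrayList(page(data.iterator(), 2, 3))); // [2, 3, 4]
    }
}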

From source file:io.fineo.drill.exec.store.dynamo.physical.DynamoRecordReader.java

private void writeScalar(int index, Object value, ScalarVectorStruct struct) {
    if (value == null) {
        return;
    }
    MajorType type = struct.getType();
    MinorType minor = type.getMinorType();
    ValueVector vector = struct.getVector();
    type: switch (minor) {
    case VARCHAR:
        byte[] bytes = ((String) value).getBytes();
        switch (type.getMode()) {
        case OPTIONAL:
            ((NullableVarCharVector.Mutator) vector.getMutator()).setSafe(index, bytes, 0, bytes.length);
            break type;
        case REQUIRED:
            ((VarCharVector.Mutator) vector.getMutator()).setSafe(index, bytes);
            break type;
        case REPEATED:
            ((RepeatedVarCharVector.Mutator) vector.getMutator()).addSafe(index, bytes);
            break type;
        default:
            failForMode(type);
        }
    case BIT:
        int bool = (Boolean) value ? 1 : 0;
        switch (type.getMode()) {
        case OPTIONAL:
            NullableBitVector bv = (NullableBitVector) vector;
            bv.getMutator().setSafe(index, bool);
            break type;
        case REQUIRED:
            ((BitVector.Mutator) vector.getMutator()).setSafe(index, bool);
            break type;
        case REPEATED:
            ((RepeatedBitVector.Mutator) vector.getMutator()).addSafe(index, bool);
            break type;
        default:
            failForMode(type);
        }
    case DECIMAL38SPARSE:
        //TODO fix this logic. This is just... wrong, but we get around it by just returning a
        // string representation of the value
        BigDecimal decimal = (BigDecimal) value;
        BigInteger intVal = decimal.unscaledValue();
        String zeros = Joiner.on("").join(Iterators.limit(Iterators.cycle("0"), decimal.scale()));
        BigInteger base = new BigInteger(1 + zeros);
        intVal = intVal.multiply(base);

        byte[] sparseInt = new byte[24];
        byte[] intBytes = intVal.toByteArray();
        arraycopy(intBytes, 0, sparseInt, 0, intBytes.length);
        // kind of an ugly way to manage the actual transfer of bytes. However, this is much
        // easier than trying to manage a larger page of bytes.
        Decimal38SparseHolder holder = new Decimal38SparseHolder();
        holder.start = 0;
        holder.buffer = operatorContext.getManagedBuffer(sparseInt.length);
        holder.buffer.setBytes(0, sparseInt);
        holder.precision = decimal.precision();
        holder.scale = decimal.scale();
        switch (type.getMode()) {
        case OPTIONAL:
            ((NullableDecimal38SparseVector.Mutator) vector.getMutator()).setSafe(index, holder);
            break type;
        case REQUIRED:
            ((Decimal38SparseVector.Mutator) vector.getMutator()).setSafe(index, holder);
            break type;
        case REPEATED:
            ((RepeatedDecimal38SparseVector.Mutator) vector.getMutator()).addSafe(index, holder);
            break type;
        default:
            failForMode(type);
        }
    case VARBINARY:
        byte[] bytesBinary = (byte[]) value;
        switch (type.getMode()) {
        case OPTIONAL:
            ((NullableVarBinaryVector.Mutator) vector.getMutator()).setSafe(index, bytesBinary, 0,
                    bytesBinary.length);
            break type;
        case REQUIRED:
            ((VarBinaryVector.Mutator) vector.getMutator()).setSafe(index, bytesBinary);
            break type;
        case REPEATED:
            ((RepeatedVarBinaryVector.Mutator) vector.getMutator()).addSafe(index, bytesBinary);
            break type;
        default:
            failForMode(type);
        }
    case NULL:
        System.out.println();
    default:
        throw new IllegalArgumentException("Unsupported type: " + type);
    }
}
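
The DECIMAL38SPARSE case above builds a run of scale() zero characters with Joiner.on("").join(Iterators.limit(Iterators.cycle("0"), decimal.scale())). Iterators.cycle is infinite, so the surrounding limit is what makes the join terminate. The idiom in isolation, with a hypothetical repeat count (Strings.repeat("0", n) is the more direct Guava call for this):

import com.google.common.base.Joiner;
import com.google.common.collect.Iterators;

public class RepeatWithCycleAndLimit {
    public static void main(String[] args) {
        int n = 5;
        // cycle("0") never ends on its own; limit(..., n) truncates it to n elements.
        String zeros = Joiner.on("").join(Iterators.limit(Iterators.cycle("0"), n));
        System.out.println(zeros); // 00000
    }
}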

From source file:org.elasticsearch.test.InternalTestCluster.java

/**
 * Ensures that at most <code>n</code> nodes are up and running.
 * If fewer than <code>n</code> nodes are running, this method
 * will not start any additional nodes.
 */
public synchronized void ensureAtMostNumDataNodes(int n) throws IOException {
    int size = numDataNodes();
    if (size <= n) {
        return;
    }
    // prevent killing the master if possible and client nodes
    final Iterator<NodeAndClient> values = n == 0 ? nodes.values().iterator()
            : Iterators.filter(nodes.values().iterator(), Predicates.and(new DataNodePredicate(),
                    Predicates.not(new MasterNodePredicate(getMasterName()))));

    final Iterator<NodeAndClient> limit = Iterators.limit(values, size - n);
    logger.info("changing cluster size from {} to {}, {} data nodes", size(), n + numSharedClientNodes, n);
    Set<NodeAndClient> nodesToRemove = new HashSet<>();
    while (limit.hasNext()) {
        NodeAndClient next = limit.next();
        nodesToRemove.add(next);
        removeDisruptionSchemeFromNode(next);
        next.close();
    }
    for (NodeAndClient toRemove : nodesToRemove) {
        nodes.remove(toRemove.name);
    }
    if (!nodesToRemove.isEmpty() && size() > 0) {
        assertNoTimeout(client().admin().cluster().prepareHealth()
                .setWaitForNodes(Integer.toString(nodes.size())).get());
    }
}

From source file:io.crate.test.integration.CrateTestCluster.java

private synchronized Set<String> nRandomNodes(int numNodes) {
    assert size() >= numNodes;
    return Sets.newHashSet(Iterators.limit(this.nodes.keySet().iterator(), numNodes));
}

From source file:org.elasticsearch.test.InternalTestCluster.java

private synchronized Set<String> nRandomDataNodes(int numNodes) {
    assert size() >= numNodes;
    NavigableMap<String, NodeAndClient> dataNodes = Maps.filterEntries(nodes,
            new EntryNodePredicate(new DataNodePredicate()));
    return Sets.newHashSet(Iterators.limit(dataNodes.keySet().iterator(), numNodes));
}