Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

This page collects usage examples for com.google.common.util.concurrent.Futures.allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed. The values appear in the same order as the input futures; if any input fails or is cancelled, so is the returned future.
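
To make those semantics concrete, here is a minimal, self-contained sketch (the class and variable names are illustrative, not taken from the examples below) showing both outcomes: all inputs succeeding, and one input failing.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

import java.util.List;
import java.util.concurrent.ExecutionException;

public class AllAsListDemo {
    public static void main(String[] args) throws Exception {
        // All inputs succeed: the combined future yields the values in input order.
        ListenableFuture<List<Integer>> ok = Futures.allAsList(
                ImmutableList.of(Futures.immediateFuture(1), Futures.immediateFuture(2)));
        System.out.println(ok.get()); // [1, 2]

        // Any failed input fails the combined future; get() wraps the cause
        // in an ExecutionException.
        ListenableFuture<List<Integer>> failed = Futures.allAsList(
                ImmutableList.of(Futures.immediateFuture(1),
                        Futures.<Integer>immediateFailedFuture(new IllegalStateException("boom"))));
        try {
            failed.get();
        } catch (ExecutionException e) {
            System.out.println("failed: " + e.getCause());
        }
    }
}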

Usage

From source file:com.google.pubsub.flic.controllers.Controller.java

/**
 * Waits for publishers to complete the load test.
 */
public void waitForPublisherClients() throws Throwable {
    try {
        Futures.allAsList(clients.stream().filter(c -> c.getClientType().isPublisher())
                .map(Client::getDoneFuture).collect(Collectors.toList())).get();
    } catch (ExecutionException e) {
        throw e.getCause();
    }
}
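
The catch block above unwraps the ExecutionException to rethrow the first failure's underlying cause. When partial failure is acceptable instead, Guava's companion method Futures.successfulAsList yields a list with null in place of each failed or cancelled input rather than failing the whole future; a brief sketch (illustrative values, not part of the Cloud Pub/Sub source):

ListenableFuture<List<Integer>> tolerant = Futures.successfulAsList(
        ImmutableList.of(Futures.immediateFuture(1),
                Futures.<Integer>immediateFailedFuture(new RuntimeException("ignored"))));
System.out.println(tolerant.get()); // prints [1, null]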

From source file:com.facebook.buck.distributed.DistBuildClientExecutor.java

public int executeAndPrintFailuresToEventBus(final WeightedListeningExecutorService executorService,
        ProjectFilesystem projectFilesystem, FileHashCache fileHashCache, BuckEventBus eventBus)
        throws IOException, InterruptedException {

    BuildJob job = distBuildService.createBuild();
    final BuildId id = job.getBuildId();
    LOG.info("Created job. Build id = " + id.getId());
    logDebugInfo(job);

    List<ListenableFuture<Void>> asyncJobs = new LinkedList<>();
    LOG.info("Uploading local changes.");
    asyncJobs.add(distBuildService.uploadMissingFiles(buildJobState.fileHashes, executorService));

    LOG.info("Uploading target graph.");
    asyncJobs.add(distBuildService.uploadTargetGraph(buildJobState, id, executorService));

    LOG.info("Uploading buck dot-files.");
    asyncJobs.add(distBuildService.uploadBuckDotFiles(id, projectFilesystem, fileHashCache, executorService));

    try {
        Futures.allAsList(asyncJobs).get();
    } catch (ExecutionException e) {
        LOG.error("Upload failed.");
        throw new RuntimeException(e);
    }

    distBuildService.setBuckVersion(id, buckVersion);
    LOG.info("Set Buck Version. Build status: " + job.getStatus().toString());

    job = distBuildService.startBuild(id);
    LOG.info("Started job. Build status: " + job.getStatus().toString());
    logDebugInfo(job);

    Stopwatch stopwatch = Stopwatch.createStarted();
    // Keep polling until the build is complete or failed.
    do {
        job = distBuildService.getCurrentBuildJobState(id);
        LOG.info("Got build status: " + job.getStatus().toString());

        DistBuildStatus distBuildStatus = prepareStatusFromJob(job)
                .setETAMillis(MAX_BUILD_DURATION_MILLIS - stopwatch.elapsed(TimeUnit.MILLISECONDS)).build();
        eventBus.post(new DistBuildStatusEvent(distBuildStatus));

        try {
            // TODO(shivanker): Get rid of sleeps in methods which we want to unit test
            Thread.sleep(millisBetweenStatusPoll);
        } catch (InterruptedException e) {
            LOG.error(e, "BuildStatus polling sleep call has been interrupted unexpectedly.");
        }
    } while (!(job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY)
            || job.getStatus().equals(BuildStatus.FAILED)));

    LOG.info("Build was " + (job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY) ? "" : "not ")
            + "successful!");
    logDebugInfo(job);

    DistBuildStatus distBuildStatus = prepareStatusFromJob(job).setETAMillis(0).build();
    eventBus.post(new DistBuildStatusEvent(distBuildStatus));

    return job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY) ? 0 : 1;
}

From source file:com.google.gerrit.server.index.ReindexAfterUpdate.java

@Override
public void onGitReferenceUpdated(final Event event) {
    if (event.getRefName().startsWith(RefNames.REFS_CHANGES)
            || event.getRefName().startsWith(RefNames.REFS_DRAFT_COMMENTS)
            || event.getRefName().startsWith(RefNames.REFS_USERS)) {
        return;
    }
    Futures.transformAsync(executor.submit(new GetChanges(event)),
            new AsyncFunction<List<Change>, List<Void>>() {
                @Override
                public ListenableFuture<List<Void>> apply(List<Change> changes) {
                    List<ListenableFuture<Void>> result = Lists.newArrayListWithCapacity(changes.size());
                    for (Change c : changes) {
                        result.add(executor.submit(new Index(event, c.getId())));
                    }
                    return Futures.allAsList(result);
                }
            });
}
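
A version note, not part of the Gerrit source: the two-argument Futures.transformAsync overload used above was removed in later Guava releases, which require an explicit executor for the function. A sketch of the same chain on such a version (assuming the surrounding fields are unchanged):

    Futures.transformAsync(executor.submit(new GetChanges(event)),
            changes -> {
                List<ListenableFuture<Void>> result = Lists.newArrayListWithCapacity(changes.size());
                for (Change c : changes) {
                    result.add(executor.submit(new Index(event, c.getId())));
                }
                return Futures.allAsList(result);
            }, MoreExecutors.directExecutor());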

From source file:org.apache.storm.cassandra.executor.AsyncExecutor.java

/**
 * Asynchronously executes all statements associated with the specified input.
 * The input is passed to the handler's success callback once all queries succeed, or to its failure callback if any of them fails.
 */
public List<SettableFuture<T>> execAsync(List<Statement> statements, final T input) {

    List<SettableFuture<T>> settableFutures = new ArrayList<>(statements.size());

    for (Statement s : statements)
        settableFutures.add(execAsync(s, input, AsyncResultHandler.NO_OP_HANDLER));

    ListenableFuture<List<T>> allAsList = Futures.allAsList(settableFutures);
    Futures.addCallback(allAsList, new FutureCallback<List<T>>() {
        @Override
        public void onSuccess(List<T> inputs) {
            handler.success(input);
        }

        @Override
        public void onFailure(Throwable t) {
            handler.failure(t, input);
        }
    }, executorService);
    return settableFutures;
}

From source file:io.druid.query.GroupByParallelQueryRunner.java

@Override
public Sequence<T> run(final Query<T> queryParam, final Map<String, Object> responseContext) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> indexAccumulatorPair = GroupByQueryHelper
            .createIndexAccumulatorPair(query, configSupplier.get(), bufferPool);
    final Pair<Queue, Accumulator<Queue, T>> bySegmentAccumulatorPair = GroupByQueryHelper
            .createBySegmentAccumulatorPair();
    final boolean bySegment = query.getContextBySegment(false);
    final int priority = query.getContextPriority(0);

    ListenableFuture<List<Void>> futures = Futures.allAsList(Lists.newArrayList(
            Iterables.transform(queryables, new Function<QueryRunner<T>, ListenableFuture<Void>>() {
                @Override
                public ListenableFuture<Void> apply(final QueryRunner<T> input) {
                    if (input == null) {
                        throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
                    }

                    return exec.submit(new AbstractPrioritizedCallable<Void>(priority) {
                        @Override
                        public Void call() throws Exception {
                            try {
                                if (bySegment) {
                                    input.run(queryParam, responseContext).accumulate(
                                            bySegmentAccumulatorPair.lhs, bySegmentAccumulatorPair.rhs);
                                } else {
                                    input.run(queryParam, responseContext).accumulate(indexAccumulatorPair.lhs,
                                            indexAccumulatorPair.rhs);
                                }

                                return null;
                            } catch (QueryInterruptedException e) {
                                throw Throwables.propagate(e);
                            } catch (Exception e) {
                                log.error(e, "Exception with one of the sequences!");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
                }
            })));

    // Let the runners complete
    try {
        queryWatcher.registerQuery(query, futures);
        final Number timeout = query.getContextValue(QueryContextKeys.TIMEOUT, (Number) null);
        if (timeout == null) {
            futures.get();
        } else {
            futures.get(timeout.longValue(), TimeUnit.MILLISECONDS);
        }
    } catch (InterruptedException e) {
        log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
        futures.cancel(true);
        indexAccumulatorPair.lhs.close();
        throw new QueryInterruptedException("Query interrupted");
    } catch (CancellationException e) {
        indexAccumulatorPair.lhs.close();
        throw new QueryInterruptedException("Query cancelled");
    } catch (TimeoutException e) {
        indexAccumulatorPair.lhs.close();
        log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
        futures.cancel(true);
        throw new QueryInterruptedException("Query timeout");
    } catch (ExecutionException e) {
        indexAccumulatorPair.lhs.close();
        throw Throwables.propagate(e.getCause());
    }

    if (bySegment) {
        return Sequences.simple(bySegmentAccumulatorPair.lhs);
    }

    return new ResourceClosingSequence<T>(Sequences.simple(Iterables
            .transform(indexAccumulatorPair.lhs.iterableWithPostAggregations(null), new Function<Row, T>() {
                @Override
                public T apply(Row input) {
                    return (T) input;
                }
            })), indexAccumulatorPair.lhs);
}

From source file:com.google.cloud.bigtable.grpc.async.ResourceLimiterPerf.java

/**
 * @param pool
 * @throws InterruptedException
 * @throws ExecutionException
 * @throws TimeoutException
 */
private static void test(ListeningExecutorService pool)
        throws InterruptedException, ExecutionException, TimeoutException {
    final ResourceLimiter underTest = new ResourceLimiter(SIZE, (int) SIZE);
    final LinkedBlockingQueue<Long> registeredEvents = new LinkedBlockingQueue<>();

    final int readerCount = 20;
    final int writerCount = 1;
    Runnable writePerfRunnable = new Runnable() {
        @Override
        public void run() {
            long startReg = System.nanoTime();
            int offerCount = REGISTER_COUNT / writerCount;
            try {
                for (int i = 0; i < offerCount; i++) {
                    registeredEvents.offer(underTest.registerOperationWithHeapSize(1));
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            } finally {
                long totalTime = System.nanoTime() - startReg;
                System.out.println(String.format("Registered %d in %d ms.  %d nanos/reg.  %f offer/sec",
                        offerCount, totalTime / 1000000, totalTime / offerCount,
                        offerCount * 1000000000.0 / totalTime));
            }
        }
    };
    Runnable readPerfRunnable = new Runnable() {
        @Override
        public void run() {
            long startComplete = System.nanoTime();
            int regCount = REGISTER_COUNT / readerCount;
            try {
                for (int i = 0; i < regCount; i++) {
                    Long registeredId = registeredEvents.poll(1, TimeUnit.SECONDS);
                    if (registeredId == null) {
                        i--;
                    } else {
                        underTest.markCanBeCompleted(registeredId);
                    }
                }
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            } finally {
                long totalTime = System.nanoTime() - startComplete;
                System.out.println(String.format(
                        "markCanBeCompleted %d in %d ms.  %d nanos/complete.  %f unreg/sec", regCount,
                        totalTime / 1000000, totalTime / regCount, regCount * 1000000000.0 / totalTime));
            }
        }
    };

    List<ListenableFuture<?>> writerFutures = new ArrayList<>();
    List<ListenableFuture<?>> readerFutures = new ArrayList<>();

    for (int i = 0; i < writerCount; i++) {
        writerFutures.add(pool.submit(writePerfRunnable));
    }
    Thread.sleep(10);
    for (int i = 0; i < readerCount; i++) {
        readerFutures.add(pool.submit(readPerfRunnable));
    }
    Futures.allAsList(writerFutures).get(300, TimeUnit.MINUTES);
    Futures.allAsList(readerFutures).get(300, TimeUnit.MINUTES);
}

From source file:com.google.cloud.bigtable.grpc.async.RpcThrottlerPerf.java

/**
 * @param pool
 * @throws InterruptedException
 * @throws ExecutionException
 * @throws TimeoutException
 */
private static void test(ListeningExecutorService pool)
        throws InterruptedException, ExecutionException, TimeoutException {
    final RpcThrottler underTest = new RpcThrottler(new ResourceLimiter(SIZE, (int) SIZE));
    final LinkedBlockingQueue<Long> registeredEvents = new LinkedBlockingQueue<>();

    final int writerCount = 1;
    final int readerCount = 20;
    Runnable writePerfRunnable = new Runnable() {
        @Override
        public void run() {
            long startReg = System.nanoTime();
            int offerCount = REGISTER_COUNT / writerCount;
            try {
                for (int i = 0; i < offerCount; i++) {
                    registeredEvents.add(underTest.registerOperationWithHeapSize(1));
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            } finally {
                long totalTime = System.nanoTime() - startReg;
                System.out.println(String.format("Registered %d in %d ms.  %d nanos/reg.  %f offer/sec",
                        offerCount, totalTime / 1000000, totalTime / offerCount,
                        offerCount * 1000000000.0 / totalTime));
            }
        }
    };
    Runnable readPerfRunnable = new Runnable() {
        @Override
        public void run() {
            long startComplete = System.nanoTime();
            int regCount = REGISTER_COUNT / readerCount;
            try {
                for (int i = 0; i < regCount; i++) {
                    Long registeredId = registeredEvents.poll(1, TimeUnit.SECONDS);
                    if (registeredId == null) {
                        i--;
                    } else {
                        underTest.onRpcCompletion(registeredId);
                    }
                }
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            } finally {
                long totalTime = System.nanoTime() - startComplete;
                System.out.println(String.format("onRpcCompletion %d in %d.  %d nanos/complete.  %f unreg/sec",
                        regCount, totalTime / 1000000, totalTime / regCount,
                        regCount * 1000000000.0 / totalTime));
            }
        }
    };

    List<ListenableFuture<?>> writerFutures = new ArrayList<>();
    List<ListenableFuture<?>> readerFutures = new ArrayList<>();

    for (int i = 0; i < writerCount; i++) {
        writerFutures.add(pool.submit(writePerfRunnable));
    }
    Thread.sleep(10);
    for (int i = 0; i < readerCount; i++) {
        readerFutures.add(pool.submit(readPerfRunnable));
    }
    Futures.allAsList(writerFutures).get(300, TimeUnit.MINUTES);
    underTest.awaitCompletion();
}

From source file:org.opendaylight.ocpplugin.impl.OcpPluginProviderImpl.java

private void startRadioHeadConnections() {
    final List<ListenableFuture<Boolean>> starterChain = new ArrayList<>(radioHeadConnectionProviders.size());
    for (final RadioHeadConnectionProvider radioHeadConnectionPrv : radioHeadConnectionProviders) {
        radioHeadConnectionPrv.setRadioHeadConnectionHandler(connectionManager);
        final ListenableFuture<Boolean> isOnlineFuture = radioHeadConnectionPrv.startup();
        starterChain.add(isOnlineFuture);
    }

    final ListenableFuture<List<Boolean>> srvStarted = Futures.allAsList(starterChain);
    Futures.addCallback(srvStarted, new FutureCallback<List<Boolean>>() {
        @Override
        public void onSuccess(final List<Boolean> result) {
            LOG.info("All radioHeadConnectionProviders are up and running ({}).", result.size());
        }

        @Override
        public void onFailure(final Throwable t) {
            LOG.warn("Some radioHeadConnectionProviders failed to start.", t);
        }
    });
}
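
A similar caveat, again as an editorial note rather than part of the OpenDaylight source: recent Guava releases also drop the two-argument Futures.addCallback overload used above, so the callback's executor must be named explicitly. Only the call shape changes (callback standing for the anonymous FutureCallback above):

    Futures.addCallback(srvStarted, callback, MoreExecutors.directExecutor());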

From source file:org.opendaylight.controller.sample.kitchen.impl.KitchenServiceImpl.java

@Override
public Future<RpcResult<Void>> makeBreakfast(EggsType eggsType, Class<? extends ToastType> toastType,
        int toastDoneness) {

    // Call makeToast and use JdkFutureAdapters to convert the Future to a ListenableFuture.
    // The OpendaylightToaster impl already returns a ListenableFuture, so the conversion is
    // actually a no-op.

    ListenableFuture<RpcResult<Void>> makeToastFuture = JdkFutureAdapters
            .listenInPoolThread(makeToast(toastType, toastDoneness), executor);

    ListenableFuture<RpcResult<Void>> makeEggsFuture = makeEggs(eggsType);

    // Combine the two ListenableFutures into one containing a list of RpcResults.

    ListenableFuture<List<RpcResult<Void>>> combinedFutures = Futures
            .allAsList(ImmutableList.of(makeToastFuture, makeEggsFuture));

    // Then reduce the list of RpcResults into a single RpcResult.

    return Futures.transform(combinedFutures, new AsyncFunction<List<RpcResult<Void>>, RpcResult<Void>>() {
        @Override
        public ListenableFuture<RpcResult<Void>> apply(List<RpcResult<Void>> results) throws Exception {
            boolean atLeastOneSucceeded = false;
            Builder<RpcError> errorList = ImmutableList.builder();
            for (RpcResult<Void> result : results) {
                if (result.isSuccessful()) {
                    atLeastOneSucceeded = true;
                }

                if (result.getErrors() != null) {
                    errorList.addAll(result.getErrors());
                }
            }

            return Futures.immediateFuture(RpcResultBuilder.<Void>status(atLeastOneSucceeded)
                    .withRpcErrors(errorList.build()).build());
        }
    });
}

From source file:org.apache.cassandra.repair.RepairJob.java

/**
 * Runs the repair job.
 *
 * This sets up the necessary tasks and runs them on the given {@code taskExecutor}.
 * After submitting all tasks, it waits until validation with the replicas completes.
 */
public void run() {
    List<InetAddress> allEndpoints = new ArrayList<>(session.endpoints);
    allEndpoints.add(FBUtilities.getBroadcastAddress());

    ListenableFuture<List<TreeResponse>> validations;
    // Create a snapshot at all nodes unless we're using pure parallel repairs
    if (parallelismDegree != RepairParallelism.PARALLEL) {
        // Request snapshots from all replicas
        List<ListenableFuture<InetAddress>> snapshotTasks = new ArrayList<>(allEndpoints.size());
        for (InetAddress endpoint : allEndpoints) {
            SnapshotTask snapshotTask = new SnapshotTask(desc, endpoint);
            snapshotTasks.add(snapshotTask);
            taskExecutor.execute(snapshotTask);
        }
        // When all snapshots complete, send validation requests
        ListenableFuture<List<InetAddress>> allSnapshotTasks = Futures.allAsList(snapshotTasks);
        validations = Futures.transform(allSnapshotTasks,
                new AsyncFunction<List<InetAddress>, List<TreeResponse>>() {
                    public ListenableFuture<List<TreeResponse>> apply(List<InetAddress> endpoints)
                            throws Exception {
                        logger.info(String.format("[repair #%s] requesting merkle trees for %s (to %s)",
                                desc.sessionId, desc.columnFamily, endpoints));
                        if (parallelismDegree == RepairParallelism.SEQUENTIAL)
                            return sendSequentialValidationRequest(endpoints);
                        else
                            return sendDCAwareValidationRequest(endpoints);
                    }
                }, taskExecutor);
    } else {
        logger.info(String.format("[repair #%s] requesting merkle trees for %s (to %s)", desc.sessionId,
                desc.columnFamily, allEndpoints));
        // If not sequential, just send validation requests to all replicas
        validations = sendValidationRequest(allEndpoints);
    }

    // When all validations complete, submit sync tasks
    ListenableFuture<List<SyncStat>> syncResults = Futures.transform(validations,
            new AsyncFunction<List<TreeResponse>, List<SyncStat>>() {
                public ListenableFuture<List<SyncStat>> apply(List<TreeResponse> trees) throws Exception {
                    InetAddress local = FBUtilities.getLocalAddress();

                    List<SyncTask> syncTasks = new ArrayList<>();
                    // We need to difference all trees one against another
                    for (int i = 0; i < trees.size() - 1; ++i) {
                        TreeResponse r1 = trees.get(i);
                        for (int j = i + 1; j < trees.size(); ++j) {
                            TreeResponse r2 = trees.get(j);
                            SyncTask task;
                            if (r1.endpoint.equals(local) || r2.endpoint.equals(local)) {
                                task = new LocalSyncTask(desc, r1, r2, repairedAt);
                            } else {
                                task = new RemoteSyncTask(desc, r1, r2);
                                // RemoteSyncTask expects SyncComplete message sent back.
                                // Register task to RepairSession to receive response.
                                session.waitForSync(Pair.create(desc, new NodePair(r1.endpoint, r2.endpoint)),
                                        (RemoteSyncTask) task);
                            }
                            syncTasks.add(task);
                            taskExecutor.submit(task);
                        }
                    }
                    return Futures.allAsList(syncTasks);
                }
            }, taskExecutor);

    // When all syncs complete, set the final result
    Futures.addCallback(syncResults, new FutureCallback<List<SyncStat>>() {
        public void onSuccess(List<SyncStat> stats) {
            logger.info(String.format("[repair #%s] %s is fully synced", session.getId(), desc.columnFamily));
            SystemDistributedKeyspace.successfulRepairJob(session.getId(), desc.keyspace, desc.columnFamily);
            set(new RepairResult(desc, stats));
        }

        /**
         * Snapshot, validation and sync failures are all handled here
         */
        public void onFailure(Throwable t) {
            logger.warn(String.format("[repair #%s] %s sync failed", session.getId(), desc.columnFamily));
            SystemDistributedKeyspace.failedRepairJob(session.getId(), desc.keyspace, desc.columnFamily, t);
            setException(t);
        }
    }, taskExecutor);

    // Wait for validation to complete
    Futures.getUnchecked(validations);
}
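
A closing note on the last line above: Futures.getUnchecked blocks like get() but rethrows failures unchecked, which is why run() needs no throws clause. A minimal sketch of that behavior (illustrative values, not from the Cassandra source):

ListenableFuture<List<Integer>> f = Futures.allAsList(ImmutableList.of(
        Futures.<Integer>immediateFailedFuture(new IllegalStateException("boom"))));
try {
    Futures.getUnchecked(f); // no checked ExecutionException to declare
} catch (UncheckedExecutionException e) {
    System.out.println(e.getCause()); // java.lang.IllegalStateException: boom
}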