Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

This page collects example usages of com.google.common.util.concurrent.Futures.allAsList from open-source projects.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
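
The list in the result preserves the order of the input iterable, and the combined future fails or is cancelled if any input future fails or is cancelled. Before the real-world examples below, here is a minimal, self-contained sketch (class and variable names are illustrative, not taken from any of the source files):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListBasics {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        ListenableFuture<Integer> first = executor.submit(() -> 1);
        ListenableFuture<Integer> second = executor.submit(() -> 2);

        // The combined future succeeds only once every input succeeds,
        // and the result list preserves the input order.
        ListenableFuture<List<Integer>> all = Futures.allAsList(Arrays.asList(first, second));
        System.out.println(all.get()); // prints [1, 2]

        executor.shutdown();
    }
}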

Usage

From source file:io.druid.query.groupby.epinephelinae.GroupByMergingQueryRunnerV2.java

@Override
public Sequence<Row> run(final Query queryParam, final Map responseContext) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);

    // CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION is here because realtime servers use nested mergeRunners calls
    // (one for the entire query and one for each sink). We only want the outer call to actually do merging with a
    // merge buffer, otherwise the query will allocate too many merge buffers. This is potentially sub-optimal as it
    // will involve materializing the results for each sink before starting to feed them into the outer merge buffer.
    // I'm not sure of a better way to do this without tweaking how realtime servers do queries.
    final boolean forceChainedExecution = query.getContextBoolean(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION,
            false);
    final GroupByQuery queryForRunners = query.withOverriddenContext(
            ImmutableMap.<String, Object>of(CTX_KEY_MERGE_RUNNERS_USING_CHAINED_EXECUTION, true));

    if (BaseQuery.getContextBySegment(query, false) || forceChainedExecution) {
        return new ChainedExecutionQueryRunner(exec, queryWatcher, queryables).run(query, responseContext);
    }

    final AggregatorFactory[] combiningAggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs()
            .size()];
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++) {
        combiningAggregatorFactories[i] = query.getAggregatorSpecs().get(i).getCombiningFactory();
    }

    final File temporaryStorageDirectory = new File(System.getProperty("java.io.tmpdir"),
            String.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));

    final int priority = BaseQuery.getContextPriority(query, 0);

    // Figure out timeoutAt time now, so we can apply the timeout to both the mergeBufferPool.take and the actual
    // query processing together.
    final Number queryTimeout = query.getContextValue(QueryContextKeys.TIMEOUT, null);
    final long timeoutAt = queryTimeout == null ? JodaUtils.MAX_INSTANT
            : System.currentTimeMillis() + queryTimeout.longValue();

    return new BaseSequence<>(
            new BaseSequence.IteratorMaker<Row, CloseableGrouperIterator<RowBasedKey, Row>>() {
                @Override
                public CloseableGrouperIterator<RowBasedKey, Row> make() {
                    final List<ReferenceCountingResourceHolder> resources = Lists.newArrayList();

                    try {
                        final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(
                                temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());
                        final ReferenceCountingResourceHolder<LimitedTemporaryStorage> temporaryStorageHolder = ReferenceCountingResourceHolder
                                .fromCloseable(temporaryStorage);
                        resources.add(temporaryStorageHolder);

                        final ReferenceCountingResourceHolder<ByteBuffer> mergeBufferHolder;
                        try {
                            // This will potentially block if there are no merge buffers left in the pool.
                            final long timeout = timeoutAt - System.currentTimeMillis();
                            if (timeout <= 0 || (mergeBufferHolder = mergeBufferPool.take(timeout)) == null) {
                                throw new QueryInterruptedException(new TimeoutException());
                            }
                            resources.add(mergeBufferHolder);
                        } catch (InterruptedException e) {
                            throw new QueryInterruptedException(e);
                        }

                        Pair<Grouper<RowBasedKey>, Accumulator<Grouper<RowBasedKey>, Row>> pair = RowBasedGrouperHelper
                                .createGrouperAccumulatorPair(query, false, config, mergeBufferHolder.get(),
                                        concurrencyHint, temporaryStorage, spillMapper,
                                        combiningAggregatorFactories);
                        final Grouper<RowBasedKey> grouper = pair.lhs;
                        final Accumulator<Grouper<RowBasedKey>, Row> accumulator = pair.rhs;

                        final ReferenceCountingResourceHolder<Grouper<RowBasedKey>> grouperHolder = ReferenceCountingResourceHolder
                                .fromCloseable(grouper);
                        resources.add(grouperHolder);

                        ListenableFuture<List<Boolean>> futures = Futures
                                .allAsList(Lists.newArrayList(Iterables.transform(queryables,
                                        new Function<QueryRunner<Row>, ListenableFuture<Boolean>>() {
                                            @Override
                                            public ListenableFuture<Boolean> apply(
                                                    final QueryRunner<Row> input) {
                                                if (input == null) {
                                                    throw new ISE(
                                                            "Null queryRunner! Looks to be some segment unmapping action happening");
                                                }

                                                return exec.submit(
                                                        new AbstractPrioritizedCallable<Boolean>(priority) {
                                                            @Override
                                                            public Boolean call() throws Exception {
                                                                try (Releaser bufferReleaser = mergeBufferHolder
                                                                        .increment();
                                                                        Releaser grouperReleaser = grouperHolder
                                                                                .increment()) {
                                                                    final Object retVal = input
                                                                            .run(queryForRunners,
                                                                                    responseContext)
                                                                            .accumulate(grouper, accumulator);

                                                                    // Return true if OK, false if resources were exhausted.
                                                                    return retVal == grouper;
                                                                } catch (QueryInterruptedException e) {
                                                                    throw e;
                                                                } catch (Exception e) {
                                                                    log.error(e,
                                                                            "Exception with one of the sequences!");
                                                                    throw Throwables.propagate(e);
                                                                }
                                                            }
                                                        });
                                            }
                                        })));

                        waitForFutureCompletion(query, futures, timeoutAt - System.currentTimeMillis());

                        return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, new Closeable() {
                            @Override
                            public void close() throws IOException {
                                for (Closeable closeable : Lists.reverse(resources)) {
                                    CloseQuietly.close(closeable);
                                }
                            }
                        });
                    } catch (Throwable e) {
                        // Exception caught while setting up the iterator; release resources.
                        for (Closeable closeable : Lists.reverse(resources)) {
                            CloseQuietly.close(closeable);
                        }
                        throw e;
                    }
                }

                @Override
                public void cleanup(CloseableGrouperIterator<RowBasedKey, Row> iterFromMake) {
                    iterFromMake.close();
                }
            });
}
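
The runner above fans one task out per queryable, combines the per-task futures with allAsList, and applies a single deadline to the combined result (via waitForFutureCompletion). A simplified, hypothetical sketch of that fan-out-with-deadline pattern (the class and method names are illustrative):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class FanOutWithDeadline {
    public static List<Boolean> runAll(List<Runnable> work, long timeoutMs) throws Exception {
        ListeningExecutorService exec =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        List<ListenableFuture<Boolean>> tasks = new ArrayList<>();
        for (Runnable r : work) {
            tasks.add(exec.submit(() -> { r.run(); return true; }));
        }
        ListenableFuture<List<Boolean>> combined = Futures.allAsList(tasks);
        try {
            // One deadline covers the whole batch, as in the runner above.
            return combined.get(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
            combined.cancel(true); // cancelling the combined future cancels the inputs
            throw e;
        } finally {
            exec.shutdown();
        }
    }
}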

From source file:com.google.idea.blaze.cpp.BlazeConfigurationResolver.java

private ImmutableMap<TargetKey, BlazeResolveConfiguration> buildBlazeConfigurationMap(
        BlazeContext parentContext, BlazeProjectData blazeProjectData,
        ImmutableMap<TargetKey, CToolchainIdeInfo> toolchainLookupMap,
        ImmutableMap<File, VirtualFile> headerRoots) {
    // Type specification needed to avoid incorrect type inference during command line build.
    return Scope.push(parentContext,
            (ScopedFunction<ImmutableMap<TargetKey, BlazeResolveConfiguration>>) context -> {
                context.push(new TimingScope("Build C configuration map"));

                ConcurrentMap<CToolchainIdeInfo, File> compilerWrapperCache = Maps.newConcurrentMap();
                List<ListenableFuture<MapEntry>> mapEntryFutures = Lists.newArrayList();

                for (TargetIdeInfo target : blazeProjectData.targetMap.targets()) {
                    if (target.kind.getLanguageClass() == LanguageClass.C) {
                        ListenableFuture<MapEntry> future = submit(() -> createResolveConfiguration(target,
                                toolchainLookupMap, headerRoots, compilerWrapperCache, blazeProjectData));
                        mapEntryFutures.add(future);
                    }
                }

                ImmutableMap.Builder<TargetKey, BlazeResolveConfiguration> newResolveConfigurations = ImmutableMap
                        .builder();
                List<MapEntry> mapEntries;
                try {
                    mapEntries = Futures.allAsList(mapEntryFutures).get();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    context.setCancelled();
                    return ImmutableMap.of();
                } catch (ExecutionException e) {
                    IssueOutput.error("Could not build C resolve configurations: " + e).submit(context);
                    LOG.error("Could not build C resolve configurations", e);
                    return ImmutableMap.of();
                }

                for (MapEntry mapEntry : mapEntries) {
                    // Skip over labels that don't have C configuration data.
                    if (mapEntry != null) {
                        newResolveConfigurations.put(mapEntry.targetKey, mapEntry.configuration);
                    }
                }
                return newResolveConfigurations.build();
            });
}
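
The try/catch around Futures.allAsList(mapEntryFutures).get() above is the canonical way to block on a combined future: restore the interrupt flag on InterruptedException, and treat ExecutionException as the first input failure. A minimal, hypothetical sketch of that pattern:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;

public class BlockingCombine {
    // Returns the combined results, or an empty list if interrupted or failed.
    static <V> List<V> getAllOrEmpty(List<ListenableFuture<V>> futures) {
        try {
            return Futures.allAsList(futures).get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt status
            return Collections.emptyList();
        } catch (ExecutionException e) {
            // e.getCause() is the failure of the first input future that failed
            return Collections.emptyList();
        }
    }
}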

From source file:org.opendaylight.mdsal.mount.cache.impl.tx.CachedDOMWriteTransaction.java

@SuppressFBWarnings(value = "BC_UNCONFIRMED_CAST_OF_RETURN_VALUE", justification = "Pertains to the assignment of the 'clientException' var. FindBugs flags this as an "
        + "uncomfirmed cast but the generic type in TransactionCommitFailedExceptionMapper is "
        + "TransactionCommitFailedException and thus should be deemed as confirmed.")
private static void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
        final DOMStoreWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort,
        final String phase, final TransactionCommitFailedExceptionMapper exMapper, final Throwable throwable) {

    if (clientSubmitFuture.isDone()) {
        // We must have had failures from multiple cohorts.
        return;
    }

    LOG.warn("Tx: {} Error during phase {}, starting Abort", transaction.getIdentifier(), phase, throwable);
    final Exception e = new RuntimeException("Unexpected error occurred", throwable);

    final TransactionCommitFailedException clientException = exMapper.apply(e);

    // Transaction failed - tell all cohorts to abort.

    @SuppressWarnings("unchecked")
    ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[1];
    canCommitFutures[0] = cohort.abort();

    ListenableFuture<List<Void>> combinedFuture = Futures.allAsList(canCommitFutures);
    Futures.addCallback(combinedFuture, new FutureCallback<List<Void>>() {
        @Override
        public void onSuccess(List<Void> notUsed) {
            // Propagate the original exception to the client.
            clientSubmitFuture.setException(clientException);
        }

        @Override
        public void onFailure(Throwable failure) {
            LOG.error("Tx: {} Error during Abort.", transaction.getIdentifier(), failure);

            // Propagate the original exception as that is what caused the Tx to fail and is
            // what's interesting to the client.
            clientSubmitFuture.setException(clientException);
        }
    }, MoreExecutors.directExecutor());
}
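
Here the combined future is observed asynchronously with Futures.addCallback rather than a blocking get(), with the callback run on a direct executor. A stripped-down, hypothetical sketch of the same combine-and-callback pattern:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

import java.util.Arrays;
import java.util.List;

public class CombineAndCallback {
    public static void main(String[] args) {
        SettableFuture<Void> first = SettableFuture.create();
        SettableFuture<Void> second = SettableFuture.create();

        ListenableFuture<List<Void>> combined = Futures.allAsList(Arrays.asList(first, second));
        Futures.addCallback(combined, new FutureCallback<List<Void>>() {
            @Override
            public void onSuccess(List<Void> notUsed) {
                System.out.println("all inputs completed");
            }

            @Override
            public void onFailure(Throwable failure) {
                System.out.println("at least one input failed: " + failure);
            }
        }, MoreExecutors.directExecutor());

        first.set(null);
        second.set(null); // completing the last input triggers onSuccess
    }
}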

From source file:com.facebook.buck.distributed.DistBuildArtifactCacheImpl.java

@Override
public synchronized void prewarmRemoteContains(ImmutableSet<BuildRule> rulesToBeChecked) {
    @SuppressWarnings("PMD.PrematureDeclaration")
    Stopwatch stopwatch = Stopwatch.createStarted();
    Set<BuildRule> unseenRules = rulesToBeChecked.stream()
            .filter(rule -> !remoteCacheContainsFutures.containsKey(rule)).collect(Collectors.toSet());

    if (unseenRules.isEmpty()) {
        return;
    }

    LOG.info("Checking remote cache for [%d] new rules.", unseenRules.size());
    Map<BuildRule, ListenableFuture<RuleKey>> rulesToKeys = Maps.asMap(unseenRules,
            rule -> ruleKeyCalculator.calculate(eventBus, rule));

    ListenableFuture<Map<RuleKey, CacheResult>> keysToCacheResultFuture = Futures
            .transformAsync(Futures.allAsList(rulesToKeys.values()), ruleKeys -> {
                LOG.info("Computing RuleKeys for %d new rules took %dms.", unseenRules.size(),
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();
                stopwatch.start();
                return multiContainsAsync(ruleKeys);
            }, executorService);

    Map<BuildRule, ListenableFuture<Boolean>> containsResultsForUnseenRules = Maps
            .asMap(unseenRules,
                    rule -> Futures.transform(keysToCacheResultFuture, keysToCacheResult -> Objects
                            .requireNonNull(keysToCacheResult.get(Futures.getUnchecked(rulesToKeys.get(rule))))
                            .getType().isSuccess(), MoreExecutors.directExecutor()));

    remoteCacheContainsFutures.putAll(containsResultsForUnseenRules);
    Futures.allAsList(containsResultsForUnseenRules.values())
            .addListener(() -> LOG.info("Checking the remote cache for %d rules took %dms.", unseenRules.size(),
                    stopwatch.elapsed(TimeUnit.MILLISECONDS)), MoreExecutors.directExecutor());
}
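
This example never blocks: it chains further asynchronous work off the combined future with Futures.transformAsync and attaches a listener for logging. A small, self-contained sketch of that chaining (illustrative names; summing stands in for multiContainsAsync):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.Arrays;
import java.util.concurrent.Executors;

public class ChainOnCombined {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService exec =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        ListenableFuture<Integer> a = exec.submit(() -> 2);
        ListenableFuture<Integer> b = exec.submit(() -> 3);

        // transformAsync starts the next asynchronous step once all inputs
        // are ready, mirroring the allAsList -> multiContainsAsync chain above.
        ListenableFuture<Integer> sum = Futures.transformAsync(
                Futures.allAsList(Arrays.asList(a, b)),
                values -> exec.submit(() -> values.stream().mapToInt(Integer::intValue).sum()),
                exec);

        System.out.println(sum.get()); // prints 5
        exec.shutdown();
    }
}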

From source file:org.opendaylight.openflowplugin.applications.bulk.o.matic.SalBulkFlowServiceImpl.java

@Override
public Future<RpcResult<Void>> removeFlowsRpc(RemoveFlowsRpcInput input) {
    List<ListenableFuture<RpcResult<RemoveFlowOutput>>> bulkResults = new ArrayList<>();

    for (BulkFlowBaseContentGrouping bulkFlow : input.getBulkFlowItem()) {
        RemoveFlowInputBuilder flowInputBuilder = new RemoveFlowInputBuilder((Flow) bulkFlow);
        final NodeRef nodeRef = bulkFlow.getNode();
        flowInputBuilder.setNode(nodeRef);
        flowInputBuilder.setTableId(bulkFlow.getTableId());
        Future<RpcResult<RemoveFlowOutput>> rpcAddFlowResult = flowService.removeFlow(flowInputBuilder.build());
        bulkResults.add(JdkFutureAdapters.listenInPoolThread(rpcAddFlowResult));
    }
    return handleResultFuture(Futures.allAsList(bulkResults));
}

From source file:org.opendaylight.groupbasedpolicy.renderer.ofoverlay.OFOverlayRenderer.java

private ListenableFuture<List<Void>> applyConfig(@Nonnull OfOverlayConfig config) {
    List<ListenableFuture<Void>> configFutures = new ArrayList<>();
    // TODO add to list when implemented
    switchManager.setEncapsulationFormat(config.getEncapsulationFormat());
    endpointManager.setLearningMode(config.getLearningMode());
    policyManager.setLearningMode(config.getLearningMode());
    if (config.getGbpOfoverlayTableOffset() != null) {
        configFutures.add(policyManager.changeOpenFlowTableOffset(config.getGbpOfoverlayTableOffset()));
    }
    return Futures.allAsList(configFutures);
}

From source file:org.apache.hive.ptest.execution.HostExecutor.java

/**
 * Executes parallel test until the parallel work queue is empty. Then
 * executes the isolated tests on the host. During each phase if a
 * AbortDroneException is thrown the drone is removed possibly
 * leaving this host with zero functioning drones. If all drones
 * are removed the host will be replaced before the next run.
 */
private void executeTests(final BlockingQueue<TestBatch> parallelWorkQueue,
        final BlockingQueue<TestBatch> isolatedWorkQueue, final Set<TestBatch> failedTestResults)
        throws Exception {
    if (mShutdown) {
        mLogger.warn("Shutting down host " + mHost.getName());
        return;
    }
    mLogger.info("Starting parallel execution on " + mHost.getName());
    List<ListenableFuture<Void>> droneResults = Lists.newArrayList();
    for (final Drone drone : ImmutableList.copyOf(mDrones)) {
        droneResults.add(mExecutor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                TestBatch batch = null;
                Stopwatch sw = Stopwatch.createUnstarted();
                try {
                    do {
                        batch = parallelWorkQueue.poll(mNumPollSeconds, TimeUnit.SECONDS);
                        if (mShutdown) {
                            mLogger.warn("Shutting down host " + mHost.getName());
                            return null;
                        }
                        if (batch != null) {
                            numParallelBatchesProcessed++;
                            sw.reset().start();
                            try {
                                if (!executeTestBatch(drone, batch, failedTestResults)) {
                                    failedTestResults.add(batch);
                                }
                            } finally {
                                sw.stop();
                                mLogger.info(
                                        "Finished processing parallel batch [{}] on host {}. ElapsedTime(ms)={}",
                                        new Object[] { batch.getName(), getHost().toShortString(),
                                                sw.elapsed(TimeUnit.MILLISECONDS) });
                            }
                        }
                    } while (!mShutdown && !parallelWorkQueue.isEmpty());
                } catch (AbortDroneException ex) {
                    mDrones.remove(drone); // return value not checked due to concurrent access
                    mLogger.error("Aborting drone during parallel execution", ex);
                    if (batch != null) {
                        Preconditions.checkState(parallelWorkQueue.add(batch),
                                "Could not add batch to parallel queue " + batch);
                    }
                }
                return null;
            }
        }));
    }
    if (mShutdown) {
        mLogger.warn("Shutting down host " + mHost.getName());
        return;
    }
    Futures.allAsList(droneResults).get();
    mLogger.info("Starting isolated execution on " + mHost.getName());
    for (Drone drone : ImmutableList.copyOf(mDrones)) {
        TestBatch batch = null;
        Stopwatch sw = Stopwatch.createUnstarted();
        try {
            do {
                batch = isolatedWorkQueue.poll(mNumPollSeconds, TimeUnit.SECONDS);
                if (batch != null) {
                    numIsolatedBatchesProcessed++;
                    sw.reset().start();
                    try {
                        if (!executeTestBatch(drone, batch, failedTestResults)) {
                            failedTestResults.add(batch);
                        }
                    } finally {
                        sw.stop();
                        mLogger.info("Finished processing isolated batch [{}] on host {}. ElapsedTime(ms)={}",
                                new Object[] { batch.getName(), getHost().toShortString(),
                                        sw.elapsed(TimeUnit.MILLISECONDS) });
                    }
                }
            } while (!mShutdown && !isolatedWorkQueue.isEmpty());
        } catch (AbortDroneException ex) {
            mDrones.remove(drone); // return value not checked due to concurrent access
            mLogger.error("Aborting drone during isolated execution", ex);
            if (batch != null) {
                Preconditions.checkState(isolatedWorkQueue.add(batch),
                        "Could not add batch to isolated queue " + batch);
            }
        }
    }
}

From source file:com.google.pubsub.flic.controllers.GCEController.java

/**
 * Instantiates the load test on Google Compute Engine.
 */
private GCEController(String projectName, Map<String, Map<ClientParams, Integer>> types,
        ScheduledExecutorService executor, Storage storage, Compute compute, Pubsub pubsub) throws Throwable {
    super(executor);
    this.projectName = projectName;
    this.types = types;
    this.storage = storage;
    this.compute = compute;

    // For each unique type of CPS Publisher, create a Topic if it does not already exist, and then
    // delete and recreate any subscriptions attached to it so that we do not have backlog from
    // previous runs.
    List<SettableFuture<Void>> pubsubFutures = new ArrayList<>();
    types.values().forEach(paramsMap -> {
        paramsMap.keySet().stream().map(p -> p.getClientType()).distinct().filter(ClientType::isCpsPublisher)
                .forEach(clientType -> {
                    SettableFuture<Void> pubsubFuture = SettableFuture.create();
                    pubsubFutures.add(pubsubFuture);
                    executor.execute(() -> {
                        String topic = Client.TOPIC_PREFIX + Client.getTopicSuffix(clientType);
                        try {
                            pubsub.projects().topics()
                                    .create("projects/" + projectName + "/topics/" + topic, new Topic())
                                    .execute();
                        } catch (GoogleJsonResponseException e) {
                            if (e.getStatusCode() != ALREADY_EXISTS) {
                                pubsubFuture.setException(e);
                                return;
                            }
                            log.info("Topic already exists, reusing.");
                        } catch (IOException e) {
                            pubsubFuture.setException(e);
                            return;
                        }
                        // Recreate each subscription attached to the topic.
                        paramsMap.keySet().stream()
                                .filter(p -> p.getClientType() == clientType.getSubscriberType())
                                .map(p -> p.subscription).forEach(subscription -> {
                                    try {
                                        pubsub.projects().subscriptions().delete(
                                                "projects/" + projectName + "/subscriptions/" + subscription)
                                                .execute();
                                    } catch (IOException e) {
                                        log.debug(
                                                "Error deleting subscription, assuming it has not yet been created.",
                                                e);
                                    }
                                    try {
                                        pubsub.projects().subscriptions().create(
                                                "projects/" + projectName + "/subscriptions/" + subscription,
                                                new Subscription()
                                                        .setTopic(
                                                                "projects/" + projectName + "/topics/" + topic)
                                                        .setAckDeadlineSeconds(10))
                                                .execute();
                                    } catch (IOException e) {
                                        pubsubFuture.setException(e);
                                    }
                                });
                        pubsubFuture.set(null);
                    });
                });
    });
    try {
        createStorageBucket();
        createFirewall();

        List<SettableFuture<Void>> filesRemaining = new ArrayList<>();
        Files.walk(Paths.get(resourceDirectory)).filter(Files::isRegularFile).forEach(filePath -> {
            SettableFuture<Void> fileRemaining = SettableFuture.create();
            filesRemaining.add(fileRemaining);
            executor.execute(() -> {
                try {
                    uploadFile(filePath);
                    fileRemaining.set(null);
                } catch (Exception e) {
                    fileRemaining.setException(e);
                }
            });
        });
        List<SettableFuture<Void>> createGroupFutures = new ArrayList<>();
        types.forEach((zone, paramsMap) -> paramsMap.forEach((param, n) -> {
            SettableFuture<Void> createGroupFuture = SettableFuture.create();
            createGroupFutures.add(createGroupFuture);
            executor.execute(() -> {
                try {
                    createManagedInstanceGroup(zone, param.getClientType());
                    createGroupFuture.set(null);
                } catch (Exception e) {
                    createGroupFuture.setException(e);
                }
            });
        }));

        // Wait for files and instance groups to be created.
        Futures.allAsList(pubsubFutures).get();
        log.info("Pub/Sub actions completed.");
        Futures.allAsList(filesRemaining).get();
        log.info("File uploads completed.");
        Futures.allAsList(createGroupFutures).get();
        log.info("Instance group creation completed.");

        // Everything is set up, let's start our instances
        log.info("Starting instances.");
        List<SettableFuture<Void>> resizingFutures = new ArrayList<>();
        types.forEach((zone, paramsMap) -> paramsMap.forEach((type, n) -> {
            SettableFuture<Void> resizingFuture = SettableFuture.create();
            resizingFutures.add(resizingFuture);
            executor.execute(() -> {
                try {
                    startInstances(zone, type.getClientType(), n);
                    resizingFuture.set(null);
                } catch (Exception e) {
                    resizingFuture.setException(e);
                }
            });
        }));
        Futures.allAsList(resizingFutures).get();

        // We wait for all instances to finish starting, and get the external network address of each
        // newly created instance.
        List<SettableFuture<Void>> startFutures = new ArrayList<>();
        for (String zone : types.keySet()) {
            Map<ClientParams, Integer> paramsMap = types.get(zone);
            for (ClientParams type : paramsMap.keySet()) {
                SettableFuture<Void> startFuture = SettableFuture.create();
                startFutures.add(startFuture);
                executor.execute(() -> {
                    int numErrors = 0;
                    while (true) {
                        try {
                            addInstanceGroupInfo(zone, type);
                            startFuture.set(null);
                            return;
                        } catch (IOException e) {
                            numErrors++;
                            if (numErrors > 3) {
                                startFuture.setException(new Exception("Failed to get instance information."));
                                return;
                            }
                            log.error("Transient error getting status for instance group, continuing", e);
                        }
                    }
                });
            }
        }

        Futures.allAsList(startFutures).get();
        log.info("Successfully started all instances.");
    } catch (ExecutionException e) {
        shutdown(e.getCause());
        throw e.getCause();
    } catch (Exception e) {
        shutdown(e);
        throw e;
    }
}
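
This controller repeatedly uses a list of SettableFutures plus Futures.allAsList(...).get() as a barrier between setup phases. A condensed, hypothetical sketch of one such barrier:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.SettableFuture;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SettableFutureBarrier {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        List<SettableFuture<Void>> pending = new ArrayList<>();

        for (int i = 0; i < 4; i++) {
            SettableFuture<Void> done = SettableFuture.create();
            pending.add(done);
            executor.execute(() -> {
                try {
                    // ... one unit of setup work goes here ...
                    done.set(null); // mark this unit as finished
                } catch (Exception e) {
                    done.setException(e); // surface the failure through get()
                }
            });
        }

        // Block until every unit finishes; get() rethrows the first failure.
        Futures.allAsList(pending).get();
        executor.shutdown();
    }
}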

From source file:com.yahoo.yqlplus.engine.internal.java.sequences.Sequences.java

public static <ROW, SEQUENCE extends Iterable<ROW>, SET> ListenableFuture<List<ROW>> invokeSet(
        final ListeningExecutorService workExecutor, final Function<List<SET>, SEQUENCE> source,
        final List<SET> keys, Tracer tracer, Timeout timeout, TimeoutHandler handler) throws Exception {
    // TODO OPTIMIZE: List not needed in this case
    List<ListenableFuture<SEQUENCE>> results = Lists.newArrayList();
    results.add(workExecutor.submit(createJob(tracer, source, keys)));
    ListenableFuture<List<SEQUENCE>> gather = Futures.allAsList(results);
    return handler.withTimeout(gatherResults(workExecutor, gather, 1), timeout.verify(),
            timeout.getTickUnits());
}

From source file:dmg.cells.services.CoreRoutingManager.java

private ListenableFuture<List<CellMessage>> sendToPeers(Serializable msg, Collection<CellTunnelInfo> tunnels,
        long timeout) {
    List<FutureCellMessageAnswerable> futures = new ArrayList<>();
    CellAddressCore peer = new CellAddressCore(nucleus.getCellName());
    for (CellTunnelInfo tunnel : tunnels) {
        CellAddressCore domain = new CellAddressCore("*", tunnel.getRemoteCellDomainInfo().getCellDomainName());
        FutureCellMessageAnswerable future = new FutureCellMessageAnswerable();
        futures.add(future);
        nucleus.sendMessage(new CellMessage(new CellPath(domain, peer), msg), false, true, true, future,
                MoreExecutors.directExecutor(), timeout);
    }
    return Futures.allAsList(futures);
}