Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usages of com.google.common.util.concurrent.Futures.allAsList, drawn from open-source projects.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
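
Before the project examples below, here is a minimal, self-contained sketch of the basic pattern (class and variable names are illustrative, not taken from any of the projects below): two ListenableFutures are combined with Futures.allAsList, and a callback fires once every input has succeeded.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListSketch {
    public static void main(String[] args) {
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));

        ListenableFuture<String> first = executor.submit(() -> "first");
        ListenableFuture<String> second = executor.submit(() -> "second");

        // Combine the inputs; the resulting future completes only when every input succeeds.
        ListenableFuture<List<String>> combined = Futures.allAsList(ImmutableList.of(first, second));

        Futures.addCallback(combined, new FutureCallback<List<String>>() {
            @Override
            public void onSuccess(List<String> values) {
                // Values appear in the same order as the input futures.
                System.out.println(values);
            }

            @Override
            public void onFailure(Throwable t) {
                // If any input fails or is cancelled, the combined future fails with that cause.
                t.printStackTrace();
            }
        }, MoreExecutors.directExecutor());

        executor.shutdown();
    }
}

If partial results are acceptable when some inputs fail, Futures.successfulAsList is the usual alternative; the examples that follow all use the all-or-nothing allAsList form.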

Usage

From source file:dagger.producers.internal.SetProducer.java

/**
 * Returns a future {@link Set} whose iteration order is that of the elements given by each of the
 * producers, which are invoked in the order given at creation.
 *
 * <p>If any of the delegate collections, or any elements therein, are null, then this future will
 * fail with a NullPointerException.
 *
 * <p>Canceling this future will attempt to cancel all of the component futures, and if any of the
 * delegate futures fails or is canceled, this one is, too.
 *
 * @throws NullPointerException if any of the delegate producers return null
 */
@Override
public ListenableFuture<Set<T>> compute() {
    List<ListenableFuture<T>> individualFutures = new ArrayList<ListenableFuture<T>>(
            individualProducers.size());
    for (Producer<T> producer : individualProducers) {
        individualFutures.add(checkNotNull(producer.get()));
    }

    // Presize the list of collections produced by the number of collectionProducers, with one more
    // for the consolidated individualFutures from Futures.allAsList.
    List<ListenableFuture<? extends Collection<T>>> futureCollections = new ArrayList<ListenableFuture<? extends Collection<T>>>(
            collectionProducers.size() + 1);
    futureCollections.add(Futures.allAsList(individualFutures));
    for (Producer<Collection<T>> producer : collectionProducers) {
        futureCollections.add(checkNotNull(producer.get()));
    }
    return transform(Futures.allAsList(futureCollections), new Function<List<Collection<T>>, Set<T>>() {
        @Override
        public Set<T> apply(List<Collection<T>> sets) {
            ImmutableSet.Builder<T> builder = ImmutableSet.builder();
            for (Collection<T> set : sets) {
                builder.addAll(set);
            }
            return builder.build();
        }
    }, directExecutor());
}

From source file:com.google.pubsub.flic.controllers.Controller.java

/**
 * Gets the results for all available types.
 *
 * @return the map from type to result; every running type is a valid key
 */
public Map<Client.ClientType, LoadtestStats> getStatsForAllClientTypes() {
    final Map<Client.ClientType, LoadtestStats> results = new HashMap<>();
    List<ListenableFuture<Void>> resultFutures = new ArrayList<>();
    for (Client.ClientType type : Client.ClientType.values()) {
        SettableFuture<Void> resultFuture = SettableFuture.create();
        resultFutures.add(resultFuture);
        executor.submit(() -> {
            try {
                results.put(type, getStatsForClientType(type));
                resultFuture.set(null);
            } catch (Throwable t) {
                resultFuture.setException(t);
            }
        });
    }
    try {
        Futures.allAsList(resultFutures).get();
    } catch (ExecutionException | InterruptedException e) {
        log.error("Failed health check, will return results accumulated during test up to now.",
                e instanceof ExecutionException ? e.getCause() : e);
    }
    return results;
}

From source file:com.facebook.buck.rules.UnskippedRulesTracker.java

private ListenableFuture<Void> acquireReferences(ImmutableSet<BuildRule> rules) {
    ImmutableList.Builder<ListenableFuture<Void>> futures = ImmutableList.builder();
    for (BuildRule rule : rules) {
        futures.add(acquireReference(rule));
    }
    return Futures.transform(Futures.allAsList(futures.build()), NULL_FUNCTION);
}

From source file:io.druid.client.CachingQueryRunner.java

@Override
public Sequence<T> run(Query<T> query, Map<String, Object> responseContext) {
    final CacheStrategy strategy = toolChest.getCacheStrategy(query);

    final boolean populateCache = query.getContextPopulateCache(true) && strategy != null
            && cacheConfig.isPopulateCache() && cacheConfig.isQueryCacheable(query);

    final boolean useCache = query.getContextUseCache(true) && strategy != null && cacheConfig.isUseCache()
            && cacheConfig.isQueryCacheable(query);

    final Cache.NamedKey key;
    if (strategy != null && (useCache || populateCache)) {
        key = CacheUtil.computeSegmentCacheKey(segmentIdentifier, segmentDescriptor,
                strategy.computeCacheKey(query));
    } else {
        key = null;
    }

    if (useCache) {
        final Function cacheFn = strategy.pullFromCache();
        final byte[] cachedResult = cache.get(key);
        if (cachedResult != null) {
            final TypeReference cacheObjectClazz = strategy.getCacheObjectClazz();

            return Sequences.map(new BaseSequence<>(new BaseSequence.IteratorMaker<T, Iterator<T>>() {
                @Override
                public Iterator<T> make() {
                    try {
                        if (cachedResult.length == 0) {
                            return Iterators.emptyIterator();
                        }

                        return mapper.readValues(mapper.getFactory().createParser(cachedResult),
                                cacheObjectClazz);
                    } catch (IOException e) {
                        throw Throwables.propagate(e);
                    }
                }

                @Override
                public void cleanup(Iterator<T> iterFromMake) {
                }
            }), cacheFn);
        }
    }

    final Collection<ListenableFuture<?>> cacheFutures = Collections
            .synchronizedList(Lists.<ListenableFuture<?>>newLinkedList());
    if (populateCache) {
        final Function cacheFn = strategy.prepareForCache();
        final List<Object> cacheResults = Lists.newLinkedList();

        return Sequences.withEffect(Sequences.map(base.run(query, responseContext), new Function<T, T>() {
            @Override
            public T apply(final T input) {
                cacheFutures.add(backgroundExecutorService.submit(new Runnable() {
                    @Override
                    public void run() {
                        cacheResults.add(cacheFn.apply(input));
                    }
                }));
                return input;
            }
        }), new Runnable() {
            @Override
            public void run() {
                try {
                    Futures.allAsList(cacheFutures).get();
                    CacheUtil.populate(cache, mapper, key, cacheResults);
                } catch (Exception e) {
                    log.error(e, "Error while getting future for cache task");
                    throw Throwables.propagate(e);
                }
            }
        }, backgroundExecutorService);
    } else {
        return base.run(query, responseContext);
    }
}

From source file:com.continuuity.loom.common.zookeeper.lib.ZKMap.java

public void clear() {
    currentView.set(Collections.<String, T>emptyMap());
    // Hint: again, we could make removal more efficient by cleaning up only when the in-memory collection
    //       actually removed something, but then we may face races...
    NodeChildren nodeChildren = Futures.getUnchecked(zkClient.getChildren(""));
    List<ListenableFuture<String>> deleteFutures = Lists.newArrayList();
    for (String node : nodeChildren.getChildren()) {
        deleteFutures.add(ZKClientExt.delete(zkClient, getNodePath(node), true));
    }
    Futures.getUnchecked(Futures.allAsList(deleteFutures));
}

From source file:dagger.producers.internal.SetOfProducedProducer.java

/**
 * Returns a future {@link Set} of {@link Produced} values whose iteration order is that of the
 * elements given by each of the producers, which are invoked in the order given at creation.
 *
 * <p>If any of the delegate collections, or any elements therein, are null, then that
 * corresponding {@code Produced} element will fail with a NullPointerException.
 *
 * <p>Canceling this future will attempt to cancel all of the component futures; but if any of the
 * delegate futures fail or are canceled, this future succeeds, with the appropriate failed
 * {@link Produced}.
 *
 * @throws NullPointerException if any of the delegate producers return null
 */
@Override
public ListenableFuture<Set<Produced<T>>> compute() {
    List<ListenableFuture<? extends Produced<? extends Collection<T>>>> futureProducedCollections = new ArrayList<ListenableFuture<? extends Produced<? extends Collection<T>>>>(
            individualProducers.size() + collectionProducers.size());
    for (Producer<T> producer : individualProducers) {
        // TODO(ronshapiro): Don't require individual productions to be added to a collection just to
        // be materialized into futureProducedCollections.
        futureProducedCollections.add(Producers
                .createFutureProduced(Producers.createFutureSingletonSet(checkNotNull(producer.get()))));
    }
    for (Producer<Collection<T>> producer : collectionProducers) {
        futureProducedCollections.add(Producers.createFutureProduced(checkNotNull(producer.get())));
    }

    return Futures.transform(Futures.allAsList(futureProducedCollections),
            new Function<List<Produced<? extends Collection<T>>>, Set<Produced<T>>>() {
                @Override
                public Set<Produced<T>> apply(List<Produced<? extends Collection<T>>> producedCollections) {
                    ImmutableSet.Builder<Produced<T>> builder = ImmutableSet.builder();
                    for (Produced<? extends Collection<T>> producedCollection : producedCollections) {
                        try {
                            Collection<T> collection = producedCollection.get();
                            if (collection == null) {
                                // TODO(beder): This is a vague exception. Can we somehow point to the failing
                                // producer? See the similar comment in the component writer about null
                                // provisions.
                                builder.add(Produced.<T>failed(new NullPointerException(
                                        "Cannot contribute a null collection into a producer set binding when"
                                                + " it's injected as Set<Produced<T>>.")));
                            } else {
                                for (T value : collection) {
                                    if (value == null) {
                                        builder.add(Produced.<T>failed(new NullPointerException(
                                                "Cannot contribute a null element into a producer set binding"
                                                        + " when it's injected as Set<Produced<T>>.")));
                                    } else {
                                        builder.add(Produced.successful(value));
                                    }
                                }
                            }
                        } catch (ExecutionException e) {
                            builder.add(Produced.<T>failed(e.getCause()));
                        }
                    }
                    return builder.build();
                }
            }, directExecutor());
}

From source file:co.cask.cdap.internal.app.deploy.pipeline.ProgramGenerationStage.java

@Override
public void process(final ApplicationDeployable input) throws Exception {
    ImmutableList.Builder<Program> programs = ImmutableList.builder();
    final ApplicationSpecification appSpec = input.getSpecification();
    final String applicationName = appSpec.getName();

    final ArchiveBundler bundler = new ArchiveBundler(input.getLocation());

    // Make sure the namespace directory exists
    Id.Namespace namespaceId = input.getId().getNamespace();
    Location namespacedLocation = namespacedLocationFactory.get(namespaceId);
    // Note: deployApplication/deployAdapters have already checked for namespaceDir existence, so not checking again
    // Make sure we have a directory to store the original artifact.
    final Location appFabricDir = namespacedLocation.append(configuration.get(Constants.AppFabric.OUTPUT_DIR));

    // Check exists, create, check exists again to avoid failure due to race condition.
    if (!appFabricDir.exists() && !appFabricDir.mkdirs() && !appFabricDir.exists()) {
        throw new IOException(String.format("Failed to create directory %s", appFabricDir.toURI().getPath()));
    }

    // Now, we iterate through all ProgramSpecification and generate programs
    Iterable<ProgramSpecification> specifications = Iterables.concat(appSpec.getMapReduce().values(),
            appSpec.getFlows().values(), appSpec.getWorkflows().values(), appSpec.getServices().values(),
            appSpec.getSpark().values(), appSpec.getWorkers().values());

    // Generate webapp program if required
    Set<String> servingHostNames = WebappProgramRunner
            .getServingHostNames(Locations.newInputSupplier(input.getLocation()));

    if (!servingHostNames.isEmpty()) {
        specifications = Iterables.concat(specifications,
                ImmutableList.of(createWebappSpec(ProgramType.WEBAPP.toString().toLowerCase())));
    }

    ListeningExecutorService executorService = MoreExecutors.listeningDecorator(
            Executors.newFixedThreadPool(10, Threads.createDaemonThreadFactory("program-gen-%d")));
    try {
        List<ListenableFuture<Location>> futures = Lists.newArrayList();
        for (final ProgramSpecification spec : specifications) {
            ListenableFuture<Location> future = executorService.submit(new Callable<Location>() {
                @Override
                public Location call() throws Exception {
                    ProgramType type = ProgramTypes.fromSpecification(spec);
                    String name = String.format(Locale.ENGLISH, "%s/%s", applicationName, type);
                    Location programDir = appFabricDir.append(name);
                    if (!programDir.exists()) {
                        programDir.mkdirs();
                    }
                    Location output = programDir.append(String.format("%s.jar", spec.getName()));
                    Id.Program programId = Id.Program.from(input.getId(), type, spec.getName());
                    return ProgramBundle.create(programId, bundler, output, spec.getClassName(), appSpec);
                }
            });
            futures.add(future);
        }

        for (Location jarLocation : Futures.allAsList(futures).get()) {
            programs.add(Programs.create(jarLocation, null));
        }
    } finally {
        executorService.shutdown();
    }

    // moves the <appfabricdir>/archive/<app-name>.jar to <appfabricdir>/<app-name>/archive/<app-name>.jar
    // Cannot do this before starting the deploy pipeline because appId could be null at that time.
    // However, it is guaranteed to be non-null from VerificationsStage onwards
    Location newArchiveLocation = appFabricDir.append(applicationName).append(Constants.ARCHIVE_DIR);
    moveAppArchiveUnderAppDirectory(input.getLocation(), newArchiveLocation);
    Location programLocation = newArchiveLocation.append(input.getLocation().getName());
    ApplicationDeployable updatedAppDeployable = new ApplicationDeployable(input.getId(),
            input.getSpecification(), input.getExistingAppSpec(), input.getApplicationDeployScope(),
            programLocation);

    // Emits the received specification with programs.
    emit(new ApplicationWithPrograms(updatedAppDeployable, programs.build()));
}

From source file:org.opendaylight.mdsal.singleton.dom.impl.AbstractClusterSingletonServiceProviderImpl.java

@Override
public final void close() {
    LOG.debug("Close method for ClusterSingletonService Provider {}", this.getClass().getName());

    if (serviceEntityListenerReg != null) {
        serviceEntityListenerReg.close();
        serviceEntityListenerReg = null;
    }

    final List<ListenableFuture<List<Void>>> listGroupCloseListFuture = new ArrayList<>();

    for (final ClusterSingletonServiceGroup<P, E, C> serviceGroup : serviceGroupMap.values()) {
        listGroupCloseListFuture.add(serviceGroup.closeClusterSingletonGroup());
    }

    final ListenableFuture<List<List<Void>>> finalCloseFuture = Futures.allAsList(listGroupCloseListFuture);
    Futures.addCallback(finalCloseFuture, new FutureCallback<List<List<Void>>>() {

        @Override
        public void onSuccess(final List<List<Void>> result) {
            cleaningProvider(null);
        }

        @Override
        public void onFailure(final Throwable throwable) {
            cleaningProvider(throwable);
        }
    });
}

From source file:com.android.tools.perflib.heap.memoryanalyzer.MemoryAnalyzer.java

/**
 * Analyze the given {@code captureGroup}. It is highly recommended to call this method on the
 * same thread as that of the {@code synchronizingExecutor} to avoid race conditions.
 *
 * @param captureGroup          captures to analyze
 * @param synchronizingExecutor executor to synchronize the results aggregation
 * @param taskExecutor          executor service to run the analyzer tasks on
 * @return an AnalysisReport that the caller can listen to
 */
@NonNull
@Override
public AnalysisReport analyze(@NonNull CaptureGroup captureGroup,
        @NonNull Set<AnalysisReport.Listener> listeners, @NonNull Set<? extends AnalyzerTask> tasks,
        @NonNull final Executor synchronizingExecutor, @NonNull ExecutorService taskExecutor) {
    // TODO move this to Analyzer once Configuration is implemented
    if (mOutstandingReport != null) {
        return mOutstandingReport;
    }

    for (AnalyzerTask task : tasks) {
        if (task instanceof MemoryAnalyzerTask) {
            mTasks.add((MemoryAnalyzerTask) task);
        }
    }

    mOutstandingReport = new AnalysisReport();
    mOutstandingReport.addResultListeners(listeners);

    List<ListenableFutureTask<List<AnalysisResultEntry>>> futuresList = new ArrayList<ListenableFutureTask<List<AnalysisResultEntry>>>();

    for (final Capture capture : captureGroup.getCaptures()) {
        if (accept(capture)) {
            final Snapshot snapshot = capture.getRepresentation(Snapshot.class);
            if (snapshot == null) {
                continue;
            }

            List<Heap> heapsToUse = new ArrayList<Heap>(snapshot.getHeaps().size());
            for (Heap heap : snapshot.getHeaps()) {
                if ("app".equals(heap.getName())) {
                    heapsToUse.add(heap);
                    break;
                }
            }
            final MemoryAnalyzerTask.Configuration configuration = new MemoryAnalyzerTask.Configuration(
                    heapsToUse);

            for (final MemoryAnalyzerTask task : mTasks) {
                final ListenableFutureTask<List<AnalysisResultEntry>> futureTask = ListenableFutureTask
                        .create(new Callable<List<AnalysisResultEntry>>() {
                            @Override
                            public List<AnalysisResultEntry> call() throws Exception {
                                if (mCancelAnalysis) {
                                    return null;
                                }

                                return task.analyze(configuration, snapshot);
                            }
                        });
                Futures.addCallback(futureTask, new FutureCallback<List<AnalysisResultEntry>>() {
                    @Override
                    public void onSuccess(List<AnalysisResultEntry> result) {
                        if (mCancelAnalysis) {
                            return;
                        }

                        mOutstandingReport.addAnalysisResultEntries(result);
                    }

                    @Override
                    public void onFailure(@Nullable Throwable t) {

                    }
                }, synchronizingExecutor);
                taskExecutor.submit(futureTask);
                futuresList.add(futureTask);
            }
        }
    }

    mRunningAnalyzers = Futures.allAsList(futuresList);
    Futures.addCallback(mRunningAnalyzers, new FutureCallback<List<List<AnalysisResultEntry>>>() {
        @Override
        public void onSuccess(@Nullable List<List<AnalysisResultEntry>> result) {
            mAnalysisComplete = true;
            mOutstandingReport.setCompleted();
        }

        @Override
        public void onFailure(@NonNull Throwable t) {
            mAnalysisComplete = true;
            mOutstandingReport.setCancelled();
        }
    }, synchronizingExecutor);
    return mOutstandingReport;
}

From source file:org.opendaylight.controller.cluster.sharding.ShardProxyTransaction.java

@Override
public ListenableFuture<Boolean> validate() {
    LOG.debug("Validating transaction for shard {}", shardRoot);

    checkTransactionReadied();
    final List<ListenableFuture<Boolean>> futures = cohorts.stream()
            .map(DOMStoreThreePhaseCommitCohort::canCommit).collect(Collectors.toList());
    final SettableFuture<Boolean> ret = SettableFuture.create();

    Futures.addCallback(Futures.allAsList(futures), new FutureCallback<List<Boolean>>() {
        @Override
        public void onSuccess(final List<Boolean> result) {
            ret.set(true);
        }

        @Override
        public void onFailure(final Throwable throwable) {
            ret.setException(throwable);
        }
    }, MoreExecutors.directExecutor());

    return ret;
}