Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usages of com.google.common.util.concurrent.Futures.allAsList, drawn from the source files listed below.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
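
A minimal, self-contained sketch of that behavior follows (the class name AllAsListBasics and the executor setup are illustrative, not taken from the examples below): the combined future yields the input values in input order, and it fails or is cancelled if any input future fails or is cancelled.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListBasics {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        // Two independent asynchronous computations.
        ListenableFuture<Integer> first = executor.submit(() -> 1);
        ListenableFuture<Integer> second = executor.submit(() -> 2);

        // allAsList combines the inputs; the resulting list preserves input order.
        ListenableFuture<List<Integer>> combined = Futures.allAsList(first, second);
        System.out.println(combined.get()); // prints [1, 2]

        // If either input future had failed or been cancelled, the combined
        // future would have failed or been cancelled as well, and get() would throw.
        executor.shutdown();
    }
}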

Usage

From source file:com.facebook.buck.rules.CachingBuildEngine.java

@Override
public final ListenableFuture<BuildRuleSuccess> build(final BuildContext context, final BuildRule rule) {

    final SettableFuture<BuildRuleSuccess> newFuture = SettableFuture.create();
    SettableFuture<BuildRuleSuccess> existingFuture = results.putIfAbsent(rule.getBuildTarget(), newFuture);

    // If the future was already in results for this build rule, return what was there.
    if (existingFuture != null) {
        return existingFuture;
    }

    // Build all of the deps first and then schedule a callback for this rule to build itself once
    // all of those rules are done building.
    try {
        // Invoke every dep's build() method and create an uber-ListenableFuture that represents the
        // successful completion of all deps.
        List<ListenableFuture<BuildRuleSuccess>> builtDeps = Lists
                .newArrayListWithCapacity(rule.getDeps().size());
        for (BuildRule dep : rule.getDeps()) {
            builtDeps.add(build(context, dep));
        }
        ListenableFuture<List<BuildRuleSuccess>> allBuiltDeps = Futures.allAsList(builtDeps);

        // Schedule this rule to build itself once all of the deps are built.
        context.getStepRunner().addCallback(allBuiltDeps, new FutureCallback<List<BuildRuleSuccess>>() {

            private final BuckEventBus eventBus = context.getEventBus();

            private final OnDiskBuildInfo onDiskBuildInfo = context
                    .createOnDiskBuildInfoFor(rule.getBuildTarget());

            /**
             * It is imperative that:
             * <ol>
             *   <li>The {@link BuildInfoRecorder} is not constructed until all of the
             *       {@link Buildable}'s {@code deps} are guaranteed to be built. This ensures that
             *       the {@link RuleKey} will be available before the {@link BuildInfoRecorder} is
             *       constructed.
             *       <p>
             *       This is why a {@link Supplier} is used.
             *   <li>Only one {@link BuildInfoRecorder} is created per {@link Buildable}. This
             *       ensures that all build-related information for a {@link Buildable} goes through
             *       a single recorder, whose data will be persisted in {@link #onSuccess(List)}.
             *       <p>
             *       This is why {@link Suppliers#memoize(Supplier)} is used.
             * </ol>
             */
            private final Supplier<BuildInfoRecorder> buildInfoRecorder = Suppliers
                    .memoize(new Supplier<BuildInfoRecorder>() {
                        @Override
                        public BuildInfoRecorder get() {
                            RuleKey ruleKey;
                            RuleKey ruleKeyWithoutDeps;
                            ruleKey = rule.getRuleKey();
                            ruleKeyWithoutDeps = rule.getRuleKeyWithoutDeps();

                            return context.createBuildInfoRecorder(rule.getBuildTarget(), ruleKey,
                                    ruleKeyWithoutDeps);
                        }
                    });

            private boolean startOfBuildWasRecordedOnTheEventBus = false;

            @Override
            public void onSuccess(List<BuildRuleSuccess> deps) {
                // Record the start of the build.
                eventBus.post(BuildRuleEvent.started(rule));
                startOfBuildWasRecordedOnTheEventBus = true;

                ruleKeys.putIfAbsent(rule.getBuildTarget(), rule.getRuleKey());
                BuildResult result = buildOnceDepsAreBuilt(rule, context, onDiskBuildInfo,
                        buildInfoRecorder.get(), shouldTryToFetchFromCache(deps));
                if (result.getStatus() == BuildRuleStatus.SUCCESS) {
                    recordBuildRuleSuccess(result);
                } else {
                    recordBuildRuleFailure(result);
                }
            }

            private void recordBuildRuleSuccess(BuildResult result) {
                // Make sure that all of the local files have the same values they would as if the
                // rule had been built locally.
                BuildRuleSuccess.Type success = result.getSuccess();
                if (success.shouldWriteRecordedMetadataToDiskAfterBuilding()) {
                    try {
                        boolean clearExistingMetadata = success.shouldClearAndOverwriteMetadataOnDisk();
                        buildInfoRecorder.get().writeMetadataToDisk(clearExistingMetadata);
                    } catch (IOException e) {
                        onFailure(e);
                    }
                }

                doHydrationAfterBuildStepsFinish(rule, result, onDiskBuildInfo);

                // Do the post to the event bus immediately after the future is set so that the
                // build time measurement is as accurate as possible.
                logBuildRuleFinished(result);

                // Only now that the rule should be in a completely valid state, resolve the future.
                BuildRuleSuccess buildRuleSuccess = new BuildRuleSuccess(rule, result.getSuccess());
                newFuture.set(buildRuleSuccess);

                // Finally, upload to the artifact cache.
                if (result.getSuccess().shouldUploadResultingArtifact()) {
                    buildInfoRecorder.get().performUploadToArtifactCache(context.getArtifactCache(), eventBus);
                }
            }

            @Override
            public void onFailure(Throwable failure) {
                recordBuildRuleFailure(new BuildResult(failure));
            }

            private void recordBuildRuleFailure(BuildResult result) {
                // TODO(mbolin): Delete all genfiles and metadata, as they are not guaranteed to be
                // valid at this point?

                // Note that startOfBuildWasRecordedOnTheEventBus will be false if onSuccess() was
                // never invoked.
                if (startOfBuildWasRecordedOnTheEventBus) {
                    logBuildRuleFinished(result);
                }

                // It seems possible (albeit unlikely) that something could go wrong in
                // recordBuildRuleSuccess() after newFuture has been resolved such that Buck
                // would attempt to resolve the future again, which would fail.
                newFuture.setException(result.getFailure());
            }

            private void logBuildRuleFinished(BuildResult result) {
                eventBus.post(BuildRuleEvent.finished(rule, result.getStatus(), result.getCacheResult(),
                        Optional.fromNullable(result.getSuccess())));
            }
        });
    } catch (Throwable failure) {
        // This is a defensive catch block: if newFuture is never satisfied, then Buck will
        // hang because a callback that is waiting for this rule's future to complete will never be
        // executed.
        newFuture.setException(failure);
    }

    return newFuture;
}
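
The Buck method above is an instance of a common fan-in pattern: collect one future per dependency, combine them with Futures.allAsList, and attach a callback that runs only once every dependency has completed. The following stripped-down sketch shows just that pattern; the task names and the DependencyFanInSketch class are hypothetical, not Buck code.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;

public class DependencyFanInSketch {
    public static void main(String[] args) {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));

        // "Build" each dependency asynchronously, collecting one future per dependency.
        List<ListenableFuture<String>> depFutures = new ArrayList<>();
        for (String dep : Arrays.asList("dep-a", "dep-b", "dep-c")) {
            depFutures.add(executor.submit(() -> "built " + dep));
        }

        // The combined future completes only once every dependency future has succeeded.
        ListenableFuture<List<String>> allDeps = Futures.allAsList(depFutures);

        Futures.addCallback(allDeps, new FutureCallback<List<String>>() {
            @Override
            public void onSuccess(List<String> builtDeps) {
                // All dependencies are done, so the parent task can safely run here.
                System.out.println("Deps ready: " + builtDeps);
            }

            @Override
            public void onFailure(Throwable t) {
                // A single dependency failure fails the combined future.
                System.err.println("A dependency failed: " + t);
            }
        }, MoreExecutors.directExecutor());

        executor.shutdown();
    }
}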

From source file:com.facebook.buck.distributed.DistributedBuildFileHashes.java

private static ListenableFuture<ImmutableMap<BuildRule, RuleKey>> ruleKeyComputation(ActionGraph actionGraph,
        final LoadingCache<ProjectFilesystem, DefaultRuleKeyBuilderFactory> ruleKeyFactories,
        ListeningExecutorService executorService) {
    List<ListenableFuture<Map.Entry<BuildRule, RuleKey>>> ruleKeyEntries = new ArrayList<>();
    for (final BuildRule rule : actionGraph.getNodes()) {
        ruleKeyEntries.add(executorService.submit(new Callable<Map.Entry<BuildRule, RuleKey>>() {
            @Override
            public Map.Entry<BuildRule, RuleKey> call() throws Exception {
                return Maps.immutableEntry(rule, ruleKeyFactories.get(rule.getProjectFilesystem()).build(rule));
            }
        }));
    }
    ListenableFuture<List<Map.Entry<BuildRule, RuleKey>>> ruleKeyComputation = Futures
            .allAsList(ruleKeyEntries);
    return Futures.transform(ruleKeyComputation,
            new Function<List<Map.Entry<BuildRule, RuleKey>>, ImmutableMap<BuildRule, RuleKey>>() {
                @Override
                public ImmutableMap<BuildRule, RuleKey> apply(List<Map.Entry<BuildRule, RuleKey>> input) {
                    return ImmutableMap.copyOf(input);
                }
            }, executorService);
}
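
The transform step above turns the combined list of entries into a map once every rule key has been computed. A compact, self-contained version of the same allAsList-plus-transform shape (with placeholder string keys instead of build rules; the class name AllAsListTransformSketch is illustrative) might look like this:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;

public class AllAsListTransformSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));

        // Compute one (key, value) entry per input item, each as its own future.
        List<ListenableFuture<Map.Entry<String, Integer>>> entryFutures = new ArrayList<>();
        for (String key : Arrays.asList("a", "bb", "ccc")) {
            entryFutures.add(executor.submit(() -> Maps.immutableEntry(key, key.length())));
        }

        // Combine all entry futures, then transform the completed list into a map.
        ListenableFuture<ImmutableMap<String, Integer>> mapFuture = Futures.transform(
                Futures.allAsList(entryFutures),
                (List<Map.Entry<String, Integer>> entries) -> ImmutableMap.copyOf(entries),
                executor);

        System.out.println(mapFuture.get()); // prints {a=1, bb=2, ccc=3}
        executor.shutdown();
    }
}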

From source file:brooklyn.networking.util.ConcurrentReachableAddressFinder.java

/**
 * Checks if any of the given addresses are reachable. It checks them all concurrently, and
 * sets the reference if one is found; otherwise the predicate returns false.
 */
private Predicate<Iterable<String>> updateRefOnAddressReachable(final AtomicReference<String> reachableAddress,
        final Duration timeout) {
    return new Predicate<Iterable<String>>() {
        @Override
        public boolean apply(Iterable<String> input) {
            List<ListenableFuture<?>> futures = Lists.newArrayList();
            for (final String addr : input) {
                futures.add(userExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            if (pingTester.isReachable(addr, timeout)) {
                                // only set if this was found first
                                reachableAddress.compareAndSet(null, addr);
                            }
                        } catch (RuntimeException e) {
                            LOG.warn("Error checking reachability of ip " + addr, e);
                        }
                    }
                }));
            }
            try {
                // TODO Could return faster; don't wait for all futures, just the first that succeeds
                Futures.allAsList(futures).get();
            } catch (Exception e) {
                throw Exceptions.propagate(e);
            }
            return reachableAddress.get() != null;
        }

        @Override
        public String toString() {
            return "setAndReturnTrueIfReachableAddressFound()";
        }
    };
}

From source file:org.opendaylight.openflowplugin.impl.registry.flow.DeviceFlowRegistryImpl.java

@Override
public ListenableFuture<List<Optional<FlowCapableNode>>> fill() {
    LOG.debug("Filling flow registry with flows for node: {}", instanceIdentifier.getKey().getId().getValue());

    // Prepare path for read transaction
    // TODO: Read only Tables, and not entire FlowCapableNode (fix Yang model)
    final InstanceIdentifier<FlowCapableNode> path = instanceIdentifier.augmentation(FlowCapableNode.class);

    // First, try to fill registry with flows from DS/Configuration
    CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> configFuture = fillFromDatastore(
            LogicalDatastoreType.CONFIGURATION, path);

    // Now, try to fill registry with flows from DS/Operational
    // in case of cluster fail over, when clients are not using DS/Configuration
    // for adding flows, but only RPCs
    CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> operationalFuture = fillFromDatastore(
            LogicalDatastoreType.OPERATIONAL, path);

    // And at last, chain and return futures created above.
    // Also, cache this future, so call to DeviceFlowRegistry.close() will be able
    // to cancel this future immediately if it will be still in progress
    final ListenableFuture<List<Optional<FlowCapableNode>>> lastFillFuture = Futures
            .allAsList(Arrays.asList(configFuture, operationalFuture));
    lastFillFutures.add(lastFillFuture);
    return lastFillFuture;
}

From source file:com.facebook.buck.distributed.DistBuildFileHashes.java

private static ListenableFuture<ImmutableMap<BuildRule, RuleKey>> ruleKeyComputation(ActionGraph actionGraph,
        final LoadingCache<ProjectFilesystem, DefaultRuleKeyFactory> ruleKeyFactories,
        ListeningExecutorService executorService) {
    List<ListenableFuture<Map.Entry<BuildRule, RuleKey>>> ruleKeyEntries = new ArrayList<>();
    for (final BuildRule rule : actionGraph.getNodes()) {
        ruleKeyEntries.add(executorService.submit(() -> Maps.immutableEntry(rule,
                ruleKeyFactories.get(rule.getProjectFilesystem()).build(rule))));
    }
    ListenableFuture<List<Map.Entry<BuildRule, RuleKey>>> ruleKeyComputation = Futures
            .allAsList(ruleKeyEntries);
    return Futures.transform(ruleKeyComputation,
            new Function<List<Map.Entry<BuildRule, RuleKey>>, ImmutableMap<BuildRule, RuleKey>>() {
                @Override
                public ImmutableMap<BuildRule, RuleKey> apply(List<Map.Entry<BuildRule, RuleKey>> input) {
                    return ImmutableMap.copyOf(input);
                }
            }, executorService);
}

From source file:org.opendaylight.yangtools.yang.parser.repo.SharedSchemaContextFactory.java

private CheckedFuture<SchemaContext, SchemaResolutionException> createSchemaContext(
        final Collection<SourceIdentifier> requiredSources,
        final Cache<Collection<SourceIdentifier>, SchemaContext> cache,
        final AsyncFunction<List<ASTSchemaSource>, SchemaContext> assembleSources) {
    // Make sources unique
    final List<SourceIdentifier> uniqueSourceIdentifiers = deDuplicateSources(requiredSources);

    final SchemaContext existing = cache.getIfPresent(uniqueSourceIdentifiers);
    if (existing != null) {
        LOG.debug("Returning cached context {}", existing);
        return Futures.immediateCheckedFuture(existing);
    }

    // Request all sources be loaded
    ListenableFuture<List<ASTSchemaSource>> sf = Futures
            .allAsList(Collections2.transform(uniqueSourceIdentifiers, this::requestSource));

    // Detect mismatch between requested Source IDs and IDs that are extracted from parsed source
    // Also remove duplicates if present
    // We are relying on preserved order of uniqueSourceIdentifiers as well as sf
    sf = Futures.transform(sf, new SourceIdMismatchDetector(uniqueSourceIdentifiers));

    // Assemble sources into a schema context
    final ListenableFuture<SchemaContext> cf = Futures.transform(sf, assembleSources);

    // Populate cache when successful
    Futures.addCallback(cf, new FutureCallback<SchemaContext>() {
        @Override
        public void onSuccess(final SchemaContext result) {
            cache.put(uniqueSourceIdentifiers, result);
        }

        @Override
        public void onFailure(@Nonnull final Throwable t) {
            LOG.debug("Failed to assemble sources", t);
        }
    });

    return Futures.makeChecked(cf, MAPPER);
}

From source file:com.teradata.benchto.driver.execution.BenchmarkExecutionDriver.java

@SuppressWarnings("unchecked")
private List<QueryExecutionResult> executeQueries(Benchmark benchmark, int runs, boolean reportStatus) {
    ListeningExecutorService executorService = executorServiceFactory.create(benchmark.getConcurrency());
    try {
        List<Callable<QueryExecutionResult>> queryExecutionCallables = buildQueryExecutionCallables(benchmark,
                runs, reportStatus);
        List<ListenableFuture<QueryExecutionResult>> executionFutures = (List) executorService
                .invokeAll(queryExecutionCallables);
        return Futures.allAsList(executionFutures).get();
    } catch (InterruptedException | ExecutionException e) {
        throw new BenchmarkExecutionException("Could not execute benchmark", e);
    } finally {
        executorService.shutdown();
    }
}

From source file:org.opendaylight.netconf.topology.impl.NetconfNodeOperationalDataAggregator.java

@Override
public ListenableFuture<Node> combineUpdateAttempts(final List<ListenableFuture<Node>> stateFutures) {
    final SettableFuture<Node> future = SettableFuture.create();
    final ListenableFuture<List<Node>> allAsList = Futures.allAsList(stateFutures);
    Futures.addCallback(allAsList, new FutureCallback<List<Node>>() {
        @Override
        public void onSuccess(final List<Node> result) {
            Node base = null;
            NetconfNode baseAugmentation = null;
            AvailableCapabilities masterCaps = null;
            UnavailableCapabilities unavailableMasterCaps = null;
            final ArrayList<NodeStatus> statusList = new ArrayList<>();
            for (final Node node : result) {
                final NetconfNode netconfNode = node.getAugmentation(NetconfNode.class);
                if (base == null && netconfNode.getConnectionStatus().equals(ConnectionStatus.Connected)) {
                    base = node;
                    baseAugmentation = netconfNode;
                }
                // we need to pull out caps from master, since slave does not go through resolution
                if (masterCaps == null) {
                    masterCaps = netconfNode.getAvailableCapabilities();
                    unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
                }
                if (netconfNode.getAvailableCapabilities().getAvailableCapability().size() > masterCaps
                        .getAvailableCapability().size()) {
                    masterCaps = netconfNode.getAvailableCapabilities();
                    unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
                }
                LOG.debug(netconfNode.toString());
                statusList.addAll(netconfNode.getClusteredConnectionStatus().getNodeStatus());
            }

            if (base == null) {
                base = result.get(0);
                baseAugmentation = result.get(0).getAugmentation(NetconfNode.class);
                LOG.debug("All results {}", result.toString());
            }

            final Node aggregatedNode = new NodeBuilder(base).addAugmentation(NetconfNode.class,
                    new NetconfNodeBuilder(baseAugmentation)
                            .setClusteredConnectionStatus(
                                    new ClusteredConnectionStatusBuilder().setNodeStatus(statusList).build())
                            .setAvailableCapabilities(masterCaps)
                            .setUnavailableCapabilities(unavailableMasterCaps).build())
                    .build();
            future.set(aggregatedNode);
        }

        @Override
        public void onFailure(final Throwable t) {
            LOG.error("One of the combined update attempts failed", t);
            future.setException(t);
        }
    });
    return future;
}

From source file:org.apache.streams.facebook.provider.FacebookProvider.java

@Override
public boolean isRunning() {
    if (datums.isEmpty() && executor.isTerminated() && Futures.allAsList(futures).isDone()) {
        LOGGER.info("Completed");
        isComplete.set(true);
        LOGGER.info("Exiting");
    }
    return !isComplete.get();
}

From source file:com.rackspacecloud.blueflood.inputs.processors.BatchWriter.java

public ListenableFuture<List<Boolean>> apply(List<List<Metric>> input) throws Exception {

    final CountDownLatch shortLatch = new CountDownLatch(input.size());
    final AtomicBoolean successfullyPersisted = new AtomicBoolean(true);

    final AtomicBoolean writeTimedOut = new AtomicBoolean(false);
    final long writeStartTime = System.currentTimeMillis();
    final TimerContext actualWriteCtx = writeDurationTimer.time();

    final List<ListenableFuture<Boolean>> resultFutures = new ArrayList<ListenableFuture<Boolean>>();

    for (List<Metric> metrics : input) {
        final int batchId = batchIdGenerator.next();
        final List<Metric> batch = metrics;

        ListenableFuture<Boolean> futureBatchResult = getThreadPool().submit(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                try {
                    writer.insertFull(batch);

                    // marks this shard dirty, so rollup nodes know to pick up the work.
                    for (Metric metric : batch) {
                        context.update(metric.getCollectionTime(),
                                Util.computeShard(metric.getLocator().toString()));
                    }

                    return true;
                } catch (Exception ex) {
                    getLogger().error(ex.getMessage(), ex);
                    successfullyPersisted.set(false);
                    return false;
                } finally {
                    shortLatch.countDown();
                    bufferedMetrics.dec(batch.size());

                    if (System.currentTimeMillis() - writeStartTime > scribeTimeout.toMillis()) {
                        writeTimedOut.set(true);
                    }
                    done();
                }

            }

            private void done() {
                if (shortLatch.getCount() == 0) {
                    actualWriteCtx.stop();

                    if (writeTimedOut.get()) {
                        exceededScribeProcessingTime.mark();
                        getLogger().error("Exceeded scribe timeout " + scribeTimeout.toString()
                                + " before persisting " + "all metrics for scribe batch " + batchId);
                    }

                    if (!successfullyPersisted.get()) {
                        getLogger()
                                .warn("Did not persist all metrics successfully for scribe batch " + batchId);
                    }
                }
            }

        });

        resultFutures.add(futureBatchResult);
    }

    return Futures.allAsList(resultFutures);
}