Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find usage examples for com.google.common.util.concurrent Futures allAsList, collected from a range of open-source projects (Buck, Druid, Hive, Cassandra, and others).

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Documentation

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
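
Two points of the contract are worth keeping in mind while reading the examples below: the output list preserves the order of the input futures (not their completion order), and if any input future fails or is cancelled, the combined future fails or is cancelled as well. The following minimal sketch shows the call in isolation; the class name, the fixed-size pool, and the trivial tasks are illustrative only.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListExample {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        try {
            ListenableFuture<Integer> a = pool.submit(() -> 1);
            ListenableFuture<Integer> b = pool.submit(() -> 2);
            // Completes when both inputs succeed; the result list is in
            // input order, not completion order.
            ListenableFuture<List<Integer>> all = Futures.allAsList(a, b);
            System.out.println(all.get()); // prints [1, 2]
        } finally {
            pool.shutdown();
        }
    }
}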

Usage

From source file: com.facebook.buck.apple.project_generator.WorkspaceAndProjectGenerator.java

private void generateProject(final Map<Path, ProjectGenerator> projectGenerators,
        ListeningExecutorService listeningExecutorService, WorkspaceGenerator workspaceGenerator,
        ImmutableSet<BuildTarget> targetsInRequiredProjects,
        ImmutableMultimap.Builder<BuildTarget, PBXTarget> buildTargetToPbxTargetMapBuilder,
        ImmutableMap.Builder<PBXTarget, Path> targetToProjectPathMapBuilder,
        final Optional<BuildTarget> targetToBuildWithBuck) throws IOException, InterruptedException {
    ImmutableMultimap.Builder<Cell, BuildTarget> projectCellToBuildTargetsBuilder = ImmutableMultimap.builder();
    for (TargetNode<?, ?> targetNode : projectGraph.getNodes()) {
        BuildTarget buildTarget = targetNode.getBuildTarget();
        projectCellToBuildTargetsBuilder.put(rootCell.getCell(buildTarget), buildTarget);
    }
    ImmutableMultimap<Cell, BuildTarget> projectCellToBuildTargets = projectCellToBuildTargetsBuilder.build();
    List<ListenableFuture<GenerationResult>> projectGeneratorFutures = new ArrayList<>();
    for (final Cell projectCell : projectCellToBuildTargets.keySet()) {
        ImmutableMultimap.Builder<Path, BuildTarget> projectDirectoryToBuildTargetsBuilder = ImmutableMultimap
                .builder();
        final ImmutableSet<BuildTarget> cellRules = ImmutableSet
                .copyOf(projectCellToBuildTargets.get(projectCell));
        for (BuildTarget buildTarget : cellRules) {
            projectDirectoryToBuildTargetsBuilder.put(buildTarget.getBasePath(), buildTarget);
        }
        ImmutableMultimap<Path, BuildTarget> projectDirectoryToBuildTargets = projectDirectoryToBuildTargetsBuilder
                .build();
        final Path relativeTargetCell = rootCell.getRoot().relativize(projectCell.getRoot());
        for (final Path projectDirectory : projectDirectoryToBuildTargets.keySet()) {
            final ImmutableSet<BuildTarget> rules = filterRulesForProjectDirectory(projectGraph,
                    ImmutableSet.copyOf(projectDirectoryToBuildTargets.get(projectDirectory)));
            if (Sets.intersection(targetsInRequiredProjects, rules).isEmpty()) {
                continue;
            }

            projectGeneratorFutures.add(listeningExecutorService.submit(() -> {
                GenerationResult result = generateProjectForDirectory(projectGenerators, targetToBuildWithBuck,
                        projectCell, projectDirectory, rules);
                // convert the projectPath to relative to the target cell here
                result = GenerationResult.of(relativeTargetCell.resolve(result.getProjectPath()),
                        result.isProjectGenerated(), result.getRequiredBuildTargets(),
                        result.getBuildTargetToGeneratedTargetMap());
                return result;
            }));
        }
    }

    List<GenerationResult> generationResults;
    try {
        generationResults = Futures.allAsList(projectGeneratorFutures).get();
    } catch (ExecutionException e) {
        Throwables.propagateIfInstanceOf(e.getCause(), IOException.class);
        Throwables.propagateIfPossible(e.getCause());
        throw new IllegalStateException("Unexpected exception: ", e);
    }
    for (GenerationResult result : generationResults) {
        if (!result.isProjectGenerated()) {
            continue;
        }
        workspaceGenerator.addFilePath(result.getProjectPath());
        processGenerationResult(buildTargetToPbxTargetMapBuilder, targetToProjectPathMapBuilder, result);
    }
}
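
The try/catch around Futures.allAsList(...).get() is the standard idiom for turning the ExecutionException wrapper back into the typed exceptions the caller expects: propagateIfInstanceOf rethrows the cause when it is an IOException, propagateIfPossible rethrows unchecked causes, and anything left over becomes an IllegalStateException. (In recent Guava releases these Throwables methods are deprecated in favor of throwIfInstanceOf and throwIfUnchecked.)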

From source file: org.apache.druid.segment.realtime.appenderator.AppenderatorImpl.java

@Override
public void clear() throws InterruptedException {
    // Drop commit metadata, then abandon all segments.

    try {
        throwPersistErrorIfExists();

        if (persistExecutor != null) {
            final ListenableFuture<?> uncommitFuture = persistExecutor.submit(() -> {
                try {
                    commitLock.lock();
                    objectMapper.writeValue(computeCommitFile(), Committed.nil());
                } finally {
                    commitLock.unlock();
                }
                return null;
            });

            // Await uncommit.
            uncommitFuture.get();

            // Drop everything.
            final List<ListenableFuture<?>> futures = Lists.newArrayList();
            for (Map.Entry<SegmentIdentifier, Sink> entry : sinks.entrySet()) {
                futures.add(abandonSegment(entry.getKey(), entry.getValue(), true));
            }

            // Await dropping.
            Futures.allAsList(futures).get();
        }
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
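
Because allAsList fails as soon as any input fails, cleanup code like this depends on the abandonSegment futures not failing spuriously. When the goal is instead to wait for every outcome regardless of individual failures, Guava's successfulAsList is the usual alternative. A minimal sketch (the class name is illustrative):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

import java.util.List;

public class SuccessfulAsListSketch {
    public static void main(String[] args) throws Exception {
        ListenableFuture<String> ok = Futures.immediateFuture("ok");
        ListenableFuture<String> bad =
                Futures.immediateFailedFuture(new RuntimeException("boom"));
        // allAsList would fail here; successfulAsList waits for every input
        // and substitutes null for any future that failed or was cancelled.
        List<String> results = Futures.successfulAsList(ok, bad).get();
        System.out.println(results); // prints [ok, null]
    }
}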

From source file: org.apache.hadoop.hive.ql.exec.tez.TezSessionPool.java

private ListenableFuture<?> resizeUpInternal(int delta) {
    // 1) Cancel the kills if any, to avoid killing the returned sessions.
    //    Also sets the count for the async initialization.
    int oldVal;
    do {
        oldVal = deltaRemaining.get();
    } while (!deltaRemaining.compareAndSet(oldVal, oldVal + delta));
    int toStart = oldVal + delta;
    if (toStart <= 0)
        return createDummyFuture();
    LOG.info("Resizing the pool; adding " + toStart + " sessions");

    // 2) If we need to create some extra sessions, we'd do it just like startup does.
    int threadCount = Math.max(1, Math.min(toStart,
            HiveConf.getIntVar(initConf, ConfVars.HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS)));
    List<ListenableFutureTask<Boolean>> threadTasks = new ArrayList<>(threadCount);
    // This is an async method, so always launch threads, even for a single task.
    for (int i = 0; i < threadCount; ++i) {
        ListenableFutureTask<Boolean> task = ListenableFutureTask
                .create(new CreateSessionsRunnable(deltaRemaining));
        new Thread(task, "Tez pool resize " + i).start();
        threadTasks.add(task);
    }
    return Futures.allAsList(threadTasks);
}
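
Here the combined future serves purely as a completion signal: the Boolean results of the ListenableFutureTask instances are never inspected, and returning Futures.allAsList(threadTasks) simply lets the caller observe when every session-creation thread has finished, or learn that one of them failed.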

From source file: org.apache.cassandra.db.commitlog.CommitLogSegmentManager.java

/**
 * Force a flush on all CFs that are still dirty in @param segments.
 *
 * @return a Future that will finish when all the flushes are complete.
 */
private Future<?> flushDataFrom(List<CommitLogSegment> segments, boolean force) {
    if (segments.isEmpty())
        return Futures.immediateFuture(null);
    final ReplayPosition maxReplayPosition = segments.get(segments.size() - 1).getContext();

    // a map of CfId -> forceFlush() to ensure we only queue one flush per cf
    final Map<UUID, ListenableFuture<?>> flushes = new LinkedHashMap<>();

    for (CommitLogSegment segment : segments) {
        for (UUID dirtyCFId : segment.getDirtyCFIDs()) {
            Pair<String, String> pair = Schema.instance.getCF(dirtyCFId);
            if (pair == null) {
                // even though we remove the schema entry before a final flush when dropping a CF,
            // it's still possible for a writer to race and finish its append after the flush.
                logger.trace("Marking clean CF {} that doesn't exist anymore", dirtyCFId);
                segment.markClean(dirtyCFId, segment.getContext());
            } else if (!flushes.containsKey(dirtyCFId)) {
                String keyspace = pair.left;
                final ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(dirtyCFId);
                // can safely call forceFlush here as we will only ever block (briefly) for other attempts to flush,
                // no deadlock possibility since switchLock removal
                flushes.put(dirtyCFId, force ? cfs.forceFlush() : cfs.forceFlush(maxReplayPosition));
            }
        }
    }

    return Futures.allAsList(flushes.values());
}
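
Two details here generalize well: the LinkedHashMap keyed by CfId ensures that at most one flush future is queued per column family, and passing flushes.values() to Futures.allAsList shows that any Collection of futures satisfies the Iterable parameter. The combined future is returned as-is, leaving the caller to decide whether and when to block on it.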

From source file: com.microsoftopentechnologies.intellij.helpers.azure.sdk.AzureSDKManagerImpl.java

@NotNull
@Override
public List<StorageAccount> getStorageAccounts(@NotNull String subscriptionId) throws AzureCmdException {
    List<StorageAccount> saList = new ArrayList<StorageAccount>();
    StorageManagementClient client = null;

    try {
        client = getStorageManagementClient(subscriptionId);

        ArrayList<com.microsoft.windowsazure.management.storage.models.StorageAccount> storageAccounts = getStorageAccounts(
                client).getStorageAccounts();

        if (storageAccounts == null) {
            return saList;
        }

        List<ListenableFuture<StorageAccount>> saFutureList = new ArrayList<ListenableFuture<StorageAccount>>();

        for (com.microsoft.windowsazure.management.storage.models.StorageAccount storageAccount : storageAccounts) {
            saFutureList.add(getStorageAccountAsync(subscriptionId, client, storageAccount));
        }

        saList.addAll(Futures.allAsList(saFutureList).get());

        return saList;
    } catch (ExecutionException e) {
        throw new AzureCmdException("Error retrieving the VM list", e.getCause());
    } catch (Throwable t) {
        throw new AzureCmdException("Error retrieving the VM list", t);
    } finally {
        if (client != null) {
            try {
                client.close();
            } catch (IOException ignored) {
            }
        }
    }
}
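
This is the classic fan-out/gather shape: one asynchronous lookup is started per storage account, Futures.allAsList gathers them, and the blocking get() turns the combined result back into a plain List that can be appended to saList. The catch clauses then unwrap the ExecutionException cause before wrapping it in the domain-specific AzureCmdException.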

From source file: org.opendaylight.groupbasedpolicy.renderer.ofoverlay.arp.ArpTasker.java

/**
 * Uses ARP to get the MAC address for the given L3 endpoint. Tries to find the MAC for the IP from
 * {@link EndpointL3#getKey()}.<br>
 * {@link EndpointL3#getNetworkContainment()} has to point to a {@link Subnet}.<br>
 * An ARP request is sent from all node connectors obtained from
 * {@link OfOverlayNodeConfig#getExternalInterfaces()}.<br>
 * The MAC address obtained from the ARP reply is added to the given L3 endpoint (if it still exists).<br>
 * An {@link Endpoint} is also created based on the MAC if the subnet from the network containment
 * points to an {@link L2BridgeDomain} directly or through an {@link L2FloodDomain}.
 *
 * @param l3Ep the L3 endpoint which needs a MAC address
 */
public void addMacForL3EpAndCreateEp(final EndpointL3 l3Ep) {
    final Ipv4Address tpa = getIPv4Addresses(l3Ep);
    if (tpa == null) {
        LOG.debug("L3 endpoint {} does not contain IPv4 address.", l3Ep.getKey());
        return;
    }
    ReadOnlyTransaction rTx = dataProvider.newReadOnlyTransaction();
    final SetMultimap<Node, Pair<InstanceIdentifier<NodeConnector>, MacAddress>> extNcWithMacByNode = readNodesWithExternalIfaces(
            rTx);
    if (extNcWithMacByNode.isEmpty()) {
        LOG.debug("No node with external interface was found.");
        rTx.close();
        return;
    }
    final Ipv4Address senderIpAddress = createSenderIpAddress(l3Ep, rTx);
    if (senderIpAddress == null) {
        LOG.warn("Cannot create sender IPv4 address for L3 endpoint {}", l3Ep);
        rTx.close();
        return;
    }
    rTx.close();

    for (final Node node : extNcWithMacByNode.keySet()) {
        final InstanceIdentifier<Node> nodeIid = InstanceIdentifier.builder(Nodes.class)
                .child(Node.class, node.getKey()).build();
        final NodeRef nodeRef = new NodeRef(nodeIid);
        List<ListenableFuture<RpcResult<AddFlowOutput>>> arpFlowResultFutures = new ArrayList<>();
        List<Pair<RemoveFlowInput, EndpointL3Key>> flowsForRemove = new ArrayList<>();
        for (final Pair<InstanceIdentifier<NodeConnector>, MacAddress> extNcIidAndMac : extNcWithMacByNode
                .get(node)) {
            final ArpMessageAddress senderAddress = new ArpMessageAddress(extNcIidAndMac.getRight(),
                    senderIpAddress);
            NodeConnectorId ncId = extNcIidAndMac.getLeft()
                    .firstKeyOf(NodeConnector.class, NodeConnectorKey.class).getId();
            final Flow arpReplyToControllerFlow = createArpReplyToControllerFlow(senderAddress, tpa, ncId);
            flowsForRemove.add(new ImmutablePair<>(
                    new RemoveFlowInputBuilder(arpReplyToControllerFlow).setNode(nodeRef).build(),
                    l3Ep.getKey()));
            final InstanceIdentifier<Flow> flowIid = createFlowIid(arpReplyToControllerFlow, nodeIid);
            Future<RpcResult<AddFlowOutput>> futureAddFlowResult = flowService
                    .addFlow(new AddFlowInputBuilder(arpReplyToControllerFlow).setFlowRef(new FlowRef(flowIid))
                            .setNode(nodeRef).build());
            arpFlowResultFutures.add(JdkFutureAdapters.listenInPoolThread(futureAddFlowResult));
        }
        requestInfoByKey.putAll(createKey(node.getId(), tpa), flowsForRemove);
        ListenableFuture<List<RpcResult<AddFlowOutput>>> futureArpFlowResults = Futures
                .allAsList(arpFlowResultFutures);
        Futures.addCallback(futureArpFlowResults, new FutureCallback<List<RpcResult<AddFlowOutput>>>() {

            @Override
            public void onSuccess(List<RpcResult<AddFlowOutput>> result) {
                for (RpcResult<AddFlowOutput> addFlowResult : result) {
                    if (!addFlowResult.isSuccessful()) {
                        LOG.warn("An ARP Reply to Controller flow was not created on node {} \nErrors: {}",
                                node.getId().getValue(), addFlowResult.getErrors());
                        continue;
                    }
                }
                LOG.debug("ARP Reply to Controller flows were created on node {}", node.getId().getValue());
                for (final Pair<InstanceIdentifier<NodeConnector>, MacAddress> extNcIidAndMac : extNcWithMacByNode
                        .get(node)) {
                    final ArpMessageAddress senderAddress = new ArpMessageAddress(extNcIidAndMac.getRight(),
                            senderIpAddress);
                    ListenableFuture<RpcResult<Void>> futureSendArpResult = arpSender.sendArp(senderAddress,
                            tpa, extNcIidAndMac.getLeft());
                    Futures.addCallback(futureSendArpResult, logResult(tpa, extNcIidAndMac.getLeft()));
                }
            }

            @Override
            public void onFailure(Throwable t) {
                LOG.error(
                        "Illegal state - Installation of ARP flows on node {} failed. Node can contain just some ARP flows.",
                        node.getId(), t);
            }
        });
    }
}
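
Instead of blocking, this example consumes the combined future with Futures.addCallback, so flow installation and the follow-up ARP sends all happen asynchronously. The two-argument addCallback overload used here comes from older Guava; recent versions require an explicit Executor. A minimal sketch of the non-blocking pattern against current Guava (the class name is illustrative):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;

public class AllAsListCallbackSketch {
    public static void main(String[] args) {
        ListenableFuture<List<Integer>> all =
                Futures.allAsList(Futures.immediateFuture(1), Futures.immediateFuture(2));
        Futures.addCallback(all, new FutureCallback<List<Integer>>() {
            @Override
            public void onSuccess(List<Integer> results) {
                System.out.println("all inputs succeeded: " + results);
            }

            @Override
            public void onFailure(Throwable t) {
                // Reached as soon as any single input fails.
                System.err.println("at least one input failed: " + t);
            }
        }, MoreExecutors.directExecutor());
    }
}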

From source file: com.facebook.buck.core.build.engine.impl.CachingBuildEngine.java

private ListenableFuture<BuildResult> getBuildRuleResultWithRuntimeDepsUnlocked(BuildRule rule,
        BuildEngineBuildContext buildContext, ExecutionContext executionContext) {

    // If the rule is already executing, return its result future from the cache.
    ListenableFuture<BuildResult> existingResult = results.get(rule.getBuildTarget());
    if (existingResult != null) {
        return existingResult;
    }

    // Get the future holding the result for this rule and, if we have no additional runtime deps
    // to attach, return it.
    ListenableFuture<RuleKey> ruleKey = calculateRuleKey(rule, buildContext);
    ListenableFuture<BuildResult> result = Futures.transformAsync(ruleKey,
            input -> processBuildRule(rule, buildContext, executionContext),
            serviceByAdjustingDefaultWeightsTo(SCHEDULING_MORE_WORK_RESOURCE_AMOUNTS));
    if (!(rule instanceof HasRuntimeDeps)) {
        results.put(rule.getBuildTarget(), result);
        return result;
    }

    // Collect any runtime deps we have into a list of futures.
    Stream<BuildTarget> runtimeDepPaths = ((HasRuntimeDeps) rule).getRuntimeDeps(ruleFinder);
    List<ListenableFuture<BuildResult>> runtimeDepResults = new ArrayList<>();
    ImmutableSet<BuildRule> runtimeDeps = resolver
            .getAllRules(runtimeDepPaths.collect(ImmutableSet.toImmutableSet()));
    for (BuildRule dep : runtimeDeps) {
        runtimeDepResults.add(getBuildRuleResultWithRuntimeDepsUnlocked(dep, buildContext, executionContext));
    }

    // Create a new combined future, which runs the original rule and all the runtime deps in
    // parallel, but which propagates an error if any one of them fails.
    // It also checks that all runtime deps succeeded.
    ListenableFuture<BuildResult> chainedResult = Futures.transformAsync(Futures.allAsList(runtimeDepResults),
            results -> {
                if (!isKeepGoingEnabled(buildContext)) {
                    for (BuildResult buildResult : results) {
                        if (!buildResult.isSuccess()) {
                            return Futures
                                    .immediateFuture(BuildResult.canceled(rule, buildResult.getFailure()));
                        }
                    }
                }
                return result;
            }, MoreExecutors.directExecutor());
    results.put(rule.getBuildTarget(), chainedResult);
    return chainedResult;
}
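
Rather than blocking, this example chains a check onto the combined future with Futures.transformAsync. The chained function only runs when every runtime-dep future completed normally; deps that completed with an unsuccessful BuildResult are handled inside it (by cancelling the rule when keep-going is disabled), while a dep future that fails exceptionally propagates straight through allAsList to chainedResult without the function running at all.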

From source file: org.opendaylight.openflowplugin.applications.frsync.impl.strategy.SyncPlanPushStrategyIncrementalImpl.java

private ListenableFuture<RpcResult<Void>> flushRemoveGroupPortionAndBarrier(
        final InstanceIdentifier<FlowCapableNode> nodeIdent, final ItemSyncBox<Group> groupsPortion) {
    List<ListenableFuture<RpcResult<RemoveGroupOutput>>> allResults = new ArrayList<>();
    for (Group group : groupsPortion.getItemsToPush()) {
        final KeyedInstanceIdentifier<Group, GroupKey> groupIdent = nodeIdent.child(Group.class,
                group.getKey());
        allResults
                .add(JdkFutureAdapters.listenInPoolThread(groupForwarder.remove(groupIdent, group, nodeIdent)));
    }

    final ListenableFuture<RpcResult<Void>> singleVoidResult = Futures.transform(Futures.allAsList(allResults),
            ReconcileUtil.<RemoveGroupOutput>createRpcResultCondenser("group remove"));

    return Futures.transform(singleVoidResult,
            ReconcileUtil.chainBarrierFlush(PathUtil.digNodePath(nodeIdent), transactionService));
}
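
Here allAsList feeds a further Futures.transform step that condenses the list of per-group RemoveGroupOutput results into a single RpcResult<Void>, which is then chained into a barrier flush. As with addCallback above, the two-argument transform overload is from older Guava; in current versions transform takes an explicit Executor, and transformation with an AsyncFunction moved to transformAsync.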

From source file: de.blizzy.documentr.search.PageIndex.java

private SearchResult findPages(String searchText, int page, Authentication authentication,
        IndexSearcher searcher) throws ParseException, IOException, TimeoutException {

    Future<Query> queryFuture = taskExecutor.submit(new ParseQueryTask(searchText, analyzer));
    Bits visibleDocIds = getVisibleDocIds(searcher, authentication);

    Query query;
    try {
        query = queryFuture.get(INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof ParseException) {
            throw (ParseException) cause;
        } else {
            throw Util.toRuntimeException(cause);
        }
    } finally {
        queryFuture.cancel(false);
    }
    TopDocs docs = searcher.search(query, new PagePermissionFilter(visibleDocIds), HITS_PER_PAGE * page);

    int start = HITS_PER_PAGE * (page - 1);
    int end = Math.min(HITS_PER_PAGE * page, docs.scoreDocs.length);
    IndexReader reader = searcher.getIndexReader();
    List<ListenableFuture<SearchHit>> hitFutures = Lists.newArrayList();
    for (int i = start; i < end; i++) {
        ListenableFuture<SearchHit> hitFuture = taskExecutor
                .submit(new GetSearchHitTask(query, reader, docs.scoreDocs[i].doc, analyzer));
        hitFutures.add(hitFuture);
    }

    try {
        ListenableFuture<List<SearchHit>> allHitsFuture = Futures.allAsList(hitFutures);
        List<SearchHit> hits = allHitsFuture.get(INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
        return new SearchResult(hits, docs.totalHits, HITS_PER_PAGE);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            throw (IOException) cause;
        } else {
            throw Util.toRuntimeException(cause);
        }
    } finally {
        for (ListenableFuture<SearchHit> hitFuture : hitFutures) {
            hitFuture.cancel(false);
        }
    }
}
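
Two details make this interactive search path robust: the blocking get on the combined future is bounded by a timeout (the future returned by allAsList supports the timed get(long, TimeUnit) like any other Future), and the finally block cancels the individual hit futures so that abandoned work does not linger after a timeout or an error.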

From source file: org.apache.cassandra.db.commitlog.AbstractCommitLogSegmentManager.java

/**
 * Force a flush on all CFs that are still dirty in @param segments.
 *
 * @return a Future that will finish when all the flushes are complete.
 */
private Future<?> flushDataFrom(List<CommitLogSegment> segments, boolean force) {
    if (segments.isEmpty())
        return Futures.immediateFuture(null);
    final CommitLogPosition maxCommitLogPosition = segments.get(segments.size() - 1)
            .getCurrentCommitLogPosition();

    // a map of CfId -> forceFlush() to ensure we only queue one flush per cf
    final Map<UUID, ListenableFuture<?>> flushes = new LinkedHashMap<>();

    for (CommitLogSegment segment : segments) {
        for (UUID dirtyCFId : segment.getDirtyCFIDs()) {
            Pair<String, String> pair = Schema.instance.getCF(dirtyCFId);
            if (pair == null) {
                // even though we remove the schema entry before a final flush when dropping a CF,
            // it's still possible for a writer to race and finish its append after the flush.
                logger.trace("Marking clean CF {} that doesn't exist anymore", dirtyCFId);
                segment.markClean(dirtyCFId, CommitLogPosition.NONE, segment.getCurrentCommitLogPosition());
            } else if (!flushes.containsKey(dirtyCFId)) {
                String keyspace = pair.left;
                final ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(dirtyCFId);
                // can safely call forceFlush here as we will only ever block (briefly) for other attempts to flush,
                // no deadlock possibility since switchLock removal
                flushes.put(dirtyCFId, force ? cfs.forceFlush() : cfs.forceFlush(maxCommitLogPosition));
            }
        }
    }

    return Futures.allAsList(flushes.values());
}
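
This is the later Cassandra incarnation of the flushDataFrom method shown earlier: the structure is identical, with CommitLogPosition replacing ReplayPosition as the flush bound. In both versions allAsList hands back a single Future that completes only once every queued flush has finished.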