Example usage for com.google.common.util.concurrent Futures successfulAsList

Introduction

This page collects example usages of com.google.common.util.concurrent Futures successfulAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> successfulAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its successful input futures. The results are in the same order as the input futures; if any input future fails or is cancelled, its corresponding position in the result list contains null rather than causing the combined future itself to fail.
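
As a quick orientation before the project examples under Usage, here is a minimal, self-contained sketch of that null-filling behaviour. It is not taken from any of the projects below; the class name and values are illustrative only.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

import java.util.List;

public class SuccessfulAsListSketch {
    public static void main(String[] args) throws Exception {
        ListenableFuture<String> ok = Futures.immediateFuture("ok");
        ListenableFuture<String> failed =
                Futures.immediateFailedFuture(new IllegalStateException("boom"));

        // The combined future succeeds even though one input failed;
        // the failed input is represented by a null entry, in input order.
        ListenableFuture<List<String>> all =
                Futures.successfulAsList(ImmutableList.of(ok, failed));

        System.out.println(all.get()); // prints [ok, null]
    }
}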

Usage

From source file:org.opendaylight.openflowplugin.impl.services.SalFlowsBatchServiceImpl.java

@Override
public Future<RpcResult<RemoveFlowsBatchOutput>> removeFlowsBatch(final RemoveFlowsBatchInput input) {
    LOG.trace("Removing flows @ {} : {}", PathUtil.extractNodeId(input.getNode()),
            input.getBatchRemoveFlows().size());
    final ArrayList<ListenableFuture<RpcResult<RemoveFlowOutput>>> resultsLot = new ArrayList<>();
    for (BatchFlowInputGrouping batchFlow : input.getBatchRemoveFlows()) {
        final RemoveFlowInput removeFlowInput = new RemoveFlowInputBuilder(batchFlow)
                .setFlowRef(createFlowRef(input.getNode(), batchFlow)).setNode(input.getNode()).build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salFlowService.removeFlow(removeFlowInput)));
    }

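    // successfulAsList waits for every removeFlow call to finish; a failed call
    // contributes a null entry instead of failing the combined future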
    final ListenableFuture<RpcResult<List<BatchFailedFlowsOutput>>> commonResult = Futures.transform(
            Futures.successfulAsList(resultsLot),
            FlowUtil.<RemoveFlowOutput>createCumulatingFunction(input.getBatchRemoveFlows()));

    ListenableFuture<RpcResult<RemoveFlowsBatchOutput>> removeFlowsBulkFuture = Futures.transform(commonResult,
            FlowUtil.FLOW_REMOVE_TRANSFORM);

    if (input.isBarrierAfter()) {
        removeFlowsBulkFuture = BarrierUtil.chainBarrier(removeFlowsBulkFuture, input.getNode(),
                transactionService, FlowUtil.FLOW_REMOVE_COMPOSING_TRANSFORM);
    }

    return removeFlowsBulkFuture;
}

From source file:org.thingsboard.server.dao.alarm.CassandraAlarmDao.java

@Override
public ListenableFuture<List<AlarmInfo>> findAlarms(TenantId tenantId, AlarmQuery query) {
    log.trace("Try to find alarms by entity [{}], searchStatus [{}], status [{}] and pageLink [{}]",
            query.getAffectedEntityId(), query.getSearchStatus(), query.getStatus(), query.getPageLink());
    EntityId affectedEntity = query.getAffectedEntityId();
    String searchStatusName;
    if (query.getSearchStatus() == null && query.getStatus() == null) {
        searchStatusName = AlarmSearchStatus.ANY.name();
    } else if (query.getSearchStatus() != null) {
        searchStatusName = query.getSearchStatus().name();
    } else {
        searchStatusName = query.getStatus().name();
    }
    String relationType = BaseAlarmService.ALARM_RELATION_PREFIX + searchStatusName;
    ListenableFuture<List<EntityRelation>> relations = relationDao.findRelations(tenantId, affectedEntity,
            relationType, RelationTypeGroup.ALARM, EntityType.ALARM, query.getPageLink());
    return Futures.transformAsync(relations, input -> {
        List<ListenableFuture<AlarmInfo>> alarmFutures = new ArrayList<>(input.size());
        for (EntityRelation relation : input) {
            alarmFutures.add(
                    Futures.transform(findAlarmByIdAsync(tenantId, relation.getTo().getId()), AlarmInfo::new));
        }
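        // alarms that could not be loaded appear as null entries in the combined result list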
        return Futures.successfulAsList(alarmFutures);
    });
}

From source file:org.thingsboard.server.dao.alarm.AlarmDaoImpl.java

@Override
public ListenableFuture<List<AlarmInfo>> findAlarms(AlarmQuery query) {
    log.trace("Try to find alarms by entity [{}], searchStatus [{}], status [{}] and pageLink [{}]",
            query.getAffectedEntityId(), query.getSearchStatus(), query.getStatus(), query.getPageLink());
    EntityId affectedEntity = query.getAffectedEntityId();
    String searchStatusName;
    if (query.getSearchStatus() == null && query.getStatus() == null) {
        searchStatusName = AlarmSearchStatus.ANY.name();
    } else if (query.getSearchStatus() != null) {
        searchStatusName = query.getSearchStatus().name();
    } else {
        searchStatusName = query.getStatus().name();
    }
    String relationType = BaseAlarmService.ALARM_RELATION_PREFIX + searchStatusName;
    ListenableFuture<List<EntityRelation>> relations = relationDao.findRelations(affectedEntity, relationType,
            RelationTypeGroup.ALARM, EntityType.ALARM, query.getPageLink());
    return Futures.transform(relations, (AsyncFunction<List<EntityRelation>, List<AlarmInfo>>) input -> {
        List<ListenableFuture<AlarmInfo>> alarmFutures = new ArrayList<>(input.size());
        for (EntityRelation relation : input) {
            alarmFutures.add(Futures.transform(findAlarmByIdAsync(relation.getTo().getId()),
                    (Function<Alarm, AlarmInfo>) alarm1 -> new AlarmInfo(alarm1)));
        }
        return Futures.successfulAsList(alarmFutures);
    });
}

From source file:com.facebook.buck.event.listener.RemoteLogUploaderEventListener.java

@Override
public synchronized void outputTrace(BuildId buildId) throws InterruptedException {
    ImmutableSet<ListenableFuture<Void>> uploads;
    ListenableFuture<Void> drain = remoteLogger.close();
    synchronized (pendingUploads) {
        pendingUploads.add(drain);
        uploads = ImmutableSet.copyOf(pendingUploads);
        pendingUploads.clear();
    }
    try {
        Futures.successfulAsList(uploads).get(BlockingHttpEndpoint.DEFAULT_COMMON_TIMEOUT_MS * 100,
                TimeUnit.MILLISECONDS);
    } catch (ExecutionException | TimeoutException e) {
        LOG.info(e, "Failed uploading remaining log data to remote server");
    }
}

From source file:org.apache.brooklyn.util.net.ReachableSocketFinder.java

/**
 * Checks if any of the given HostAndPorts are reachable. It checks them all concurrently, and
 * returns the first that is reachable (or Optional.absent).
 */
private Optional<HostAndPort> tryReachable(Collection<? extends HostAndPort> sockets, Duration timeout) {
    final AtomicReference<HostAndPort> reachableSocket = new AtomicReference<HostAndPort>();
    final CountDownLatch latch = new CountDownLatch(1);
    List<ListenableFuture<?>> futures = Lists.newArrayList();
    for (final HostAndPort socket : sockets) {
        futures.add(userExecutor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (socketTester.apply(socket)) {
                        reachableSocket.compareAndSet(null, socket);
                        latch.countDown();
                    }
                } catch (RuntimeInterruptedException e) {
                    throw e;
                } catch (RuntimeException e) {
                    LOG.warn("Error checking reachability of ip:port " + socket, e);
                }
            }
        }));
    }

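    // the combined future completes once every probe has finished, even if some probes failed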
    ListenableFuture<List<Object>> compoundFuture = Futures.successfulAsList(futures);
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        while (reachableSocket.get() == null && !compoundFuture.isDone() && timeout.isLongerThan(stopwatch)) {
            latch.await(50, TimeUnit.MILLISECONDS);
        }
        return Optional.fromNullable(reachableSocket.get());

    } catch (InterruptedException e) {
        throw Exceptions.propagate(e);
    } finally {
        for (Future<?> future : futures) {
            future.cancel(true);
        }
    }
}

From source file:co.cask.cdap.internal.app.runtime.service.InMemoryProgramRuntimeService.java

private void stopAllPrograms() {

    LOG.info("Stopping all running programs.");

    List<ListenableFuture<ProgramController>> futures = Lists.newLinkedList();
    for (ProgramType type : ProgramType.values()) {
        for (Map.Entry<RunId, RuntimeInfo> entry : list(type).entrySet()) {
            RuntimeInfo runtimeInfo = entry.getValue();
            if (isRunning(runtimeInfo.getProgramId())) {
                futures.add(runtimeInfo.getController().stop());
            }
        }
    }
    // unchecked because we cannot do much if it fails. We will still shut down the standalone CDAP instance.
    try {
        Futures.successfulAsList(futures).get(60, TimeUnit.SECONDS);
        LOG.info("All programs have been stopped.");
    } catch (ExecutionException e) {
        // note this should not happen because we wait on a successfulAsList
        LOG.warn("Got exception while waiting for all programs to stop", e.getCause());
    } catch (InterruptedException e) {
        LOG.warn("Got interrupted exception while waiting for all programs to stop", e);
        Thread.currentThread().interrupt();
    } catch (TimeoutException e) {
        // can't do much more than log it. We still want to exit.
        LOG.warn("Timeout while waiting for all programs to stop.");
    }
}

From source file:co.cask.tigon.internal.app.runtime.flow.FlowProgramRunner.java

/**
 * Starts all flowlets in the flow program.
 * @param program Program to run
 * @param flowSpec The {@link FlowSpecification}.
 * @return A {@link com.google.common.collect.Table} with row as flowlet id, column as instance id,
 * cell as the {@link ProgramController} for the flowlet.
 */
private Table<String, Integer, ProgramController> createFlowlets(Program program, RunId runId,
        FlowSpecification flowSpec) {
    Table<String, Integer, ProgramController> flowlets = HashBasedTable.create();

    try {
        for (Map.Entry<String, FlowletDefinition> entry : flowSpec.getFlowlets().entrySet()) {
            int instanceCount = entry.getValue().getInstances();
            for (int instanceId = 0; instanceId < instanceCount; instanceId++) {
                flowlets.put(entry.getKey(), instanceId, startFlowlet(program,
                        createFlowletOptions(entry.getKey(), instanceId, instanceCount, runId)));
            }
        }
    } catch (Throwable t) {
        try {
            // Need to stop all started flowlets
            Futures.successfulAsList(Iterables.transform(flowlets.values(),
                    new Function<ProgramController, ListenableFuture<?>>() {
                        @Override
                        public ListenableFuture<?> apply(ProgramController controller) {
                            return controller.stop();
                        }
                    })).get();
        } catch (Exception e) {
            LOG.error("Fail to stop all flowlets on failure.");
        }
        throw Throwables.propagate(t);
    }
    return flowlets;
}

From source file:co.cask.cdap.logging.framework.distributed.DistributedLogFramework.java

/**
 * Blocks and validates all the given futures completed successfully.
 */
private void validateAllFutures(Iterable<? extends ListenableFuture<?>> futures) throws Exception {
    // The get call shouldn't throw an exception. It just blocks until all futures have completed.
    Futures.successfulAsList(futures).get();

    // Iterates all futures to make sure all of them completed successfully
    Throwable exception = null;
    for (ListenableFuture<?> future : futures) {
        try {
            future.get();
        } catch (ExecutionException e) {
            if (exception == null) {
                exception = e.getCause();
            } else {
                exception.addSuppressed(e.getCause());
            }
        }
    }

    // Throw exception if any of the future failed.
    if (exception != null) {
        if (exception instanceof Exception) {
            throw (Exception) exception;
        }
        throw new RuntimeException(exception);
    }
}

From source file:com.continuuity.loom.common.zookeeper.lib.ZKCollection.java

private void setExternalChangeWatcher() throws ExecutionException, InterruptedException {

    ZKOperations.watchChildren(zkClient, "", new ZKOperations.ChildrenCallback() {
        @Override
        public void updated(NodeChildren nodeChildren) {
            List<String> nodes = nodeChildren.getChildren();
            List<OperationFuture<NodeData>> dataFutures = Lists.newArrayList();
            for (String node : nodes) {
                dataFutures.add(zkClient.getData(getNodePath(node)));
            }

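            // nodes whose data could not be fetched show up as null entries in the combined list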
            final ListenableFuture<List<NodeData>> fetchFuture = Futures.successfulAsList(dataFutures);
            fetchFuture.addListener(new Runnable() {
                @Override
                public void run() {
                    ImmutableList.Builder<T> builder = ImmutableList.builder();
                    // fetchFuture is set by this time
                    List<NodeData> nodesData = Futures.getUnchecked(fetchFuture);
                    for (NodeData nodeData : nodesData) {
                        builder.add(serializer.deserialize(nodeData.getData()));
                    }

                    currentView.set(builder.build());
                }
            }, Threads.SAME_THREAD_EXECUTOR);

        }
    });
}

From source file:org.jclouds.examples.rackspace.cloudfiles.UploadDirectoryToCDN.java

/**
 * Upload the files in parallel.
 */
private void uploadFiles(String container, List<BlobDetail> blobDetails)
        throws InterruptedException, ExecutionException {
    ListeningExecutorService executor = MoreExecutors.listeningDecorator(newFixedThreadPool(THREADS));
    List<ListenableFuture<BlobDetail>> blobUploaderFutures = Lists.newArrayList();
    BlobUploaderCallback blobUploaderCallback = new BlobUploaderCallback();

    try {

        for (BlobDetail blobDetail : blobDetails) {
            BlobUploader blobUploader = new BlobUploader(container, blobDetail);
            ListenableFuture<BlobDetail> blobDetailFuture = executor.submit(blobUploader);
            blobUploaderFutures.add(blobDetailFuture);

            Futures.addCallback(blobDetailFuture, blobUploaderCallback);
        }

        ListenableFuture<List<BlobDetail>> future = Futures.successfulAsList(blobUploaderFutures);
        List<BlobDetail> uploadedBlobDetails = future.get(); // wait for all uploads to finish

        System.out.format("%n");

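        // successfulAsList preserves input order and substitutes null for any upload that failed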
        for (int i = 0; i < uploadedBlobDetails.size(); i++) {
            if (uploadedBlobDetails.get(i) != null) {
                BlobDetail blobDetail = uploadedBlobDetails.get(i);
                System.out.format("  %s (eTag: %s)%n", blobDetail.getRemoteBlobName(), blobDetail.getETag());
            } else {
                System.out.format(" %s (ERROR)%n", blobDetails.get(i).getLocalFile().getAbsolutePath());
            }
        }
    } finally {
        executor.shutdown();
    }
}