Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usage for com.google.common.util.concurrent.Futures.allAsList, collected from open source projects.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed. The results preserve the order of the input futures; if any input fails or is cancelled, the returned future fails or is cancelled as well.
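
Before the project excerpts below, here is a minimal, self-contained sketch of the call shape. All names are illustrative rather than taken from the projects that follow, and the three-argument addCallback overload assumes a reasonably recent Guava:

ListeningExecutorService pool =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

ListenableFuture<Integer> first = pool.submit(() -> 1);
ListenableFuture<Integer> second = pool.submit(() -> 2);

// Completes when both inputs succeed; results keep input order.
ListenableFuture<List<Integer>> all = Futures.allAsList(first, second);

Futures.addCallback(all, new FutureCallback<List<Integer>>() {
    @Override
    public void onSuccess(List<Integer> results) {
        System.out.println(results); // [1, 2]
    }

    @Override
    public void onFailure(Throwable t) {
        t.printStackTrace(); // fires as soon as either input fails
    }
}, MoreExecutors.directExecutor());

pool.shutdown();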

Usage

From source file:org.thingsboard.server.dao.timeseries.CassandraBaseTimeseriesDao.java

private ListenableFuture<List<TsKvEntry>> findAllAsync(TenantId tenantId, EntityId entityId,
        ReadTsKvQuery query) {
    if (query.getAggregation() == Aggregation.NONE) {
        return findAllAsyncWithLimit(tenantId, entityId, query);
    } else {
        long step = Math.max(query.getInterval(), MIN_AGGREGATION_STEP_MS);
        long stepTs = query.getStartTs();
        List<ListenableFuture<Optional<TsKvEntry>>> futures = new ArrayList<>();
        while (stepTs < query.getEndTs()) {
            long startTs = stepTs;
            long endTs = stepTs + step;
            ReadTsKvQuery subQuery = new BaseReadTsKvQuery(query.getKey(), startTs, endTs, step, 1,
                    query.getAggregation(), query.getOrderBy());
            futures.add(findAndAggregateAsync(tenantId, entityId, subQuery, toPartitionTs(startTs),
                    toPartitionTs(endTs)));
            stepTs = endTs;
        }
        ListenableFuture<List<Optional<TsKvEntry>>> future = Futures.allAsList(futures);
        return Futures.transform(future, new Function<List<Optional<TsKvEntry>>, List<TsKvEntry>>() {
            @Nullable
            @Override
            public List<TsKvEntry> apply(@Nullable List<Optional<TsKvEntry>> input) {
                return input == null ? Collections.emptyList()
                        : input.stream().filter(v -> v.isPresent()).map(v -> v.get())
                                .collect(Collectors.toList());
            }
        }, readResultsProcessingExecutor);
    }
}
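
The transform step above collapses the List<Optional<TsKvEntry>> produced by allAsList into a plain List<TsKvEntry>. On a Java 8+ codebase the anonymous Function can be written as a lambda; a hedged sketch of the same step, reusing the types and executor name from the excerpt:

ListenableFuture<List<TsKvEntry>> result = Futures.transform(Futures.allAsList(futures),
        (List<Optional<TsKvEntry>> input) -> input == null
                ? Collections.<TsKvEntry>emptyList()
                : input.stream().filter(Optional::isPresent).map(Optional::get)
                        .collect(Collectors.toList()),
        readResultsProcessingExecutor);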

From source file:io.crate.executor.transport.executionphases.ExecutionPhasesTask.java

private void setupContext(Map<String, Collection<NodeOperation>> operationByServer,
        List<Tuple<ExecutionPhase, RowReceiver>> handlerPhases, InitializationTracker initializationTracker)
        throws Throwable {

    String localNodeId = clusterService.localNode().id();
    Collection<NodeOperation> localNodeOperations = operationByServer.remove(localNodeId);
    if (localNodeOperations == null) {
        localNodeOperations = Collections.emptyList();
    }

    JobExecutionContext.Builder builder = jobContextService.newBuilder(jobId(), localNodeId);
    List<ListenableFuture<Bucket>> directResponseFutures = contextPreparer.prepareOnHandler(localNodeOperations,
            builder, handlerPhases, new SharedShardContexts(indicesService));
    JobExecutionContext localJobContext = jobContextService.createContext(builder);

    List<PageBucketReceiver> pageBucketReceivers = getHandlerBucketReceivers(localJobContext, handlerPhases);
    int bucketIdx = 0;

    if (!localNodeOperations.isEmpty()) {
        if (directResponseFutures.isEmpty()) {
            initializationTracker.jobInitialized();
        } else {
            Futures.addCallback(Futures.allAsList(directResponseFutures),
                    new SetBucketCallback(pageBucketReceivers, bucketIdx, initializationTracker));
            bucketIdx++;
        }
    }
    localJobContext.start();
    sendJobRequests(localNodeId, operationByServer, pageBucketReceivers, handlerPhases, bucketIdx,
            initializationTracker);
}

From source file:org.apache.hive.ptest.execution.Phase.java

private <T extends RemoteCommandResult> List<T> toListOfResults(List<ListenableFuture<T>> futures,
        boolean reportErrors) throws Exception {
    List<T> results = Lists.newArrayList();
    for (T result : Futures.allAsList(futures).get()) {
        if (result != null) {
            if (reportErrors && (result.getException() != null || result.getExitCode() != 0)) {
                throw new SSHExecutionException(result);
            }
            results.add(result);
        }
    }
    return results;
}
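
Because this helper calls allAsList(futures).get(), it blocks the calling thread and surfaces the first failed command as an ExecutionException. When partial results are acceptable, Guava's successfulAsList is the usual alternative; a hedged variant of the loop inside the same helper:

// successfulAsList never fails as a whole; failed or cancelled inputs
// appear as null entries, so the existing null check skips them.
for (T result : Futures.successfulAsList(futures).get()) {
    if (result != null) {
        results.add(result);
    }
}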

From source file:com.google.devtools.build.android.ideinfo.JarFilter.java

/** Finds the expected jar archive file name prefixes for the java files. */
static List<String> parseJavaFiles(List<Path> javaFiles) throws IOException {
    ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()));

    List<ListenableFuture<String>> futures = Lists.newArrayList();
    for (final Path javaFile : javaFiles) {
        futures.add(executorService.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                String packageString = getDeclaredPackageOfJavaFile(javaFile);
                return packageString != null ? getArchiveFileNamePrefix(javaFile.toString(), packageString)
                        : null;
            }
        }));
    }
    try {
        List<String> archiveFileNamePrefixes = Futures.allAsList(futures).get();
        List<String> result = Lists.newArrayList();
        for (String archiveFileNamePrefix : archiveFileNamePrefixes) {
            if (archiveFileNamePrefix != null) {
                result.add(archiveFileNamePrefix);
            }
        }
        return result;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException(e);
    } catch (ExecutionException e) {
        throw new IOException(e);
    }
}
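
One caveat with the excerpt as shown: the decorated thread pool is never shut down, so its non-daemon worker threads keep the JVM alive after the method returns. Wrapping the try/catch in a try...finally that calls executorService.shutdown() would release them (an addition for illustration here, not part of the original file).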

From source file:org.opendaylight.netconf.topology.impl.NetconfNodeOperationalDataAggregator.java

@Override
public ListenableFuture<Void> combineDeleteAttempts(final List<ListenableFuture<Void>> stateFutures) {
    final SettableFuture<Void> future = SettableFuture.create();
    final ListenableFuture<List<Void>> allAsList = Futures.allAsList(stateFutures);
    Futures.addCallback(allAsList, new FutureCallback<List<Void>>() {
        @Override
        public void onSuccess(final List<Void> result) {
            future.set(null);
        }

        @Override
        public void onFailure(final Throwable t) {
            LOG.error("One of the combined delete attempts failed {}", t);
            future.setException(t);
        }
    });
    return future;
}
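
Since the callback above only collapses the List<Void> to null, the SettableFuture bridge can be folded into a single transform. A hedged equivalent with a recent Guava (note that the error logging is lost in this form, which may be exactly why the original keeps the explicit callback):

return Futures.transform(Futures.allAsList(stateFutures),
        (List<Void> results) -> (Void) null,
        MoreExecutors.directExecutor());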

From source file:com.continuuity.weave.yarn.ApplicationMasterService.java

private void doStart() throws Exception {
    LOG.info("Start application master with spec: " + WeaveSpecificationAdapter.create().toJson(weaveSpec));

    yarnRPC = YarnRPC.create(yarnConf);

    amrmClient.init(yarnConf);
    amrmClient.start();
    // TODO: Have RPC host and port
    RegisterApplicationMasterResponse response = amrmClient.registerApplicationMaster("", 0, null);
    maxCapability = response.getMaximumResourceCapability();
    minCapability = response.getMinimumResourceCapability();

    LOG.info("Maximum resource capability: " + maxCapability);
    LOG.info("Minimum resource capability: " + minCapability);

    // Creates ZK path for runnable and kafka logging service
    Futures.allAsList(
            ImmutableList.of(zkClientService.create("/" + runId + "/runnables", null, CreateMode.PERSISTENT),
                    zkClientService.create("/" + runId + "/kafka", null, CreateMode.PERSISTENT)))
            .get();
}

From source file:io.crate.action.sql.TransportBaseSQLAction.java

private void executePlan(final AnalyzedStatement analyzedStatement, final Plan plan, final String[] outputNames,
        final DataType[] outputTypes, final ActionListener<TResponse> listener, final TRequest request) {
    Executor executor = executorProvider.get();
    Job job = executor.newJob(plan);
    final UUID jobId = job.id();
    if (jobId != null) {
        statsTables.jobStarted(jobId, request.stmt());
    }
    List<ListenableFuture<TaskResult>> resultFutureList = executor.execute(job);
    Futures.addCallback(Futures.allAsList(resultFutureList), new FutureCallback<List<TaskResult>>() {
        @Override
        public void onSuccess(@Nullable List<TaskResult> result) {
            TResponse response;

            try {
                if (result == null) {
                    response = emptyResponse(request, outputNames, outputTypes);
                } else {
                    response = createResponseFromResult(outputNames, outputTypes, result,
                            analyzedStatement.expectsAffectedRows(), request.creationTime(),
                            request.includeTypesOnResponse());
                }
            } catch (Throwable e) {
                sendResponse(listener, e);
                return;
            }

            if (jobId != null) {
                statsTables.jobFinished(jobId, null);
            }
            sendResponse(listener, response);
        }

        @Override
        public void onFailure(@Nonnull Throwable t) {
            logger.debug("Error processing SQLRequest", t);
            if (jobId != null) {
                statsTables.jobFinished(jobId, Exceptions.messageOf(t));
            }
            sendResponse(listener, buildSQLActionException(t));
        }
    });
}

From source file:org.opendaylight.openflowplugin.applications.bulk.o.matic.SalBulkFlowServiceImpl.java

@Override
public Future<RpcResult<Void>> addFlowsRpc(AddFlowsRpcInput input) {
    List<ListenableFuture<RpcResult<AddFlowOutput>>> bulkResults = new ArrayList<>();

    for (BulkFlowBaseContentGrouping bulkFlow : input.getBulkFlowItem()) {
        AddFlowInputBuilder flowInputBuilder = new AddFlowInputBuilder((Flow) bulkFlow);
        final NodeRef nodeRef = bulkFlow.getNode();
        flowInputBuilder.setNode(nodeRef);
        flowInputBuilder.setTableId(bulkFlow.getTableId());
        Future<RpcResult<AddFlowOutput>> rpcAddFlowResult = flowService.addFlow(flowInputBuilder.build());
        bulkResults.add(JdkFutureAdapters.listenInPoolThread(rpcAddFlowResult));
    }
    return handleResultFuture(Futures.allAsList(bulkResults));
}
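
JdkFutureAdapters.listenInPoolThread is what makes this combinable: a plain JDK Future exposes no completion hook, so the adapter dedicates a pool thread to block on each wrapped future and completes the returned ListenableFuture from there. That costs one parked thread per pending RPC result, which is worth keeping in mind for very large bulk requests.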

From source file:io.druid.server.namespace.KafkaExtractionManager.java

@LifecycleStop
public void stop() {
    executorService.shutdown();
    Futures.allAsList(futures).cancel(true);
}
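
This works as a one-line teardown because cancelling the future returned by allAsList propagates the cancellation to each input future, and cancel(true) additionally interrupts tasks that are already running; the preceding shutdown() only stops the executor from accepting new work.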

From source file:io.prestosql.plugin.hive.HivePageSink.java

private ListenableFuture<Collection<Slice>> doFinish() {
    ImmutableList.Builder<Slice> partitionUpdates = ImmutableList.builder();
    List<Callable<Object>> verificationTasks = new ArrayList<>();
    for (HiveWriter writer : writers) {
        writer.commit();
        PartitionUpdate partitionUpdate = writer.getPartitionUpdate();
        partitionUpdates.add(wrappedBuffer(partitionUpdateCodec.toJsonBytes(partitionUpdate)));
        writer.getVerificationTask().map(Executors::callable).ifPresent(verificationTasks::add);
    }
    List<Slice> result = partitionUpdates.build();

    writtenBytes = writers.stream().mapToLong(HiveWriter::getWrittenBytes).sum();
    validationCpuNanos = writers.stream().mapToLong(HiveWriter::getValidationCpuNanos).sum();

    if (verificationTasks.isEmpty()) {
        return Futures.immediateFuture(result);
    }

    try {
        List<ListenableFuture<?>> futures = writeVerificationExecutor.invokeAll(verificationTasks).stream()
                .map(future -> (ListenableFuture<?>) future).collect(toList());
        return Futures.transform(Futures.allAsList(futures), input -> result, directExecutor());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}
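
Here allAsList serves purely as a completion barrier: the transform discards the verification results and returns the already-built partitionUpdates, while still propagating any verification failure to the caller. directExecutor() is a reasonable choice because the transform function itself does no work.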