Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

This page collects example usages of com.google.common.util.concurrent.Futures.allAsList drawn from open-source projects.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Documentation

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
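
If any input future fails, the combined future fails as well; otherwise the resulting list preserves the order of the input futures, regardless of the order in which they complete. Below is a minimal, self-contained sketch of that behavior (the class name AllAsListSketch is illustrative only, not taken from any project on this page):

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

import java.util.List;

public class AllAsListSketch {
    public static void main(String[] args) throws Exception {
        SettableFuture<String> first = SettableFuture.create();
        SettableFuture<String> second = SettableFuture.create();

        // Combine the inputs; the result completes only when both have completed.
        ListenableFuture<List<String>> all =
                Futures.allAsList(ImmutableList.of(first, second));

        second.set("b"); // completion order does not matter...
        first.set("a");

        System.out.println(all.get()); // ...values keep input order: [a, b]
    }
}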

Usage

From source file:zipkin.cassandra.CassandraSpanStore.java
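This test helper issues an asynchronous TRUNCATE per column family, then blocks until every statement finishes via Futures.getUnchecked(Futures.allAsList(futures)).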

/** Used for testing */
void clear() {
    try (Session session = cluster.connect()) {
        List<ListenableFuture<?>> futures = new LinkedList<>();
        for (String cf : ImmutableList.of("traces", "dependencies", "service_names", "span_names",
                "service_name_index", "service_span_name_index", "annotations_index", "span_duration_index")) {
            futures.add(session.executeAsync(format("TRUNCATE %s.%s", keyspace, cf)));
        }
        Futures.getUnchecked(Futures.allAsList(futures));
    }
}

From source file:org.stem.client.Session.java
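This method kicks off asynchronous pool creation for any newly discovered hosts, then waits on Futures.allAsList(...).get() once for the creation futures and again for the removal futures, translating InterruptedException into an interrupt flag and logging the cause of any ExecutionException.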

void updateCreatedPools(ListeningExecutorService executor) {
    try {
        List<Host> toRemove = new ArrayList<Host>();
        List<ListenableFuture<?>> poolCreationFutures = new ArrayList<ListenableFuture<?>>();

        for (Host h : cluster.getMetadata().allHosts()) {
            ConnectionPool pool = pools.get(h);

            if (pool == null) { // no pool for this host
                if (h.isUp())
                    poolCreationFutures.add(maybeAddPool(h, executor));
            } else {
                pool.ensureCoreConnections();
            }
        }

        Futures.allAsList(poolCreationFutures).get();

        List<ListenableFuture<?>> poolRemovalFutures = new ArrayList<ListenableFuture<?>>(toRemove.size());
        for (Host h : toRemove)
            poolRemovalFutures.add(removePool(h));

        Futures.allAsList(poolRemovalFutures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        logger.error("Unexpected error while refreshing connection pools", e.getCause());
    }
}

From source file:com.facebook.buck.cli.AdbHelper.java
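This method submits one AdbCallable per matching device to a fixed-size executor, blocks on Futures.allAsList(futures).get(), and then counts per-device successes and failures to decide the overall result.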

/**
 * Execute an {@link AdbCallable} for all matching devices. This function performs device
 * filtering based on three possible arguments:
 *
 *  -e (emulator-only) - only emulators pass the filter
 *  -d (device-only) - only real devices pass the filter
 *  -s (serial) - only the device/emulator with the given serial number passes the filter
 *
 *  If more than one device matches the filter, this function will fail unless multi-install
 *  mode is enabled (-x). This flag marks that the user understands that multiple
 *  devices will be used to install the apk if needed.
 */
public boolean adbCall(AdbCallable adbCallable) {
    List<IDevice> devices;

    try (TraceEventLogger ignored = TraceEventLogger.start(buckEventBus, "set_up_adb_call")) {

        // Initialize adb connection.
        AndroidDebugBridge adb = createAdb(context);
        if (adb == null) {
            console.printBuildFailure("Failed to create adb connection.");
            return false;
        }

        // Build list of matching devices.
        devices = filterDevices(adb.getDevices());
        if (devices == null) {
            if (buckConfig.getRestartAdbOnFailure()) {
                console.printErrorText("No devices found with adb, restarting adb-server.");
                adb.restart();
                devices = filterDevices(adb.getDevices());
            }

            if (devices == null) {
                return false;
            }
        }
    }

    int adbThreadCount = options.getAdbThreadCount();
    if (adbThreadCount <= 0) {
        adbThreadCount = devices.size();
    }

    // Start executions on all matching devices.
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    ListeningExecutorService executorService = listeningDecorator(
            newMultiThreadExecutor(getClass().getSimpleName(), adbThreadCount));
    for (final IDevice device : devices) {
        futures.add(executorService.submit(adbCallable.forDevice(device)));
    }

    // Wait for all executions to complete or fail.
    List<Boolean> results = null;
    try {
        results = Futures.allAsList(futures).get();
    } catch (ExecutionException ex) {
        console.printBuildFailure("Failed: " + adbCallable);
        ex.printStackTrace(console.getStdErr());
        return false;
    } catch (InterruptedException ex) {
        console.printBuildFailure("Interrupted.");
        ex.printStackTrace(console.getStdErr());
        return false;
    } finally {
        executorService.shutdownNow();
    }

    int successCount = 0;
    for (Boolean result : results) {
        if (result) {
            successCount++;
        }
    }
    int failureCount = results.size() - successCount;

    // Report results.
    if (successCount > 0) {
        console.printSuccess(String.format("Successfully ran %s on %d device(s)", adbCallable, successCount));
    }
    if (failureCount > 0) {
        console.printBuildFailure(String.format("Failed to %s on %d device(s).", adbCallable, failureCount));
    }

    return failureCount == 0;
}

From source file:org.thingsboard.server.dao.timeseries.BaseTimeseriesDao.java
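Here allAsList is returned from inside an AsyncFunction: one asynchronous read is bound per partition, and the combined future exposes all the per-partition ResultSets as a single result.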

private AsyncFunction<List<Long>, List<ResultSet>> getFetchChunksAsyncFunction(String entityType, UUID entityId,
        String key, Aggregation aggregation, long startTs, long endTs) {
    return partitions -> {
        try {
            PreparedStatement proto = getFetchStmt(aggregation);
            List<ResultSetFuture> futures = new ArrayList<>(partitions.size());
            for (Long partition : partitions) {
                log.trace("Fetching data for partition [{}] for entityType {} and entityId {}", partition,
                        entityType, entityId);
                BoundStatement stmt = proto.bind();
                stmt.setString(0, entityType);
                stmt.setUUID(1, entityId);
                stmt.setString(2, key);
                stmt.setLong(3, partition);
                stmt.setLong(4, startTs);
                stmt.setLong(5, endTs);
                log.debug("Generated query [{}] for entityType {} and entityId {}", stmt, entityType, entityId);
                futures.add(executeAsyncRead(stmt));
            }
            return Futures.allAsList(futures);
        } catch (Throwable e) {
            log.error("Failed to fetch data", e);
            throw e;
        }
    };
}

From source file:com.rackspacecloud.blueflood.outputs.handlers.RollupHandler.java
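For multi-metric plots, this handler fans repairMetrics calls out to an executor, waits on the aggregate future with a bounded get(timeout, unit), and cancels the whole batch if the deadline is exceeded so that incomplete rollups can still be returned.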

public Map<Locator, MetricData> getRollupByGranularity(final String tenantId, final List<String> metrics,
        final long from, final long to, final Granularity g) {

    final Timer.Context ctx = metrics.size() == 1 ? plotTimers.SPLOT_TIMER.timer.time()
            : plotTimers.MPLOT_TIMER.timer.time();
    Future<List<SearchResult>> unitsFuture = null;
    List<SearchResult> units = null;
    List<Locator> locators = new ArrayList<Locator>();

    Timer.Context c = timerRorCalcUnits.time();

    for (String metric : metrics) {
        locators.add(Locator.createLocatorFromPathComponents(tenantId, metric));
    }

    queriesSizeHist.update(locators.size());

    if (Util.shouldUseESForUnits()) {
        unitsFuture = ESUnitExecutor.submit(new Callable() {

            @Override
            public List<SearchResult> call() throws Exception {
                DiscoveryIO discoveryIO = (DiscoveryIO) ModuleLoader.getInstance(DiscoveryIO.class,
                        CoreConfig.DISCOVERY_MODULES);

                if (discoveryIO == null) {
                    log.warn("USE_ES_FOR_UNITS has been set to true, but no discovery module found."
                            + " Please check your config");
                    return null;
                }
                return discoveryIO.search(tenantId, metrics);
            }
        });
    }

    MetricsRWDelegator delegator = new MetricsRWDelegator();
    final Map<Locator, MetricData> metricDataMap = delegator.getDatapointsForRange(locators,
            new Range(g.snapMillis(from), to), g);

    if (unitsFuture != null) {
        try {
            units = unitsFuture.get();
            for (SearchResult searchResult : units) {
                Locator locator = Locator.createLocatorFromPathComponents(searchResult.getTenantId(),
                        searchResult.getMetricName());
                if (metricDataMap.containsKey(locator))
                    metricDataMap.get(locator).setUnit(searchResult.getUnit());
            }
        } catch (Exception e) {
            log.warn(
                    "Exception encountered while getting units from ES, unit will be set to unknown in query results",
                    e);
        }
    }

    c.stop();

    if (locators.size() == 1) {
        for (final Map.Entry<Locator, MetricData> metricData : metricDataMap.entrySet()) {
            Timer.Context context = rollupsOnReadTimers.RR_SPLOT_TIMER.timer.time();
            repairMetrics(metricData.getKey(), metricData.getValue(), from, to, g);
            context.stop();
        }
    } else if (locators.size() > 1
            && Configuration.getInstance().getBooleanProperty(CoreConfig.TURN_OFF_RR_MPLOT) == false) {
        Timer.Context context = rollupsOnReadTimers.RR_MPLOT_TIMER.timer.time();
        ArrayList<ListenableFuture<Boolean>> futures = new ArrayList<ListenableFuture<Boolean>>();
        for (final Map.Entry<Locator, MetricData> metricData : metricDataMap.entrySet()) {
            futures.add(rollupsOnReadExecutor.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() {
                    return repairMetrics(metricData.getKey(), metricData.getValue(), from, to, g);
                }
            }));
        }
        ListenableFuture<List<Boolean>> aggregateFuture = Futures.allAsList(futures);
        try {
            aggregateFuture.get(rollupOnReadTimeout.getValue(), rollupOnReadTimeout.getUnit());
        } catch (Exception e) {
            aggregateFuture.cancel(true);
            exceededQueryTimeout.mark();
            log.warn("Exception encountered while doing rollups on read, incomplete rollups will be returned.",
                    e);
        }
        context.stop();
    }

    for (MetricData metricData : metricDataMap.values()) {

        // we used to track enum queries here,
        // but since enum is removed, this currently is
        // a no op, doesn't track any queries
        markQueryByRollupType(metricData);
    }

    ctx.stop();
    return metricDataMap;
}

From source file:org.apache.cassandra.repair.RepairSession.java
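Each RepairJob is itself a ListenableFuture, so the session attaches a FutureCallback to Futures.allAsList(jobs): it completes successfully only when every job does, and shuts down on the first failure.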

/**
 * Start RepairJob on given ColumnFamilies.
 *
 * This first validates that all replicas are available and, if they are,
 * creates RepairJobs and submits them to run on the given executor.
 *
 * @param executor Executor to run validation
 */
public void start(ListeningExecutorService executor) {
    String message;
    if (terminated)
        return;

    logger.info(String.format("[repair #%s] new session: will sync %s on range %s for %s.%s", getId(),
            repairedNodes(), range, keyspace, Arrays.toString(cfnames)));
    Tracing.traceRepair("Syncing range {}", range);
    SystemDistributedKeyspace.startRepairs(getId(), parentRepairSession, keyspace, cfnames, range, endpoints);

    if (endpoints.isEmpty()) {
        logger.info("[repair #{}] {}", getId(),
                message = String.format("No neighbors to repair with on range %s: session completed", range));
        Tracing.traceRepair(message);
        set(new RepairSessionResult(id, keyspace, range, Lists.<RepairResult>newArrayList()));
        SystemDistributedKeyspace.failRepairs(getId(), keyspace, cfnames, new RuntimeException(message));
        return;
    }

    // Checking all nodes are live
    for (InetAddress endpoint : endpoints) {
        if (!FailureDetector.instance.isAlive(endpoint)) {
            message = String.format("Cannot proceed on repair because a neighbor (%s) is dead: session failed",
                    endpoint);
            logger.error("[repair #{}] {}", getId(), message);
            Exception e = new IOException(message);
            setException(e);
            SystemDistributedKeyspace.failRepairs(getId(), keyspace, cfnames, e);
            return;
        }
    }

    // Create and submit RepairJob for each ColumnFamily
    List<ListenableFuture<RepairResult>> jobs = new ArrayList<>(cfnames.length);
    for (String cfname : cfnames) {
        RepairJob job = new RepairJob(this, cfname, parallelismDegree, repairedAt, taskExecutor);
        executor.execute(job);
        jobs.add(job);
    }

    // When all RepairJobs are done without error, cleanup and set the final result
    Futures.addCallback(Futures.allAsList(jobs), new FutureCallback<List<RepairResult>>() {
        public void onSuccess(List<RepairResult> results) {
            // this repair session is completed
            logger.info("[repair #{}] {}", getId(), "Session completed successfully");
            Tracing.traceRepair("Completed sync of range {}", range);
            set(new RepairSessionResult(id, keyspace, range, results));

            taskExecutor.shutdown();
            // mark this session as terminated
            terminate();
        }

        public void onFailure(Throwable t) {
            logger.error(String.format("[repair #%s] Session completed with the following error", getId()), t);
            Tracing.traceRepair("Session completed with the following error: {}", t);
            forceShutdown(t);
        }
    });
}

From source file:com.continuuity.weave.internal.appmaster.ApplicationMasterService.java
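During startup this method creates two ZooKeeper paths in parallel and blocks on Futures.allAsList(...).get() to make sure both exist before the Kafka server is started.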

private void doStart() throws Exception {
    LOG.info("Start application master with spec: " + WeaveSpecificationAdapter.create().toJson(weaveSpec));

    // initialize the event handler, if it fails, it will fail the application.
    eventHandler.initialize(new BasicEventHandlerContext(weaveSpec.getEventHandler()));

    instanceChangeExecutor = Executors
            .newSingleThreadExecutor(Threads.createDaemonThreadFactory("instanceChanger"));

    kafkaServer = new EmbeddedKafkaServer(new File(Constants.Files.KAFKA), generateKafkaConfig());

    // Must start the tracker before starting the AMClient
    LOG.info("Starting application master tracker server");
    trackerService.startAndWait();
    URL trackerUrl = trackerService.getUrl();
    LOG.info("Started application master tracker server on " + trackerUrl);

    amClient.setTracker(trackerService.getBindAddress(), trackerUrl);
    amClient.startAndWait();

    // Creates ZK path for runnable and kafka logging service
    Futures.allAsList(
            ImmutableList.of(zkClient.create("/" + runId.getId() + "/runnables", null, CreateMode.PERSISTENT),
                    zkClient.create("/" + runId.getId() + "/kafka", null, CreateMode.PERSISTENT)))
            .get();

    // Starts kafka server
    LOG.info("Starting kafka server");

    kafkaServer.startAndWait();
    LOG.info("Kafka server started");

    runnableContainerRequests = initContainerRequests();
}

From source file:com.yahoo.yqlplus.engine.internal.java.sequences.Sequences.java
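This method splits the key list into batches at null sentinels, submits each batch to the executor, and merges the per-batch futures with Futures.allAsList before handing the gathered result to the timeout handler.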

public static <ROW, SEQUENCE extends Iterable<ROW>, SET> ListenableFuture<List<ROW>> invokeBatchSet(
        final ListeningExecutorService workExecutor, final Function<List<SET>, SEQUENCE> source,
        final List<SET> keys, Tracer tracer, Timeout timeout, TimeoutHandler handler) throws Exception {
    List<ListenableFuture<SEQUENCE>> results = Lists.newArrayList();
    List<SET> methodArgs = Lists.newArrayList();
    for (int i = 0; i < keys.size(); i++) {
        if (keys.get(i) != null) {
            methodArgs.add(keys.get(i));
        } else {
            results.add(workExecutor.submit(createJob(tracer, source, methodArgs)));
            methodArgs = Lists.newArrayList();
        }
    }
    ListenableFuture<List<SEQUENCE>> gather = Futures.allAsList(results);
    return handler.withTimeout(gatherResults(workExecutor, gather, 1), timeout.verify(),
            timeout.getTickUnits());
}

From source file:io.v.todos.persistence.syncbase.SyncbaseTodoList.java
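Inside a Syncbase batch, this method collects one put future per task being marked done (plus a list-timestamp update) and waits for all of them via VFutures.sync(Futures.allAsList(puts)) so the writes commit together.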

@Override
public void completeTodoList() {
    trap(Batch.runInBatch(getVContext(), getDatabase(), new BatchOptions(), new Batch.BatchOperation() {
        @Override
        public ListenableFuture<Void> run(final BatchDatabase db) {
            return sExecutor.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    InputChannel<KeyValue> scan = mList.scan(getVContext(),
                            RowRange.prefix(SyncbaseTodoList.TASKS_PREFIX));

                    List<ListenableFuture<Void>> puts = new ArrayList<>();

                    for (KeyValue kv : InputChannels.asIterable(scan)) {
                        TaskSpec taskSpec = castFromSyncbase(kv.getValue().getElem(), TaskSpec.class);
                        if (!taskSpec.getDone()) {
                            taskSpec.setDone(true);
                            puts.add(mList.put(getVContext(), kv.getKey(), taskSpec));
                        }
                    }

                    if (!puts.isEmpty()) {
                        puts.add(updateListTimestamp());
                    }
                    VFutures.sync(Futures.allAsList(puts));
                    return null;
                }
            });
        }
    }));
}

From source file:org.opendaylight.openflowplugin.applications.frsync.impl.strategy.SyncPlanPushStrategyIncrementalImpl.java
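This method uses allAsList three times: once over the add-flow RPC results, once over the update-flow RPC results, and a final time to combine the two condensed void results into a single RpcResult.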

ListenableFuture<RpcResult<Void>> addMissingFlows(final NodeId nodeId,
        final InstanceIdentifier<FlowCapableNode> nodeIdent,
        final Map<TableKey, ItemSyncBox<Flow>> flowsInTablesSyncBox, final SyncCrudCounters counters) {
    if (flowsInTablesSyncBox.isEmpty()) {
        LOG.trace("no tables in config for node: {} -> SKIPPING", nodeId.getValue());
        return RpcResultBuilder.<Void>success().buildFuture();
    }

    final List<ListenableFuture<RpcResult<AddFlowOutput>>> allResults = new ArrayList<>();
    final List<ListenableFuture<RpcResult<UpdateFlowOutput>>> allUpdateResults = new ArrayList<>();
    final CrudCounts flowCrudCounts = counters.getFlowCrudCounts();

    for (Map.Entry<TableKey, ItemSyncBox<Flow>> flowsInTableBoxEntry : flowsInTablesSyncBox.entrySet()) {
        final TableKey tableKey = flowsInTableBoxEntry.getKey();
        final ItemSyncBox<Flow> flowSyncBox = flowsInTableBoxEntry.getValue();

        final KeyedInstanceIdentifier<Table, TableKey> tableIdent = nodeIdent.child(Table.class, tableKey);

        for (final Flow flow : flowSyncBox.getItemsToPush()) {
            final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent = tableIdent.child(Flow.class,
                    flow.getKey());

            LOG.trace("adding flow {} in table {} - absent on device {} match{}", flow.getId(), tableKey,
                    nodeId, flow.getMatch());

            allResults.add(JdkFutureAdapters.listenInPoolThread(flowForwarder.add(flowIdent, flow, nodeIdent)));
            flowCrudCounts.incAdded();
        }

        for (final ItemSyncBox.ItemUpdateTuple<Flow> flowUpdate : flowSyncBox.getItemsToUpdate()) {
            final Flow existingFlow = flowUpdate.getOriginal();
            final Flow updatedFlow = flowUpdate.getUpdated();

            final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent = tableIdent.child(Flow.class,
                    updatedFlow.getKey());
            LOG.trace("flow {} in table {} - needs update on device {} match{}", updatedFlow.getId(), tableKey,
                    nodeId, updatedFlow.getMatch());

            allUpdateResults.add(JdkFutureAdapters
                    .listenInPoolThread(flowForwarder.update(flowIdent, existingFlow, updatedFlow, nodeIdent)));
            flowCrudCounts.incUpdated();
        }
    }

    final ListenableFuture<RpcResult<Void>> singleVoidAddResult = Futures.transform(
            Futures.allAsList(allResults),
            ReconcileUtil.<AddFlowOutput>createRpcResultCondenser("flow adding"));

    final ListenableFuture<RpcResult<Void>> singleVoidUpdateResult = Futures.transform(
            Futures.allAsList(allUpdateResults),
            ReconcileUtil.<UpdateFlowOutput>createRpcResultCondenser("flow updating"));

    return Futures.transform(Futures.allAsList(singleVoidAddResult, singleVoidUpdateResult),
            ReconcileUtil.<Void>createRpcResultCondenser("flow add/update"));
}