List of usage examples for com.google.common.util.concurrent.Futures.allAsList
@Beta @CheckReturnValue public static <V> ListenableFuture<List<V>> allAsList(Iterable<? extends ListenableFuture<? extends V>> futures)
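Before the project examples below, here is a minimal, self-contained sketch of the basic pattern they all share: submit independent tasks to a ListeningExecutorService, combine the resulting futures with Futures.allAsList, and read back a list of results in the same order as the input futures (the combined future fails if any input future fails or is cancelled). The class name AllAsListExample, the thread pool size, and the arithmetic tasks are illustrative assumptions, not taken from any of the sources listed on this page.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class AllAsListExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical setup: wrap a plain thread pool so submit() returns ListenableFutures.
        ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        try {
            // Two independent tasks, declared as Callables so each yields a value.
            Callable<Integer> taskA = () -> 1 + 1;
            Callable<Integer> taskB = () -> 20 + 22;
            ListenableFuture<Integer> first = pool.submit(taskA);
            ListenableFuture<Integer> second = pool.submit(taskB);

            // Combine: the result list preserves input order; any input failure fails the whole future.
            ListenableFuture<List<Integer>> combined = Futures.allAsList(first, second);
            System.out.println(combined.get()); // prints [2, 42]
        } finally {
            pool.shutdown();
        }
    }
}

The examples that follow apply the same idea at larger scale: fanning out per-device, per-endpoint, or per-file work and then waiting on (or further transforming) the single combined future.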
From source file:com.yahoo.yqlplus.engine.internal.java.sequences.Sequences.java
public static <ROW, SEQUENCE extends Iterable<ROW>, SET> ListenableFuture<List<ROW>> invokeAsyncSet(
        final Executor executor, final AsyncFunction<List<SET>, SEQUENCE> source, List<SET> keys,
        Tracer tracer, Timeout timeout, TimeoutHandler handler) throws Exception {
    // TODO OPTIMIZE: List not needed in this case
    List<ListenableFuture<SEQUENCE>> results = Lists.newArrayList();
    final Tracer childTracer = tracer.start(tracer.getGroup(), tracer.getName());
    ListenableFuture<SEQUENCE> result = source.apply(keys);
    results.add(result);
    result.addListener(new Runnable() {
        @Override
        public void run() {
            childTracer.end();
        }
    }, MoreExecutors.sameThreadExecutor());
    ListenableFuture<List<SEQUENCE>> gather = Futures.allAsList(results);
    return handler.withTimeout(gatherResults(executor, gather, 1), timeout.verify(), timeout.getTickUnits());
}
From source file:org.thingsboard.server.dao.timeseries.BaseTimeseriesService.java
@Override
public ListenableFuture<List<Void>> remove(TenantId tenantId, EntityId entityId,
        List<DeleteTsKvQuery> deleteTsKvQueries) {
    validate(entityId);
    deleteTsKvQueries.forEach(BaseTimeseriesService::validate);
    List<ListenableFuture<Void>> futures = Lists
            .newArrayListWithExpectedSize(deleteTsKvQueries.size() * DELETES_PER_ENTRY);
    for (DeleteTsKvQuery tsKvQuery : deleteTsKvQueries) {
        deleteAndRegisterFutures(tenantId, futures, entityId, tsKvQuery);
    }
    return Futures.allAsList(futures);
}
From source file:org.apache.cassandra.repair.RepairJob.java
/**
 * Creates {@link ValidationTask}s and submits them to the task executor in parallel.
 *
 * @param endpoints Endpoint addresses to send validation requests to
 * @return Future that can get all {@link TreeResponse} from replicas, if all validations succeed.
 */
private ListenableFuture<List<TreeResponse>> sendValidationRequest(Collection<InetAddress> endpoints) {
    String message = String.format("Requesting merkle trees for %s (to %s)", desc.columnFamily, endpoints);
    logger.info("[repair #{}] {}", desc.sessionId, message);
    Tracing.traceRepair(message);
    int gcBefore = Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily)
            .gcBefore(System.currentTimeMillis());
    List<ListenableFuture<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
    for (InetAddress endpoint : endpoints) {
        ValidationTask task = new ValidationTask(desc, endpoint, gcBefore);
        tasks.add(task);
        session.waitForValidation(Pair.create(desc, endpoint), task);
        taskExecutor.execute(task);
    }
    return Futures.allAsList(tasks);
}
From source file:com.facebook.buck.cli.AdbCommandRunner.java
/**
 * Execute an {@link AdbCallable} for all matching devices. This function performs device
 * filtering based on three possible arguments:
 *
 *  -e (emulator-only) - only emulators pass the filter
 *  -d (device-only)   - only real devices pass the filter
 *  -s (serial)        - only the device/emulator with the given serial number passes the filter
 *
 * If more than one device matches the filter this function will fail unless multi-install
 * mode is enabled (-x). This flag is used as a marker that the user understands that multiple
 * devices will be used to install the apk if needed.
 */
@VisibleForTesting
protected boolean adbCall(AdbOptions options, TargetDeviceOptions deviceOptions, ExecutionContext context,
        AdbCallable adbCallable) {
    // Initialize adb connection.
    AndroidDebugBridge adb = createAdb(context);
    if (adb == null) {
        console.printBuildFailure("Failed to create adb connection.");
        return false;
    }

    // Build list of matching devices.
    List<IDevice> devices = filterDevices(adb.getDevices(), options, deviceOptions);
    if (devices == null) {
        return false;
    }

    int adbThreadCount = options.getAdbThreadCount();
    if (adbThreadCount <= 0) {
        adbThreadCount = devices.size();
    }

    // Start executions on all matching devices.
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    ListeningExecutorService executorService = listeningDecorator(
            newMultiThreadExecutor(getClass().getSimpleName(), adbThreadCount));
    for (final IDevice device : devices) {
        futures.add(executorService.submit(adbCallable.forDevice(device)));
    }

    // Wait for all executions to complete or fail.
    List<Boolean> results = null;
    try {
        results = Futures.allAsList(futures).get();
    } catch (ExecutionException ex) {
        console.printBuildFailure("Failed: " + adbCallable);
        ex.printStackTrace(console.getStdErr());
        return false;
    } catch (InterruptedException ex) {
        console.printBuildFailure("Interrupted.");
        ex.printStackTrace(console.getStdErr());
        return false;
    } finally {
        executorService.shutdownNow();
    }

    int successCount = 0;
    for (Boolean result : results) {
        if (result) {
            successCount++;
        }
    }
    int failureCount = results.size() - successCount;

    // Report results.
    if (successCount > 0) {
        console.printSuccess(String.format("Successfully ran %s on %d device(s)", adbCallable, successCount));
    }
    if (failureCount > 0) {
        console.printBuildFailure(String.format("Failed to %s on %d device(s).", adbCallable, failureCount));
    }
    return failureCount == 0;
}
From source file:org.apache.druid.server.coordinator.CostBalancerStrategy.java
@Override
public Iterator<ServerHolder> pickServersToDrop(DataSegment toDrop, NavigableSet<ServerHolder> serverHolders) {
    List<ListenableFuture<Pair<Double, ServerHolder>>> futures = Lists.newArrayList();

    for (final ServerHolder server : serverHolders) {
        futures.add(exec.submit(() -> Pair.of(computeCost(toDrop, server, true), server)));
    }

    final ListenableFuture<List<Pair<Double, ServerHolder>>> resultsFuture = Futures.allAsList(futures);

    try {
        // results is an un-ordered list of pairs, each consisting of the 'cost' of a segment being on a server and that server
        List<Pair<Double, ServerHolder>> results = resultsFuture.get();
        return results.stream()
                // Comparator.comparingDouble will order by lowest cost...
                // reverse it because we want to drop from the highest cost servers first
                .sorted(Comparator.comparingDouble((Pair<Double, ServerHolder> o) -> o.lhs).reversed())
                .map(x -> x.rhs).collect(Collectors.toList()).iterator();
    } catch (Exception e) {
        log.makeAlert(e, "Cost Balancer Multithread strategy wasn't able to complete cost computation.").emit();
    }
    return Collections.emptyIterator();
}
From source file:com.google.idea.blaze.java.libraries.JarCache.java
private void refresh(@Nullable BlazeContext context, boolean removeMissingFiles) {
    if (!enabled || sourceFileToCacheKey == null) {
        return;
    }

    // Ensure the cache dir exists
    if (!cacheDir.exists()) {
        if (!cacheDir.mkdirs()) {
            LOG.error("Could not create jar cache directory");
            return;
        }
    }

    // Discover state of source jars
    ImmutableMap<File, Long> sourceFileTimestamps = FileDiffer.readFileState(sourceFileToCacheKey.keySet());
    if (sourceFileTimestamps == null) {
        return;
    }
    ImmutableMap.Builder<String, Long> sourceFileCacheKeyToTimestamp = ImmutableMap.builder();
    for (Map.Entry<File, Long> entry : sourceFileTimestamps.entrySet()) {
        String cacheKey = sourceFileToCacheKey.get(entry.getKey());
        sourceFileCacheKeyToTimestamp.put(cacheKey, entry.getValue());
    }

    // Discover current on-disk cache state
    File[] cacheFiles = cacheDir.listFiles();
    assert cacheFiles != null;
    ImmutableMap<File, Long> cacheFileTimestamps = FileDiffer.readFileState(Lists.newArrayList(cacheFiles));
    if (cacheFileTimestamps == null) {
        return;
    }
    ImmutableMap.Builder<String, Long> cachedFileCacheKeyToTimestamp = ImmutableMap.builder();
    for (Map.Entry<File, Long> entry : cacheFileTimestamps.entrySet()) {
        String cacheKey = entry.getKey().getName(); // Cache key == file name
        cachedFileCacheKeyToTimestamp.put(cacheKey, entry.getValue());
    }

    List<String> updatedFiles = Lists.newArrayList();
    List<String> removedFiles = Lists.newArrayList();
    FileDiffer.diffState(cachedFileCacheKeyToTimestamp.build(), sourceFileCacheKeyToTimestamp.build(),
            updatedFiles, removedFiles);

    ListeningExecutorService executor = FetchExecutor.EXECUTOR;
    List<ListenableFuture<?>> futures = Lists.newArrayList();
    Map<String, File> cacheKeyToSourceFile = sourceFileToCacheKey.inverse();
    for (String cacheKey : updatedFiles) {
        File sourceFile = cacheKeyToSourceFile.get(cacheKey);
        File cacheFile = cacheFileForKey(cacheKey);
        futures.add(executor.submit(() -> {
            try {
                Files.copy(Paths.get(sourceFile.getPath()), Paths.get(cacheFile.getPath()),
                        StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES);
            } catch (IOException e) {
                LOG.warn(e);
            }
        }));
    }
    if (removeMissingFiles) {
        for (String cacheKey : removedFiles) {
            File cacheFile = cacheFileForKey(cacheKey);
            futures.add(executor.submit(() -> {
                try {
                    Files.deleteIfExists(Paths.get(cacheFile.getPath()));
                } catch (IOException e) {
                    LOG.warn(e);
                }
            }));
        }
    }

    try {
        Futures.allAsList(futures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        LOG.warn(e);
    } catch (ExecutionException e) {
        LOG.error(e);
    }

    if (context != null && updatedFiles.size() > 0) {
        context.output(PrintOutput.log(String.format("Copied %d jars", updatedFiles.size())));
    }
    if (context != null && removedFiles.size() > 0 && removeMissingFiles) {
        context.output(PrintOutput.log(String.format("Removed %d jars", removedFiles.size())));
    }
    if (context != null) {
        try {
            File[] finalCacheFiles = cacheDir.listFiles();
            assert finalCacheFiles != null;
            ImmutableMap<File, Long> cacheFileSizes = FileSizeScanner
                    .readFilesizes(Lists.newArrayList(finalCacheFiles));
            Long total = cacheFileSizes.values().stream().reduce((size1, size2) -> size1 + size2).orElse(0L);
            context.output(PrintOutput.log(String.format("Total Jar Cache size: %d kB (%d files)",
                    total / 1024, finalCacheFiles.length)));
        } catch (Exception e) {
            LOG.warn("Could not determine cache size", e);
        }
    }
}
From source file:org.opendaylight.openflowplugin.impl.services.SalMetersBatchServiceImpl.java
@Override
public Future<RpcResult<RemoveMetersBatchOutput>> removeMetersBatch(final RemoveMetersBatchInput input) {
    LOG.trace("Removing meters @ {} : {}", PathUtil.extractNodeId(input.getNode()),
            input.getBatchRemoveMeters().size());
    final ArrayList<ListenableFuture<RpcResult<RemoveMeterOutput>>> resultsLot = new ArrayList<>();
    for (BatchRemoveMeters addMeter : input.getBatchRemoveMeters()) {
        final RemoveMeterInput removeMeterInput = new RemoveMeterInputBuilder(addMeter)
                .setMeterRef(createMeterRef(input.getNode(), addMeter)).setNode(input.getNode()).build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salMeterService.removeMeter(removeMeterInput)));
    }

    final ListenableFuture<RpcResult<List<BatchFailedMetersOutput>>> commonResult = Futures.transform(
            Futures.allAsList(resultsLot),
            MeterUtil.<RemoveMeterOutput>createCumulativeFunction(input.getBatchRemoveMeters()));

    ListenableFuture<RpcResult<RemoveMetersBatchOutput>> removeMetersBulkFuture = Futures
            .transform(commonResult, MeterUtil.METER_REMOVE_TRANSFORM);

    if (input.isBarrierAfter()) {
        removeMetersBulkFuture = BarrierUtil.chainBarrier(removeMetersBulkFuture, input.getNode(),
                transactionService, MeterUtil.METER_REMOVE_COMPOSING_TRANSFORM);
    }

    return removeMetersBulkFuture;
}
From source file:org.opendaylight.openflowplugin.impl.services.SalGroupsBatchServiceImpl.java
@Override
public Future<RpcResult<RemoveGroupsBatchOutput>> removeGroupsBatch(final RemoveGroupsBatchInput input) {
    LOG.trace("Removing groups @ {} : {}", PathUtil.extractNodeId(input.getNode()),
            input.getBatchRemoveGroups().size());
    final ArrayList<ListenableFuture<RpcResult<RemoveGroupOutput>>> resultsLot = new ArrayList<>();
    for (BatchRemoveGroups addGroup : input.getBatchRemoveGroups()) {
        final RemoveGroupInput removeGroupInput = new RemoveGroupInputBuilder(addGroup)
                .setGroupRef(createGroupRef(input.getNode(), addGroup)).setNode(input.getNode()).build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salGroupService.removeGroup(removeGroupInput)));
    }

    final ListenableFuture<RpcResult<List<BatchFailedGroupsOutput>>> commonResult = Futures.transform(
            Futures.allAsList(resultsLot),
            GroupUtil.<RemoveGroupOutput>createCumulatingFunction(input.getBatchRemoveGroups()));

    ListenableFuture<RpcResult<RemoveGroupsBatchOutput>> removeGroupsBulkFuture = Futures
            .transform(commonResult, GroupUtil.GROUP_REMOVE_TRANSFORM);

    if (input.isBarrierAfter()) {
        removeGroupsBulkFuture = BarrierUtil.chainBarrier(removeGroupsBulkFuture, input.getNode(),
                transactionService, GroupUtil.GROUP_REMOVE_COMPOSING_TRANSFORM);
    }

    return removeGroupsBulkFuture;
}
From source file:com.facebook.buck.distributed.build_client.PreBuildPhase.java
/** Run all steps required before the build. */
public Pair<StampedeId, ListenableFuture<Void>> runPreDistBuildLocalStepsAsync(
        ListeningExecutorService networkExecutorService, ProjectFilesystem projectFilesystem,
        FileHashCache fileHashCache, BuckEventBus eventBus, BuildId buildId, BuildMode buildMode,
        MinionRequirements minionRequirements, String repository, String tenantId,
        ListenableFuture<ParallelRuleKeyCalculator<RuleKey>> localRuleKeyCalculatorFuture)
        throws IOException, DistBuildRejectedException {
    ConsoleEventsDispatcher consoleEventsDispatcher = new ConsoleEventsDispatcher(eventBus);

    distBuildClientStats.startTimer(CREATE_DISTRIBUTED_BUILD);
    List<String> buildTargets = topLevelTargets.stream().map(BuildTarget::getFullyQualifiedName).sorted()
            .collect(Collectors.toList());
    BuildJob job = distBuildService.createBuild(buildId, buildMode, minionRequirements, repository, tenantId,
            buildTargets, buildLabel);
    distBuildClientStats.stopTimer(CREATE_DISTRIBUTED_BUILD);

    if (job.getBuildLabel() != null) {
        // Override the build label with the server-side inferred label.
        this.buildLabel = job.getBuildLabel();
        distBuildClientStats.setUserOrInferredBuildLabel(buildLabel);
    }

    StampedeId stampedeId = job.getStampedeId();
    eventBus.post(new DistBuildCreatedEvent(stampedeId));
    LOG.info("Created job. StampedeId = " + stampedeId.getId());

    consoleEventsDispatcher.postDistBuildStatusEvent(job, ImmutableList.of(), "SERIALIZING AND UPLOADING DATA");

    List<ListenableFuture<?>> asyncJobs = new LinkedList<>();

    asyncJobs.add(Futures.transformAsync(asyncJobState, jobState -> {
        LOG.info("Uploading local changes.");
        return distBuildService.uploadMissingFilesAsync(distBuildCellIndexer.getLocalFilesystemsByCellIndex(),
                jobState.fileHashes, distBuildClientStats, networkExecutorService);
    }, networkExecutorService));

    asyncJobs.add(Futures.transform(asyncJobState, jobState -> {
        LOG.info("Uploading target graph.");
        try {
            distBuildService.uploadTargetGraph(jobState, stampedeId, distBuildClientStats);
        } catch (IOException e) {
            throw new RuntimeException("Failed to upload target graph with exception.", e);
        }
        return null;
    }, networkExecutorService));

    LOG.info("Uploading buck dot-files.");
    asyncJobs.add(distBuildService.uploadBuckDotFilesAsync(stampedeId, projectFilesystem, fileHashCache,
            distBuildClientStats, networkExecutorService));

    asyncJobs.add(networkExecutorService.submit(() -> {
        LOG.info("Setting buck version.");
        try {
            distBuildService.setBuckVersion(stampedeId, buckVersion, distBuildClientStats);
        } catch (IOException e) {
            throw new RuntimeException("Failed to set buck-version with exception.", e);
        }
    }));

    DistBuildConfig distBuildConfig = new DistBuildConfig(buildExecutorArgs.getBuckConfig());
    if (distBuildConfig.isUploadFromLocalCacheEnabled()) {
        asyncJobs.add(Futures.transformAsync(localRuleKeyCalculatorFuture, localRuleKeyCalculator -> {
            try (ArtifactCacheByBuildRule artifactCache = new DistBuildArtifactCacheImpl(
                    actionAndTargetGraphs.getActionGraphAndBuilder().getActionGraphBuilder(),
                    networkExecutorService,
                    buildExecutorArgs.getArtifactCacheFactory().remoteOnlyInstance(true, false), eventBus,
                    localRuleKeyCalculator,
                    Optional.of(buildExecutorArgs.getArtifactCacheFactory().localOnlyInstance(true, false)))) {

                return new CacheOptimizedBuildTargetsQueueFactory(
                        actionAndTargetGraphs.getActionGraphAndBuilder().getActionGraphBuilder(), artifactCache,
                        /* isDeepRemoteBuild */ false, localRuleKeyCalculator.getRuleDepsCache(),
                        /* shouldBuildSelectedTargetsLocally */ false)
                                .uploadCriticalNodesFromLocalCache(topLevelTargets, distBuildClientStats);

            } catch (Exception e) {
                LOG.error(e, "Failed to create BuildTargetsQueue.");
                throw new RuntimeException(e);
            }
        }, networkExecutorService));
    }

    ListenableFuture<Void> asyncPrep = Futures.transform(Futures.allAsList(asyncJobs), results -> {
        LOG.info("Finished async preparation of stampede job.");
        consoleEventsDispatcher.postDistBuildStatusEvent(job, ImmutableList.of(), "STARTING REMOTE BUILD");

        // Everything is now setup remotely to run the distributed build. No more local prep.
        this.distBuildClientStats.stopTimer(LOCAL_PREPARATION);
        return null;
    }, MoreExecutors.directExecutor());

    return new Pair<StampedeId, ListenableFuture<Void>>(stampedeId, asyncPrep);
}
From source file:zipkin.storage.cassandra3.Cassandra3Storage.java
/** Truncates all the column families, or throws on any failure. */
@VisibleForTesting
void clear() {
    List<ListenableFuture<?>> futures = new LinkedList<>();
    for (String cf : ImmutableList.of(Schema.TABLE_TRACES, Schema.TABLE_TRACE_BY_SERVICE_SPAN,
            Schema.TABLE_SERVICE_SPANS, Schema.TABLE_DEPENDENCIES)) {
        futures.add(session.get().executeAsync(format("TRUNCATE %s", cf)));
    }
    Futures.getUnchecked(Futures.allAsList(futures));
}