Usage examples for com.google.common.util.concurrent.Futures.successfulAsList
@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> successfulAsList(
    Iterable<? extends ListenableFuture<? extends V>> futures)
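Before the project examples, a minimal self-contained sketch of the method's behavior (class and variable names here are illustrative, not from any of the sources below): the returned future completes once every input has completed, and any input that failed or was cancelled contributes a null entry at its position.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;

public class SuccessfulAsListDemo {
    public static void main(String[] args) {
        ListenableFuture<String> ok = Futures.immediateFuture("hello");
        ListenableFuture<String> bad =
                Futures.immediateFailedFuture(new IllegalStateException("boom"));

        // Completes once every input has completed; failed inputs show up as null
        // entries in the result list, in the order the futures were given.
        ListenableFuture<List<String>> combined = Futures.successfulAsList(ok, bad);

        Futures.addCallback(combined, new FutureCallback<List<String>>() {
            @Override
            public void onSuccess(List<String> results) {
                System.out.println(results); // prints [hello, null]
            }

            @Override
            public void onFailure(Throwable t) {
                // Not reached here: successfulAsList absorbs input failures as nulls.
            }
        }, MoreExecutors.directExecutor());
    }
}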
From source file:org.opendaylight.openflowplugin.impl.services.SalFlowsBatchServiceImpl.java
@Override
public Future<RpcResult<UpdateFlowsBatchOutput>> updateFlowsBatch(final UpdateFlowsBatchInput input) {
    LOG.trace("Updating flows @ {} : {}", PathUtil.extractNodeId(input.getNode()),
            input.getBatchUpdateFlows().size());

    final ArrayList<ListenableFuture<RpcResult<UpdateFlowOutput>>> resultsLot = new ArrayList<>();
    for (BatchUpdateFlows batchFlow : input.getBatchUpdateFlows()) {
        final UpdateFlowInput updateFlowInput = new UpdateFlowInputBuilder(input)
                .setOriginalFlow(new OriginalFlowBuilder(batchFlow.getOriginalBatchedFlow()).build())
                .setUpdatedFlow(new UpdatedFlowBuilder(batchFlow.getUpdatedBatchedFlow()).build())
                .setFlowRef(createFlowRef(input.getNode(), batchFlow))
                .setNode(input.getNode())
                .build();
        resultsLot.add(JdkFutureAdapters.listenInPoolThread(salFlowService.updateFlow(updateFlowInput)));
    }

    final ListenableFuture<RpcResult<List<BatchFailedFlowsOutput>>> commonResult = Futures.transform(
            Futures.successfulAsList(resultsLot),
            FlowUtil.<UpdateFlowOutput>createCumulatingFunction(input.getBatchUpdateFlows()));

    ListenableFuture<RpcResult<UpdateFlowsBatchOutput>> updateFlowsBulkFuture =
            Futures.transform(commonResult, FlowUtil.FLOW_UPDATE_TRANSFORM);

    if (input.isBarrierAfter()) {
        updateFlowsBulkFuture = BarrierUtil.chainBarrier(updateFlowsBulkFuture, input.getNode(),
                transactionService, FlowUtil.FLOW_UPDATE_COMPOSING_TRANSFORM);
    }
    return updateFlowsBulkFuture;
}
From source file:org.jclouds.packet.compute.strategy.CreateSshKeysThenCreateNodes.java
private void registerAutoGeneratedKeyPairCleanupCallbacks(Map<?, ListenableFuture<Void>> responses,
        final Set<String> generatedSshKeyIds) {
    // Futures.allAsList fails immediately if any of the futures fails. Futures.successfulAsList,
    // however, returns a list containing the results, with 'null' for each future that failed.
    // We want to wait for all of them (even the ones that fail), so the latter form is the better fit.
    ListenableFuture<List<Void>> aggregatedResponses = Futures.successfulAsList(responses.values());

    // Key pairs must be cleaned up after all futures have completed (even if some failed).
    Futures.addCallback(aggregatedResponses, new FutureCallback<List<Void>>() {
        @Override
        public void onSuccess(List<Void> result) {
            cleanupAutoGeneratedKeyPairs(generatedSshKeyIds);
        }

        @Override
        public void onFailure(Throwable t) {
            cleanupAutoGeneratedKeyPairs(generatedSshKeyIds);
        }

        private void cleanupAutoGeneratedKeyPairs(Set<String> generatedSshKeyIds) {
            logger.debug(">> cleaning up auto-generated key pairs...");
            for (String sshKeyId : generatedSshKeyIds) {
                try {
                    api.sshKeyApi().delete(sshKeyId);
                } catch (Exception ex) {
                    logger.warn(">> could not delete key pair %s: %s", sshKeyId, ex.getMessage());
                }
            }
        }
    }, userExecutor);
}
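As the comment above notes, the two aggregators differ in how they treat failed inputs. A minimal standalone contrast (illustrative, not part of the jclouds source):

ListenableFuture<String> good = Futures.immediateFuture("ok");
ListenableFuture<String> failed = Futures.immediateFailedFuture(new RuntimeException("boom"));

// allAsList propagates the first failure: calling get() on this future throws ExecutionException.
ListenableFuture<List<String>> strict = Futures.allAsList(good, failed);

// successfulAsList succeeds once all inputs complete, substituting null for failures: [ok, null].
ListenableFuture<List<String>> lenient = Futures.successfulAsList(good, failed);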
From source file:com.continuuity.weave.discovery.ZKDiscoveryService.java
private void updateService(NodeChildren children, final String service) {
    final String sb = "/" + service;
    final Multimap<String, Discoverable> newServices = HashMultimap.create(services.get());
    newServices.removeAll(service);

    // Fetch the data of all children nodes in parallel.
    List<OperationFuture<NodeData>> dataFutures = Lists.newArrayListWithCapacity(children.getChildren().size());
    for (String child : children.getChildren()) {
        String path = sb + "/" + child;
        dataFutures.add(zkClient.getData(path));
    }

    // Update the service map when all fetches are done.
    final ListenableFuture<List<NodeData>> fetchFuture = Futures.successfulAsList(dataFutures);
    fetchFuture.addListener(new Runnable() {
        @Override
        public void run() {
            for (NodeData nodeData : Futures.getUnchecked(fetchFuture)) {
                // For a successful fetch, decode the content.
                if (nodeData != null) {
                    Discoverable discoverable = decode(nodeData.getData());
                    if (discoverable != null) {
                        newServices.put(service, discoverable);
                    }
                }
            }
            // Replace the local service registry with the changes.
            services.set(newServices);
        }
    }, Threads.SAME_THREAD_EXECUTOR);
}
From source file:com.continuuity.weave.internal.kafka.client.KafkaBrokerCache.java
private void getTopic(final String path, final String topic) {
    Futures.addCallback(zkClient.getChildren(path, new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            // Other event types can either be ignored or are handled by the parent watcher.
            if (event.getType() == Event.EventType.NodeChildrenChanged) {
                getTopic(path, topic);
            }
        }
    }), new FutureCallback<NodeChildren>() {
        @Override
        public void onSuccess(NodeChildren result) {
            List<String> children = result.getChildren();
            final List<ListenableFuture<BrokerPartition>> futures =
                    Lists.newArrayListWithCapacity(children.size());

            // Fetch data from each broker node, collecting the transformed futures so the
            // listener below can inspect them.
            for (final String brokerId : children) {
                futures.add(Futures.transform(zkClient.getData(path + "/" + brokerId),
                        new Function<NodeData, BrokerPartition>() {
                            @Override
                            public BrokerPartition apply(NodeData input) {
                                return new BrokerPartition(brokerId,
                                        Integer.parseInt(new String(input.getData(), Charsets.UTF_8)));
                            }
                        }));
            }

            // When all fetching is done, build the partition size -> broker map for this topic.
            Futures.successfulAsList(futures).addListener(new Runnable() {
                @Override
                public void run() {
                    Map<Integer, Set<String>> partitionBrokers = Maps.newHashMap();
                    for (ListenableFuture<BrokerPartition> future : futures) {
                        try {
                            BrokerPartition info = future.get();
                            Set<String> brokerSet = partitionBrokers.get(info.getPartitionSize());
                            if (brokerSet == null) {
                                brokerSet = Sets.newHashSet();
                                partitionBrokers.put(info.getPartitionSize(), brokerSet);
                            }
                            brokerSet.add(info.getBrokerId());
                        } catch (Exception e) {
                            // Exception is ignored, as it will be handled by the parent watcher.
                        }
                    }
                    topicBrokers.put(topic, ImmutableSortedMap.copyOf(partitionBrokers));
                }
            }, Threads.SAME_THREAD_EXECUTOR);
        }

        @Override
        public void onFailure(Throwable t) {
            // No-op. The failure is already handled by the parent watcher
            // (e.g. node does not exist -> children change in the parent).
        }
    });
}
From source file:dk.ilios.spanner.internal.ExperimentingSpannerRun.java
/**
 * Schedule all the trials.
 *
 * <p>This method arranges all the {@link ScheduledTrial trials} to run according to their
 * scheduling criteria. The executor instance is responsible for enforcing max parallelism.
 */
private List<ListenableFuture<Trial.Result>> scheduleTrials(List<ScheduledTrial> trials,
        final ListeningExecutorService executor) {
    List<ListenableFuture<Trial.Result>> pendingTrials = Lists.newArrayList();
    List<ScheduledTrial> serialTrials = Lists.newArrayList();
    for (final ScheduledTrial scheduledTrial : trials) {
        if (scheduledTrial.policy() == TrialSchedulingPolicy.PARALLEL) {
            pendingTrials.add(executor.submit(scheduledTrial.trialTask()));
        } else {
            serialTrials.add(scheduledTrial);
        }
    }
    // A future representing the completion of all prior tasks. Futures.successfulAsList allows us
    // to ignore failure.
    ListenableFuture<?> previous = Futures.successfulAsList(pendingTrials);
    for (final ScheduledTrial scheduledTrial : serialTrials) {
        // Each of these trials can only start after all prior trials have finished, so we use
        // Futures.transform to force the sequencing.
        ListenableFuture<Trial.Result> current = Futures.transform(previous,
                new AsyncFunction<Object, Trial.Result>() {
                    @Override
                    public ListenableFuture<Trial.Result> apply(Object ignored) {
                        return executor.submit(scheduledTrial.trialTask());
                    }
                });
        pendingTrials.add(current);
        // Ignore failure of the prior task.
        previous = Futures.withFallback(current, FALLBACK_TO_NULL);
    }
    return pendingTrials;
}
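FALLBACK_TO_NULL is referenced but not shown in this excerpt. Against the pre-Guava-20 Futures.withFallback/FutureFallback API used above, a plausible definition might look like the following; this is a hypothetical sketch, not the project's actual code:

// Hypothetical sketch: turn any failure into an immediate null result, so the
// serial chain above keeps advancing past a failed trial.
private static final FutureFallback<Object> FALLBACK_TO_NULL = new FutureFallback<Object>() {
    @Override
    public ListenableFuture<Object> create(Throwable t) {
        return Futures.<Object>immediateFuture(null);
    }
};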
From source file:org.apache.twill.zookeeper.ZKOperations.java
/**
 * Deletes the given path recursively. The delete method will keep running until the given path is
 * successfully removed, which means that if new nodes are created under the given path while deleting,
 * they'll get deleted as well. If there is a {@link KeeperException} during the deletion other than
 * {@link KeeperException.NotEmptyException} or {@link KeeperException.NoNodeException},
 * the exception will be reflected in the result future and the deletion process will stop,
 * leaving the given path in an intermediate state.
 *
 * @param path The path to delete.
 * @return An {@link OperationFuture} that will be completed when the given path is deleted or bailed due to
 *         exception.
 */
public static OperationFuture<String> recursiveDelete(final ZKClient zkClient, final String path) {
    final SettableOperationFuture<String> resultFuture =
            SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);

    // Try to delete the given path.
    Futures.addCallback(zkClient.delete(path), new FutureCallback<String>() {
        private final FutureCallback<String> deleteCallback = this;

        @Override
        public void onSuccess(String result) {
            // Path deleted successfully. Operation done.
            resultFuture.set(result);
        }

        @Override
        public void onFailure(Throwable t) {
            // Failed to delete the given path.
            if (!(t instanceof KeeperException.NotEmptyException
                    || t instanceof KeeperException.NoNodeException)) {
                // For errors other than NotEmptyException, treat the operation as failed.
                resultFuture.setException(t);
                return;
            }
            // If it failed because of NotEmptyException, get the list of children under the given path.
            Futures.addCallback(zkClient.getChildren(path), new FutureCallback<NodeChildren>() {
                @Override
                public void onSuccess(NodeChildren result) {
                    // Delete all children nodes recursively.
                    final List<OperationFuture<String>> deleteFutures = Lists.newLinkedList();
                    for (String child : result.getChildren()) {
                        deleteFutures.add(recursiveDelete(zkClient, path + "/" + child));
                    }

                    // When deletion of all children has completed, delete the given path again.
                    Futures.successfulAsList(deleteFutures).addListener(new Runnable() {
                        @Override
                        public void run() {
                            for (OperationFuture<String> deleteFuture : deleteFutures) {
                                try {
                                    // If any exception occurred when deleting children,
                                    // treat the operation as failed.
                                    deleteFuture.get();
                                } catch (Exception e) {
                                    resultFuture.setException(e.getCause());
                                }
                            }
                            Futures.addCallback(zkClient.delete(path), deleteCallback,
                                    Threads.SAME_THREAD_EXECUTOR);
                        }
                    }, Threads.SAME_THREAD_EXECUTOR);
                }

                @Override
                public void onFailure(Throwable t) {
                    // If it failed to get the list of children, treat the operation as failed.
                    resultFuture.setException(t);
                }
            }, Threads.SAME_THREAD_EXECUTOR);
        }
    }, Threads.SAME_THREAD_EXECUTOR);

    return resultFuture;
}
From source file:com.netflix.metacat.main.services.search.ElasticSearchMetacatRefresh.java
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
    final List<QualifiedName> excludeQualifiedNames = config.getElasticSearchRefreshExcludeQualifiedNames();
    final List<String> tables = elasticSearchUtil.getTableIdsByCatalogs(ElasticSearchDoc.Type.table.name(),
            qNames, excludeQualifiedNames);
    final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream()
            .map(s -> service.submit(() -> {
                final QualifiedName tableName = QualifiedName.fromString(s, false);
                final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
                int offset = 0;
                int count;
                Sort sort;
                if ("s3".equals(tableName.getCatalogName()) || "aegisthus".equals(tableName.getCatalogName())) {
                    sort = new Sort("id", SortOrder.ASC);
                } else {
                    sort = new Sort("part_id", SortOrder.ASC);
                }
                final Pageable pageable = new Pageable(10000, offset);
                do {
                    final List<PartitionDto> partitionDtos =
                            partitionService.list(tableName, null, null, sort, pageable, true, true, true);
                    count = partitionDtos.size();
                    if (!partitionDtos.isEmpty()) {
                        final List<List<PartitionDto>> partitionedPartitionDtos =
                                Lists.partition(partitionDtos, 1000);
                        partitionedPartitionDtos.forEach(subPartitionsDtos ->
                                indexFutures.add(indexPartitionDtos(tableName, subPartitionsDtos)));
                        offset = offset + count;
                        pageable.setOffset(offset);
                    }
                } while (count == 10000);
                return Futures.transform(Futures.successfulAsList(indexFutures),
                        Functions.constant((Void) null));
            })).collect(Collectors.toList());
    final ListenableFuture<Void> processPartitionsFuture = Futures
            .transformAsync(Futures.successfulAsList(futures), input -> {
                final List<ListenableFuture<Void>> inputFuturesWithoutNulls =
                        input.stream().filter(NOT_NULL).collect(Collectors.toList());
                return Futures.transform(Futures.successfulAsList(inputFuturesWithoutNulls),
                        Functions.constant(null));
            });
    return Futures.transformAsync(processPartitionsFuture, input -> {
        elasticSearchUtil.refresh();
        final List<ListenableFuture<Void>> cleanUpFutures = tables.stream()
                .map(s -> service.submit(
                        () -> partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames)))
                .collect(Collectors.toList());
        return Futures.transform(Futures.successfulAsList(cleanUpFutures), Functions.constant(null));
    });
}
From source file:org.opendaylight.openflowplugin.impl.util.FlatBatchUtil.java
/**
 * Merges a list of futures with partial results into one ListenableFuture with a single result.
 *
 * @param firedJobs list of ListenableFutures with RPC results {@link ProcessFlatBatchOutput}
 * @return ListenableFuture of an RPC result with combined status and all errors + batch failures
 */
public static ListenableFuture<RpcResult<ProcessFlatBatchOutput>> mergeJobsResultsFutures(
        final List<ListenableFuture<RpcResult<ProcessFlatBatchOutput>>> firedJobs) {
    return Futures.transform(Futures.successfulAsList(firedJobs), mergeRpcResults());
}
From source file:com.dogecoin.dogecoinj.net.discovery.TorDiscovery.java
private Collection<InetSocketAddress> lookupAddresses(long timeoutValue, TimeUnit timeoutUnit,
        List<Circuit> circuits) throws InterruptedException {
    createThreadPool(circuits.size() * hostNames.length);

    try {
        List<ListenableFuture<Lookup>> lookupFutures = Lists.newArrayList();
        for (final Circuit circuit : circuits) {
            for (final String seed : hostNames) {
                lookupFutures.add(threadPool.submit(new Callable<Lookup>() {
                    @Override
                    public Lookup call() throws Exception {
                        return new Lookup(circuit.getFinalCircuitNode().getRouter(), lookup(circuit, seed));
                    }
                }));
            }
        }

        threadPool.awaitTermination(timeoutValue, timeoutUnit);
        int timeouts = 0;
        for (ListenableFuture<Lookup> future : lookupFutures) {
            if (!future.isDone()) {
                timeouts++;
                future.cancel(true);
            }
        }
        if (timeouts > 0)
            log.warn("{} DNS lookups timed out", timeouts);

        try {
            List<Lookup> lookups = new ArrayList<Lookup>(Futures.successfulAsList(lookupFutures).get());
            // Any failures will result in null entries. Remove them.
            lookups.removeAll(singleton(null));

            // Use a map to enforce one result per exit node
            // TODO: randomize result selection better
            Map<HexDigest, InetSocketAddress> lookupMap = Maps.newHashMap();
            for (Lookup lookup : lookups) {
                InetSocketAddress address = new InetSocketAddress(lookup.address, netParams.getPort());
                lookupMap.put(lookup.router.getIdentityHash(), address);
            }
            return lookupMap.values();
        } catch (ExecutionException e) {
            // Cannot happen, successfulAsList accepts failures
            throw new RuntimeException(e);
        }
    } finally {
        shutdownThreadPool();
    }
}
From source file:org.thingsboard.server.service.state.DefaultDeviceStateService.java
private void onClusterUpdateSync() {
    List<Tenant> tenants = tenantService.findTenants(new TextPageLink(Integer.MAX_VALUE)).getData();
    for (Tenant tenant : tenants) {
        List<ListenableFuture<DeviceStateData>> fetchFutures = new ArrayList<>();
        List<Device> devices = deviceService
                .findDevicesByTenantId(tenant.getId(), new TextPageLink(Integer.MAX_VALUE)).getData();
        for (Device device : devices) {
            if (!routingService.resolveById(device.getId()).isPresent()) {
                if (!deviceStates.containsKey(device.getId())) {
                    fetchFutures.add(fetchDeviceState(device));
                }
            } else {
                Set<DeviceId> tenantDeviceSet = tenantDevices.get(tenant.getId());
                if (tenantDeviceSet != null) {
                    tenantDeviceSet.remove(device.getId());
                }
                deviceStates.remove(device.getId());
            }
        }
        try {
            Futures.successfulAsList(fetchFutures).get().forEach(this::addDeviceUsingState);
        } catch (InterruptedException | ExecutionException e) {
            log.warn("Failed to init device state service from DB", e);
        }
    }
}