Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find usage examples for com.google.common.util.concurrent Futures allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
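
For orientation, here is a minimal, self-contained sketch of the basic contract (class and variable names are illustrative): the combined future succeeds with the input values in input order, and fails fast as soon as any input future fails. The lenient variant, Futures.successfulAsList, substitutes null for failed inputs instead of failing the whole list.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService pool = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));
        try {
            ListenableFuture<Integer> a = pool.submit(() -> 1);
            ListenableFuture<Integer> b = pool.submit(() -> 2);
            // The combined future completes only when both inputs complete;
            // the values keep the order of the inputs, not completion order.
            List<Integer> values = Futures.allAsList(a, b).get();
            System.out.println(values); // prints [1, 2]
        } finally {
            pool.shutdown();
        }
    }
}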

Usage

From source file:org.opendaylight.faas.fabric.general.FabricManagementAPIProvider.java

private String checkFabricOptions(final ComposeFabricInputBuilder input) {
    List<ServiceCapabilities> capabilities = null;
    if (input.getOptions() != null) {
        capabilities = input.getOptions().getCapabilitySupported();
    }

    if (capabilities == null) {
        capabilities = Lists.newArrayList();
    }
    Set<ServiceCapabilities> allDevCapabilities = Sets.newHashSet();

    List<DeviceNodes> devices = input.getDeviceNodes();
    if (devices == null || devices.isEmpty()) {
        return "No device can support the capability of fabric.";
    }

    ReadOnlyTransaction rt = dataBroker.newReadOnlyTransaction();
    List<CheckedFuture<Optional<FabricCapableDevice>, ReadFailedException>> futures = Lists.newArrayList();
    for (DeviceNodes device : devices) {
        @SuppressWarnings("unchecked")
        InstanceIdentifier<Node> devIid = (InstanceIdentifier<Node>) device.getDeviceRef().getValue();
        CheckedFuture<Optional<FabricCapableDevice>, ReadFailedException> future = rt
                .read(LogicalDatastoreType.OPERATIONAL, devIid.augmentation(FabricCapableDevice.class));
        futures.add(future);
    }

    try {
        List<Optional<FabricCapableDevice>> capDevices = Futures.allAsList(futures).get();

        for (Optional<FabricCapableDevice> opt : capDevices) {
            if (opt.isPresent()) {
                FabricCapableDevice capDevice = opt.get();
                if (capDevice.getCapabilitySupported() != null) {
                    allDevCapabilities.addAll(capDevice.getCapabilitySupported());
                }
                if (capDevice.getSupportedFabric() != null) {
                    boolean supported = false;
                    switch (input.getType()) {
                    case VXLAN:
                        supported = capDevice.getSupportedFabric().contains(VxlanFabric.class);
                        break;
                    case VLAN:
                        supported = capDevice.getSupportedFabric().contains(VlanFabric.class);
                        break;
                    default:
                        break;
                    }
                    if (!supported) {
                        return String.format("Device does not support this fabric type.");
                    }
                } else {
                    return String.format("Device does not support this fabric type.");
                }
            } else {
                return String.format("Device is not a fabric capable device.");
            }
        }

        for (ServiceCapabilities cap : capabilities) {
            if (!allDevCapabilities.contains(cap)) {
                return String.format("No device can support this capability [%s].", cap.name());
            }
        }

        if (capabilities.isEmpty()) {
            OptionsBuilder builder = input.getOptions() == null ? new OptionsBuilder()
                    : new OptionsBuilder(input.getOptions());

            input.setOptions(builder.setCapabilitySupported(Lists.newArrayList(allDevCapabilities)).build());
        }
    } catch (InterruptedException | ExecutionException e) {
        LOG.error("", e);
        return "Exception ocurred when reading DomStore.";
    }
    return null;
}

From source file:com.facebook.buck.distributed.MaterializerDummyFileHashCache.java

public ListenableFuture<?> getMaterializationFuturesAsList() {
    return Futures.allAsList(fileMaterializationFuturesByFileHashEntry.values());
}
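
Returning the aggregate future, as this method does, leaves the waiting policy to the caller, who can either block on get() or attach a listener. A minimal caller-side sketch (cache stands for an instance of the class above, LOG for an assumed logger):

ListenableFuture<?> done = cache.getMaterializationFuturesAsList();
done.addListener(
        () -> LOG.info("All pending file materializations have completed."),
        MoreExecutors.directExecutor());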

From source file:io.druid.server.lookup.cache.LookupCoordinatorManager.java

void updateNodes(Collection<URL> urls, final Map<String, Map<String, Object>> knownLookups)
        throws IOException, InterruptedException, ExecutionException {
    if (knownLookups == null) {
        LOG.debug("No config for lookups found");
        return;
    }
    if (knownLookups.isEmpty()) {
        LOG.debug("No known lookups. Skipping update");
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Updating %d lookups on %d nodes", knownLookups.size(), urls.size());
    }
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    updateAllOnHost(url, knownLookups);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    LOG.warn("Update on [%s] interrupted", url);
                    throw Throwables.propagate(e);
                } catch (IOException | ExecutionException e) {
                    // Don't raise as ExecutionException. Just log and continue
                    LOG.makeAlert(e, "Error submitting to [%s]", url).emit();
                }
            }
        }));
    }
    final ListenableFuture<List<Object>> allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        LOG.warn("Timeout in updating hosts! Attempting to cancel");
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
    }
}
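
The timeout-and-cancel idiom above, distilled into a standalone sketch (method name and parameters are illustrative): cancelling the combined future with mayInterruptIfRunning=true propagates cancellation to the input futures, which is what interrupts the stragglers.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

static void awaitAllWithTimeout(List<ListenableFuture<?>> futures, long timeoutMillis)
        throws InterruptedException, ExecutionException {
    ListenableFuture<List<Object>> all = Futures.allAsList(futures);
    try {
        all.get(timeoutMillis, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        // Cancelling the aggregate future also cancels (and, with true,
        // interrupts) any input futures that have not finished yet.
        all.cancel(true);
    }
}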

From source file:com.jivesoftware.os.tasmo.lib.process.traversal.InitiateTraversal.java

private void commitChanges(WrittenEventContext writtenEventContext, TenantIdAndCentricId tenantIdAndCentricId,
        List<ListenableFuture<List<ViewFieldChange>>> futures)
        throws InterruptedException, ExecutionException, CommitChangeException {
    ListenableFuture<List<List<ViewFieldChange>>> allAsList = Futures.allAsList(futures);
    List<ViewFieldChange> writeableChanges = new ArrayList<>();
    for (List<ViewFieldChange> changes : allAsList.get()) {
        writeableChanges.addAll(changes);
    }
    writtenEventContext.getCommitChange().commitChange(writtenEventContext, tenantIdAndCentricId,
            writeableChanges);
}
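
A generic version of the flattening step used above, sketched under the same blocking assumptions: wait for every per-future list to resolve, then concatenate them in input order.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;

static <T> List<T> flattenAll(List<ListenableFuture<List<T>>> futures)
        throws InterruptedException, ExecutionException {
    List<T> out = new ArrayList<>();
    // allAsList yields a List<List<T>>; concatenate the sublists.
    for (List<T> part : Futures.allAsList(futures).get()) {
        out.addAll(part);
    }
    return out;
}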

From source file:org.hawkular.alerts.engine.impl.CassDefinitionsServiceImpl.java

private void insertTriggerActions(Trigger trigger) throws Exception {
    PreparedStatement insertTriggerActions = CassStatement.get(session, CassStatement.INSERT_TRIGGER_ACTIONS);
    if (insertTriggerActions == null) {
        throw new RuntimeException("insertTriggerActions PreparedStatement is null");
    }
    if (trigger.getActions() != null) {
        trigger.getActions().forEach(triggerAction -> {
            triggerAction.setTenantId(trigger.getTenantId());
        });
        List<ResultSetFuture> futures = new ArrayList<>();
        BatchStatement batch = new BatchStatement(batchType);
        int i = 0;
        for (TriggerAction triggerAction : trigger.getActions()) {
            batch.add(insertTriggerActions.bind(trigger.getTenantId(), trigger.getId(),
                    triggerAction.getActionPlugin(), triggerAction.getActionId(),
                    JsonUtil.toJson(triggerAction)));
            i += batch.size();
            if (i > batchSize) {
                futures.add(session.executeAsync(batch));
                batch.clear();
                i = 0;
            }
        }
        if (batch.size() > 0) {
            futures.add(session.executeAsync(batch));
        }
        Futures.allAsList(futures).get();
    }
}
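
allAsList accepts any ListenableFuture implementation, which is why it composes directly with the Cassandra driver here: ResultSetFuture implements ListenableFuture<ResultSet>, so the final get() acts as a write barrier. A minimal sketch of that barrier idiom (driver 3.x types assumed):

import com.datastax.driver.core.ResultSetFuture;
import com.google.common.util.concurrent.Futures;
import java.util.List;

static void awaitWrites(List<ResultSetFuture> futures) throws Exception {
    // The combined future is a ListenableFuture<List<ResultSet>>; get()
    // blocks until every batch is acknowledged or one of them fails.
    Futures.allAsList(futures).get();
}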

From source file:com.rackspacecloud.blueflood.outputs.handlers.RollupHandler.java

/**
 * This method gets the points from the DB and then rolls them up according to the granularity.
 *
 * Breaks up the number of ranges into buckets based on ROLLUP_ON_READ_REPAIR_SIZE_PER_THREAD and executes
 * the buckets in parallel.
 *
 * @param locator metric key within the DB
 * @param g the granularity
 * @param from the starting timestamp of the range (ms)
 * @param to the ending timestamp of the range (ms)
 *
 * @return a list of rolled-up points
 */
private List<Points.Point> repairRollupsOnRead(final Locator locator, Granularity g, long from, long to) {
    Timer.Context c = timerRepairRollupsOnRead.time();

    List<Points.Point> repairedPoints = new ArrayList<Points.Point>();
    List<ListenableFuture<List<Points.Point>>> futures = new ArrayList<ListenableFuture<List<Points.Point>>>();

    for (final Iterable<Range> ranges : divideRangesByGroup(g, from, to)) {
        futures.add(
                createRepairPointsExecutor.submit(new Callable<List<Points.Point>>() {
                    @Override
                    public List<Points.Point> call() throws Exception {
                        return createRepairPoints(ranges, locator);
                    }
                }));
    }

    ListenableFuture<List<List<Points.Point>>> aggregateFuture = Futures.allAsList(futures);

    try {
        for (List<Points.Point> subList : aggregateFuture.get(rollupOnReadTimeout.getValue(),
                rollupOnReadTimeout.getUnit())) {

            repairedPoints.addAll(subList);
        }
    } catch (Exception e) {
        aggregateFuture.cancel(true);
        exceededQueryTimeout.mark();
        log.warn("Exception encountered while doing rollups on read, incomplete rollups will be returned.", e);
    }

    c.stop();

    return repairedPoints;
}
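
Since the log message above promises that incomplete rollups will be returned, one alternative worth noting (a sketch, not the project's actual code) is Futures.successfulAsList, which never fails because of an input failure; failed or cancelled inputs simply appear as null entries:

// Assumes the same futures, rollupOnReadTimeout, and repairedPoints
// variables as in the method above.
List<List<Points.Point>> partial = Futures.successfulAsList(futures)
        .get(rollupOnReadTimeout.getValue(), rollupOnReadTimeout.getUnit());
for (List<Points.Point> sub : partial) {
    if (sub != null) { // null marks a range whose repair failed
        repairedPoints.addAll(sub);
    }
}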

From source file:org.opendaylight.mdsal.singleton.dom.impl.ClusterSingletonServiceGroupImpl.java

@SuppressWarnings("checkstyle:IllegalCatch")
private void lostOwnership() {
    LOG.debug("LostLeadership method for service Provider {}", clusterSingletonGroupIdentifier);
    boolean needReleaseLock = false;
    boolean needCloseProviderInstance = false;
    try {
        clusterLock.acquire();
        needReleaseLock = true;
        final List<ListenableFuture<Void>> serviceCloseFutureList = new ArrayList<>();
        if (hasOwnership) {
            Verify.verify(asyncCloseEntityCandidateReg != null);
            for (final ClusterSingletonServiceRegistrationDelegator service : serviceGroup) {
                try {
                    serviceCloseFutureList.add(service.closeServiceInstance());
                } catch (final RuntimeException e) {
                    LOG.error("Unexpected exception while closing service: {}, resuming with next..",
                            service.getIdentifier());
                }
            }
            hasOwnership = false;
        }

        final ListenableFuture<List<Void>> destroyFuture = Futures.allAsList(serviceCloseFutureList);
        if (serviceEntityCandidateReg != null) {
            // we don't want to remove this instance from map
            Futures.addCallback(destroyFuture, newAsyncCloseCallback(clusterLock, false));
        } else {
            // we have to remove this ClusterSingletonServiceGroup instance from map
            Futures.addCallback(destroyFuture, newAsyncCloseCallback(clusterLock, true));
        }
        /*
         * We want to stop all possible EOS activities before closing the
         * close-candidate registration that acts as a guard, so we must not
         * release the Semaphore (clusterLock) until we are fully finished.
         * The lock is therefore released in a FutureCallback, after the service
         * instance has fully closed and before service ownership is relinquished.
         */
        needReleaseLock = false;
    } catch (final InterruptedException e) {
        LOG.error("Unexpected exception state for service Provider {} in LostLeadership",
                clusterSingletonGroupIdentifier, e);
        needCloseProviderInstance = true;
    } finally {
        closeResources(needReleaseLock, needCloseProviderInstance);
    }
}
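
The non-blocking counterpart of the earlier blocking examples, distilled: register a FutureCallback on the combined future instead of calling get(). A minimal sketch; note that current Guava requires the three-argument addCallback overload with an explicit Executor, while the two-argument form used in the source above existed in older releases:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;

static void releaseWhenAllClosed(List<ListenableFuture<Void>> closeFutures, Runnable release) {
    Futures.addCallback(Futures.allAsList(closeFutures),
            new FutureCallback<List<Void>>() {
                @Override
                public void onSuccess(List<Void> results) {
                    release.run(); // every service instance closed cleanly
                }

                @Override
                public void onFailure(Throwable t) {
                    release.run(); // fail-fast: the first failure lands here
                }
            },
            MoreExecutors.directExecutor());
}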

From source file:com.facebook.buck.distributed.build_slave.CacheOptimizedBuildTargetsQueueFactory.java

/**
 * Create {@link BuildTargetsQueue} with the given parameters.
 *
 * @param targetsToBuild top-level targets that need to be built.
 * @return an instance of {@link BuildTargetsQueue} with the top-level targets at the root.
 */
public ReverseDepBuildTargetsQueue createBuildTargetsQueue(Iterable<BuildTarget> targetsToBuild,
        CoordinatorBuildRuleEventsPublisher coordinatorBuildRuleEventsPublisher,
        int mostBuildRulesFinishedPercentageThreshold) {
    LOG.info("Starting to create the %s.", BuildTargetsQueue.class.getName());
    GraphTraversalData results = traverseGraphFromTopLevelUsingAvailableCaches(targetsToBuild);

    // Notify distributed build clients that they should not wait for any of the nodes that were
    // pruned (as they will never be built remotely)
    ImmutableList<String> prunedTargets = ImmutableList
            .copyOf(results.prunedRules.stream().filter(BuildRule::isCacheable) // Client always skips uncacheables
                    .map(BuildRule::getFullyQualifiedName).collect(Collectors.toList()));

    int numTotalCachableRules = results.visitedRules.size() - results.uncachableTargets.size()
            + prunedTargets.size();
    LOG.info(String.format("[%d/%d] cacheable build rules were pruned from graph.", prunedTargets.size(),
            numTotalCachableRules));
    coordinatorBuildRuleEventsPublisher.createBuildRuleStartedEvents(prunedTargets);
    coordinatorBuildRuleEventsPublisher.createBuildRuleCompletionEvents(prunedTargets);

    if (shouldBuildSelectedTargetsLocally) {
        // Consider all (transitively) 'buildLocally' rules as uncachable for DistBuild purposes - we
        // cannot build them remotely and, hence, we cannot put them in cache for local client to
        // consume.
        // NOTE: this needs to be after uncacheability property is used for graph nodes visiting (and,
        // hence, pruning and scheduling) - we want caches to be checked for these rules while doing
        // the visiting (local build could have uploaded artifacts for these rules).
        ImmutableList<String> transitiveBuildLocallyTargets = ImmutableList
                .copyOf(findTransitiveBuildLocallyTargets(results));
        results.uncachableTargets.addAll(transitiveBuildLocallyTargets);
        // Unlock all rules which will not be built remotely so that local client does not get stuck
        // waiting for them (some of them may be cachable from client point of view). DO NOT use
        // completed/finished events as we are building deps of these rules remotely.
        coordinatorBuildRuleEventsPublisher.createBuildRuleUnlockedEvents(transitiveBuildLocallyTargets);
    }

    // Do the reference counting and create the EnqueuedTargets.
    ImmutableSet.Builder<DistributableNode> zeroDependencyNodes = ImmutableSet.builder();
    ImmutableMap.Builder<String, DistributableNode> allNodes = ImmutableMap.builder();
    for (BuildRule buildRule : results.visitedRules) {
        String target = buildRule.getFullyQualifiedName();
        Iterable<String> currentRevDeps;
        if (results.allReverseDeps.containsKey(target)) {
            currentRevDeps = results.allReverseDeps.get(target);
        } else {
            currentRevDeps = new ArrayList<>();
        }

        DistributableNode distributableNode = new DistributableNode(target, ImmutableSet.copyOf(currentRevDeps),
                ImmutableSet.copyOf(Objects.requireNonNull(results.allForwardDeps.get(target))),
                results.uncachableTargets.contains(target));
        allNodes.put(target, distributableNode);

        if (distributableNode.areAllDependenciesResolved()) {
            zeroDependencyNodes.add(distributableNode);
        }
    }

    // Wait for local uploads (in case of local coordinator) to finish.
    try {
        LOG.info("Waiting for cache uploads to finish.");
        Futures.allAsList(artifactCache.getAllUploadRuleFutures()).get();
    } catch (InterruptedException | ExecutionException e) {
        LOG.error(e, "Failed to upload artifacts from the local cache.");
    }

    return new ReverseDepBuildTargetsQueue(
            new DistributableBuildGraph(allNodes.build(), zeroDependencyNodes.build()),
            mostBuildRulesFinishedPercentageThreshold);
}

From source file:com.facebook.buck.distributed.DistBuildService.java

public ListenableFuture<Void> uploadBuckDotFiles(final BuildId id, final ProjectFilesystem filesystem,
        FileHashCache fileHashCache, ListeningExecutorService executorService) throws IOException {
    ListenableFuture<Pair<List<FileInfo>, List<PathInfo>>> filesFuture = executorService
            .submit(new Callable<Pair<List<FileInfo>, List<PathInfo>>>() {
                @Override
                public Pair<List<FileInfo>, List<PathInfo>> call() throws IOException {

                    Path[] buckDotFilesExceptConfig = Arrays.stream(filesystem.listFiles(Paths.get(".")))
                            .filter(f -> !f.isDirectory()).filter(f -> !Files.isSymbolicLink(f.toPath()))
                            .filter(f -> f.getName().startsWith(".")).filter(f -> f.getName().contains("buck"))
                            .filter(f -> !f.getName().startsWith(".buckconfig")).map(f -> f.toPath())
                            .toArray(Path[]::new);

                    List<FileInfo> fileEntriesToUpload = new LinkedList<>();
                    List<PathInfo> pathEntriesToUpload = new LinkedList<>();
                    for (Path path : buckDotFilesExceptConfig) {
                        FileInfo fileInfoObject = new FileInfo();
                        fileInfoObject.setContent(filesystem.readFileIfItExists(path).get().getBytes());
                        fileInfoObject.setContentHash(fileHashCache.get(path.toAbsolutePath()).toString());
                        fileEntriesToUpload.add(fileInfoObject);

                        PathInfo pathInfoObject = new PathInfo();
                        pathInfoObject.setPath(path.toString());
                        pathInfoObject.setContentHash(fileHashCache.get(path.toAbsolutePath()).toString());
                        pathEntriesToUpload.add(pathInfoObject);
                    }

                    return new Pair<List<FileInfo>, List<PathInfo>>(fileEntriesToUpload, pathEntriesToUpload);
                }
            });

    ListenableFuture<Void> setFilesFuture = Futures.transformAsync(filesFuture,
            new AsyncFunction<Pair<List<FileInfo>, List<PathInfo>>, Void>() {
                @Override
                public ListenableFuture<Void> apply(
                        @Nullable Pair<List<FileInfo>, List<PathInfo>> filesAndPaths) throws IOException {
                    setBuckDotFiles(id, filesAndPaths.getSecond());
                    return Futures.immediateFuture(null);
                }
            }, executorService);

    ListenableFuture<Void> uploadFilesFuture = Futures.transformAsync(filesFuture,
            new AsyncFunction<Pair<List<FileInfo>, List<PathInfo>>, Void>() {
                @Override
                public ListenableFuture<Void> apply(
                        @Nullable Pair<List<FileInfo>, List<PathInfo>> filesAndPaths) throws Exception {
                    uploadMissingFilesFromList(filesAndPaths.getFirst(), executorService);
                    return Futures.immediateFuture(null);
                }
            }, executorService);

    return Futures.transform(Futures.allAsList(ImmutableList.of(setFilesFuture, uploadFilesFuture)),
            new Function<List<Void>, Void>() {
                @Nullable
                @Override
                public Void apply(@Nullable List<Void> input) {
                    return null;
                }
            });
}
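
The final Futures.transform above exists only to collapse List<Void> into Void. Newer Guava versions (a version assumption) offer Futures.whenAllSucceed, which states that intent more directly; a sketch reusing the two futures from the method above:

ListenableFuture<Void> done = Futures
        .whenAllSucceed(setFilesFuture, uploadFilesFuture)
        .call(() -> null, executorService); // runs once both inputs succeed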

From source file:org.hawkular.alerts.engine.impl.CassAlertsServiceImpl.java

@Override
public void addEventTags(String tenantId, Collection<String> eventIds, Map<String, String> tags)
        throws Exception {
    if (isEmpty(tenantId)) {
        throw new IllegalArgumentException("TenantId must be not null");
    }
    if (isEmpty(eventIds)) {
        throw new IllegalArgumentException("EventIds must be not null");
    }
    if (isEmpty(tags)) {
        throw new IllegalArgumentException("Tags must be not null");
    }

    // Only tag existing events
    EventsCriteria criteria = new EventsCriteria();
    criteria.setEventIds(eventIds);
    Page<Event> existingEvents = getEvents(tenantId, criteria, null);

    PreparedStatement updateEvent = CassStatement.get(session, CassStatement.UPDATE_EVENT);
    PreparedStatement insertTag = CassStatement.get(session, CassStatement.INSERT_TAG);

    try {
        List<ResultSetFuture> futures = new ArrayList<>();
        BatchStatement batch = new BatchStatement(batchType);
        int i = 0;
        for (Event e : existingEvents) {
            tags.entrySet().stream().forEach(tag -> {
                e.addTag(tag.getKey(), tag.getValue());
                batch.add(insertTag.bind(tenantId, TagType.EVENT.name(), tag.getKey(), tag.getValue(),
                        e.getId()));
            });
            batch.add(updateEvent.bind(JsonUtil.toJson(e), tenantId, e.getId()));

        }
        if (batch.size() > 0) {
            futures.add(session.executeAsync(batch));
        }
        Futures.allAsList(futures).get();

    } catch (Exception e) {
        msgLog.errorDatabaseException(e.getMessage());
        throw e;
    }
}