Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usage for com.google.common.util.concurrent Futures allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
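A minimal, self-contained sketch of the call pattern the examples below share: submit tasks to a ListeningExecutorService, combine the resulting futures with allAsList, and block on get(). The class name and the squaring tasks are illustrative only and are not taken from any of the projects listed under Usage. If any input future fails, the combined future fails with that exception instead of returning a list.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListSketch {
    public static void main(String[] args) throws Exception {
        // Wrap a plain ExecutorService so submit() returns ListenableFuture.
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        try {
            List<ListenableFuture<Integer>> futures = new ArrayList<>();
            for (int i = 0; i < 4; i++) {
                final int n = i;
                futures.add(executor.submit(() -> n * n));
            }
            // allAsList combines the inputs in order; get() blocks until all
            // succeed, or throws ExecutionException as soon as one input fails.
            List<Integer> results = Futures.allAsList(futures).get();
            System.out.println(results); // [0, 1, 4, 9]
        } finally {
            executor.shutdown();
        }
    }
}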

Usage

From source file:com.google.gerrit.server.update.ReviewDbBatchUpdate.java

private List<ChangeTask> executeChangeOps(boolean parallel, boolean dryrun)
        throws UpdateException, RestApiException {
    List<ChangeTask> tasks;
    boolean success = false;
    Stopwatch sw = Stopwatch.createStarted();
    try {
        logDebug("Executing change ops (parallel? {})", parallel);
        ListeningExecutorService executor = parallel ? changeUpdateExector
                : MoreExecutors.newDirectExecutorService();

        tasks = new ArrayList<>(ops.keySet().size());
        try {
            if (notesMigration.commitChangeWrites() && repoView != null) {
                // A NoteDb change may have been rebuilt since the repo was originally
                // opened, so make sure we see that.
                logDebug("Preemptively scanning for repo changes");
                repoView.getRepository().scanForRepoChanges();
            }
            if (!ops.isEmpty() && notesMigration.failChangeWrites()) {
                // Fail fast before attempting any writes if changes are read-only, as
                // this is a programmer error.
                logDebug("Failing early due to read-only Changes table");
                throw new OrmException(NoteDbUpdateManager.CHANGES_READ_ONLY);
            }
            List<ListenableFuture<?>> futures = new ArrayList<>(ops.keySet().size());
            for (Map.Entry<Change.Id, Collection<BatchUpdateOp>> e : ops.asMap().entrySet()) {
                ChangeTask task = new ChangeTask(e.getKey(), e.getValue(), Thread.currentThread(), dryrun);
                tasks.add(task);
                if (!parallel) {
                    logDebug("Direct execution of task for ops: {}", ops);
                }
                futures.add(executor.submit(task));
            }
            if (parallel) {
                logDebug("Waiting on futures for {} ops spanning {} changes", ops.size(), ops.keySet().size());
            }
            Futures.allAsList(futures).get();

            if (notesMigration.commitChangeWrites()) {
                if (!dryrun) {
                    executeNoteDbUpdates(tasks);
                }
            }
            success = true;
        } catch (ExecutionException | InterruptedException e) {
            Throwables.throwIfInstanceOf(e.getCause(), UpdateException.class);
            Throwables.throwIfInstanceOf(e.getCause(), RestApiException.class);
            throw new UpdateException(e);
        } catch (OrmException | IOException e) {
            throw new UpdateException(e);
        }
    } finally {
        metrics.executeChangeOpsLatency.record(success, sw.elapsed(NANOSECONDS), NANOSECONDS);
    }
    return tasks;
}

From source file:org.hawkular.alerts.engine.impl.CassAlertsServiceImpl.java

@Override
public void removeAlertTags(String tenantId, Collection<String> alertIds, Collection<String> tags)
        throws Exception {
    if (isEmpty(tenantId)) {
        throw new IllegalArgumentException("TenantId must be not null");
    }
    if (isEmpty(alertIds)) {
        throw new IllegalArgumentException("AlertIds must be not null");
    }
    if (isEmpty(tags)) {
        throw new IllegalArgumentException("Tags must be not null");
    }

    // Only untag existing alerts
    AlertsCriteria criteria = new AlertsCriteria();
    criteria.setAlertIds(alertIds);
    Page<Alert> existingAlerts = getAlerts(tenantId, criteria, null);

    PreparedStatement updateAlert = CassStatement.get(session, CassStatement.UPDATE_ALERT);
    PreparedStatement deleteTag = CassStatement.get(session, CassStatement.DELETE_TAG);

    try {
        List<ResultSetFuture> futures = new ArrayList<>();
        BatchStatement batch = new BatchStatement(batchType);
        int i = 0;
        for (Alert a : existingAlerts) {
            tags.stream().forEach(tag -> {
                if (a.getTags().containsKey(tag)) {
                    batch.add(deleteTag.bind(tenantId, TagType.ALERT.name(), tag, a.getTags().get(tag),
                            a.getId()));
                    a.removeTag(tag);
                }
            });
            batch.add(updateAlert.bind(JsonUtil.toJson(a), tenantId, a.getAlertId()));
            i += batch.size();
            if (i > batchSize) {
                futures.add(session.executeAsync(batch));
                batch.clear();
                i = 0;
            }
        }
        if (batch.size() > 0) {
            futures.add(session.executeAsync(batch));
        }
        Futures.allAsList(futures).get();

    } catch (Exception e) {
        msgLog.errorDatabaseException(e.getMessage());
        throw e;
    }
}

From source file:com.ning.maven.plugins.dependencyversionscheck.AbstractDependencyVersionsMojo.java

/**
 * Creates a map of all version resolutions used in this project in a given scope. The result is a map from artifactName to a list of version numbers used in the project, based on the element
 * requesting the version.
 *
 * If the special scope "null" is used, a superset of all scopes is used (this is used by the check mojo).
 */
protected Map buildResolutionMap(final String scope)
        throws MojoExecutionException, InvalidDependencyVersionException, ProjectBuildingException,
        ArtifactResolutionException, ArtifactNotFoundException {
    final String[] visibleScopes = (String[]) VISIBLE_SCOPES.get(scope);
    final String[] transitiveScopes = (String[]) TRANSITIVE_SCOPES.get(scope);

    if (visibleScopes == null) {
        throw new MojoExecutionException("No valid scopes found for '" + scope + "'");
    }

    // Map from artifactName --> list of resolutions found on the tree
    final SortedMap resolutionMap = Collections.synchronizedSortedMap(new TreeMap());
    final List futures = new ArrayList();
    LOG.debug("Using parallel dependency resolution: " + useParallelDependencyResolution);

    for (final Iterator iter = project.getDependencies().iterator(); iter.hasNext();) {
        final Dependency dependency = (Dependency) iter.next();

        if (useParallelDependencyResolution) {
            futures.add(executorService.submit(new Runnable() {
                public void run() {
                    try {
                        updateResolutionMapForDep(visibleScopes, transitiveScopes, resolutionMap, dependency);
                    } catch (Exception e) {
                        Throwables.propagate(e);
                    }
                }
            }));
        } else {
            updateResolutionMapForDep(visibleScopes, transitiveScopes, resolutionMap, dependency);
        }
    }
    if (useParallelDependencyResolution) {
        try {
            Futures.allAsList(futures).get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            Throwables.propagate(e);
        } catch (ExecutionException e) {
            Throwables.propagate(e);
        }
    }
    return resolutionMap;
}

From source file:org.apache.druid.segment.realtime.appenderator.BaseAppenderatorDriver.java

/**
 * Drop segments in background. The segments should be pushed (in batch ingestion) or published (in streaming
 * ingestion) before being dropped.
 *
 * @param segmentsAndMetadata result of pushing or publishing
 *
 * @return a future for dropping segments
 */
ListenableFuture<SegmentsAndMetadata> dropInBackground(SegmentsAndMetadata segmentsAndMetadata) {
    log.info("Dropping segments[%s]", segmentsAndMetadata.getSegments());
    final ListenableFuture<?> dropFuture = Futures.allAsList(segmentsAndMetadata.getSegments().stream()
            .map(segment -> appenderator.drop(SegmentIdentifier.fromDataSegment(segment)))
            .collect(Collectors.toList()));

    return Futures.transform(dropFuture, (Function<Object, SegmentsAndMetadata>) x -> {
        final Object metadata = segmentsAndMetadata.getCommitMetadata();
        return new SegmentsAndMetadata(segmentsAndMetadata.getSegments(),
                metadata == null ? null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata());
    });
}

From source file:com.facebook.buck.core.build.engine.impl.CachingBuildEngine.java

public ListenableFuture<?> walkRule(BuildRule rule, Set<BuildRule> seen) {
    return Futures.transformAsync(Futures.immediateFuture(ruleDeps.get(rule)), deps -> {
        List<ListenableFuture<?>> results1 = new ArrayList<>(SortedSets.sizeEstimate(deps));
        for (BuildRule dep : deps) {
            if (seen.add(dep)) {
                results1.add(walkRule(dep, seen));
            }
        }
        return Futures.allAsList(results1);
    }, serviceByAdjustingDefaultWeightsTo(SCHEDULING_MORE_WORK_RESOURCE_AMOUNTS));
}

From source file:org.opendaylight.openflowplugin.applications.frsync.impl.strategy.SyncPlanPushStrategyIncrementalImpl.java

ListenableFuture<RpcResult<Void>> updateTableFeatures(final InstanceIdentifier<FlowCapableNode> nodeIdent,
        final FlowCapableNode flowCapableNodeConfigured) {
    // CHECK if while pushing the update, updateTableInput can be null to emulate a table add
    final List<Table> tableList = ReconcileUtil.safeTables(flowCapableNodeConfigured);

    final List<ListenableFuture<RpcResult<UpdateTableOutput>>> allResults = new ArrayList<>();
    for (Table table : tableList) {
        TableKey tableKey = table.getKey();
        KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> tableFeaturesII = nodeIdent
                .child(TableFeatures.class, new TableFeaturesKey(tableKey.getId()));
        List<TableFeatures> tableFeatures = flowCapableNodeConfigured.getTableFeatures();
        if (tableFeatures != null) {
            for (TableFeatures tableFeaturesItem : tableFeatures) {
                // TODO uncomment java.lang.NullPointerException
                // at
                // org.opendaylight.openflowjava.protocol.impl.serialization.match.AbstractOxmMatchEntrySerializer.serializeHeader(AbstractOxmMatchEntrySerializer.java:31
                // allResults.add(JdkFutureAdapters.listenInPoolThread(
                // tableForwarder.update(tableFeaturesII, null, tableFeaturesItem, nodeIdent)));
            }
        }
    }

    final ListenableFuture<RpcResult<Void>> singleVoidResult = Futures.transform(Futures.allAsList(allResults),
            ReconcileUtil.<UpdateTableOutput>createRpcResultCondenser("table update"));

    return Futures.transform(singleVoidResult,
            ReconcileUtil.chainBarrierFlush(PathUtil.digNodePath(nodeIdent), transactionService));
}

From source file:org.hawkular.alerts.engine.impl.CassActionsServiceImpl.java

@Override
public Page<Action> getActions(String tenantId, ActionsCriteria criteria, Pager pager) throws Exception {
    if (isEmpty(tenantId)) {
        throw new IllegalArgumentException("TenantId must be not null");
    }
    boolean thin = (null != criteria && criteria.isThin());
    boolean filter = (null != criteria && criteria.hasCriteria());

    List<Action> actions = new ArrayList<>();
    Set<ActionHistoryPK> actionPks = new HashSet<>();

    if (filter) {
        /*
         * Get Action PKs filtered by ctime
         */
        Set<ActionHistoryPK> actionPKsfilteredByCtime = new HashSet<>();
        boolean filterByCtime = filterByCtime(tenantId, actionPKsfilteredByCtime, criteria);
        if (filterByCtime) {
            actionPks.addAll(actionPKsfilteredByCtime);
            if (actionPks.isEmpty()) {
                return new Page<>(actions, pager, 0);
            }
        }

        /*
         * Get Action PKs filtered by actionPlugin
         */
        Set<ActionHistoryPK> actionPKsfilteredByActionPlugin = new HashSet<>();
        boolean filterByActionPlugin = filterByActionPlugin(tenantId, actionPKsfilteredByActionPlugin,
                criteria);
        if (filterByActionPlugin) {
            if (actionPks.isEmpty()) {
                actionPks.addAll(actionPKsfilteredByActionPlugin);
            } else {
                actionPks.retainAll(actionPKsfilteredByActionPlugin);
            }
            if (actionPks.isEmpty()) {
                return new Page<>(actions, pager, 0);
            }
        }

        /*
         * Get Action PKs filtered by actionId
         */
        Set<ActionHistoryPK> actionPKsfilteredByActionId = new HashSet<>();
        boolean filterByActionId = filterByActionId(tenantId, actionPKsfilteredByActionId, criteria);
        if (filterByActionId) {
            if (actionPks.isEmpty()) {
                actionPks.addAll(actionPKsfilteredByActionId);
            } else {
                actionPks.retainAll(actionPKsfilteredByActionId);
            }
            if (actionPks.isEmpty()) {
                return new Page<>(actions, pager, 0);
            }
        }

        /*
         * Get Action PKs filtered by alertId
         */
        Set<ActionHistoryPK> actionPKsfilteredByAlertId = new HashSet<>();
        boolean filterByAlertId = filterByAlertId(tenantId, actionPKsfilteredByAlertId, criteria);
        if (filterByAlertId) {
            if (actionPks.isEmpty()) {
                actionPks.addAll(actionPKsfilteredByAlertId);
            } else {
                actionPks.retainAll(actionPKsfilteredByAlertId);
            }
            if (actionPks.isEmpty()) {
                return new Page<>(actions, pager, 0);
            }
        }

        /*
         * Get Action PKs filtered by result
         */
        Set<ActionHistoryPK> actionPKsfilteredByResult = new HashSet<>();
        boolean filterByResult = filterByResult(tenantId, actionPKsfilteredByResult, criteria);
        if (filterByResult) {
            if (actionPks.isEmpty()) {
                actionPks.addAll(actionPKsfilteredByResult);
            } else {
                actionPks.retainAll(actionPKsfilteredByResult);
            }
            if (actionPks.isEmpty()) {
                return new Page<>(actions, pager, 0);
            }
        }
    }

    if (!filter) {
        /*
         * Get all actions
         */
        PreparedStatement selectActionHistoryByTenant = CassStatement.get(session,
                CassStatement.SELECT_ACTION_HISTORY_BY_TENANT);
        ResultSet rsActionHistoryByTenant = session.execute(selectActionHistoryByTenant.bind(tenantId));
        Iterator<Row> itActionHistoryByTenant = rsActionHistoryByTenant.iterator();
        while (itActionHistoryByTenant.hasNext()) {
            Row row = itActionHistoryByTenant.next();
            Action actionHistory = JsonUtil.fromJson(row.getString("payload"), Action.class, thin);
            actions.add(actionHistory);
        }
    } else {
        PreparedStatement selectActionHistory = CassStatement.get(session, CassStatement.SELECT_ACTION_HISTORY);
        List<ResultSetFuture> futures = actionPks.stream()
                .map(actionPk -> session.executeAsync(selectActionHistory.bind(actionPk.tenantId,
                        actionPk.actionPlugin, actionPk.actionId, actionPk.alertId, actionPk.ctime)))
                .collect(Collectors.toList());
        List<ResultSet> rsActionHistory = Futures.allAsList(futures).get();
        rsActionHistory.stream().forEach(r -> {
            for (Row row : r) {
                Action actionHistory = JsonUtil.fromJson(row.getString("payload"), Action.class, thin);
                actions.add(actionHistory);
            }
        });
    }

    return preparePage(actions, pager);
}

From source file:org.opendaylight.vbd.impl.VbdBridgeDomain.java

private ListenableFuture<Void> handleUpdatedModifiedNodes(final HashMap<TerminationPoint, Node> modifiedNodes) {
    final List<ListenableFuture<Void>> cumulativeTask = new ArrayList<>();
    modifiedNodes.forEach((terminationPoint, node) -> {
        if (terminationPoint != null
                && terminationPoint.getAugmentation(TerminationPointVbridgeAugment.class) != null
                && node.getNodeId() != null) {
            final TerminationPointVbridgeAugment termPointVbridgeAug = terminationPoint
                    .getAugmentation(TerminationPointVbridgeAugment.class);
            final Collection<KeyedInstanceIdentifier<Node, NodeKey>> instanceIdentifiersVPP = nodesToVpps
                    .get(node.getNodeId());
            //TODO: probably iterate via all instance identifiers.
            if (!instanceIdentifiersVPP.isEmpty()) {
                final DataBroker dataBroker = VbdUtil
                        .resolveDataBrokerForMountPoint(instanceIdentifiersVPP.iterator().next(), mountService);
                cumulativeTask
                        .add(vppModifier.addInterfaceToBridgeDomainOnVpp(dataBroker, termPointVbridgeAug));
            }
        }
    });
    final ListenableFuture<List<Void>> completedCumulativeTask = Futures.allAsList(cumulativeTask);
    return transform(completedCumulativeTask);
}

From source file:org.hawkular.alerts.engine.impl.CassDefinitionsServiceImpl.java

private void removeTrigger(Trigger trigger) throws Exception {
    String tenantId = trigger.getTenantId();
    String triggerId = trigger.getId();

    PreparedStatement deleteDampenings = CassStatement.get(session, CassStatement.DELETE_DAMPENINGS);
    PreparedStatement deleteConditions = CassStatement.get(session, CassStatement.DELETE_CONDITIONS);
    PreparedStatement deleteActions = CassStatement.get(session, CassStatement.DELETE_TRIGGER_ACTIONS);
    PreparedStatement deleteTrigger = CassStatement.get(session, CassStatement.DELETE_TRIGGER);

    if (deleteDampenings == null || deleteConditions == null || deleteActions == null
            || deleteTrigger == null) {
        throw new RuntimeException("delete*Triggers PreparedStatement is null");
    }

    try {
        deleteTags(tenantId, TagType.TRIGGER, triggerId, trigger.getTags());
        deleteTriggerActions(tenantId, triggerId);
        List<ResultSetFuture> futures = new ArrayList<>();
        futures.add(session.executeAsync(deleteDampenings.bind(tenantId, triggerId)));
        futures.add(session.executeAsync(deleteConditions.bind(tenantId, triggerId)));
        futures.add(session.executeAsync(deleteActions.bind(tenantId, triggerId)));
        futures.add(session.executeAsync(deleteTrigger.bind(tenantId, triggerId)));
        Futures.allAsList(futures).get();
    } catch (Exception e) {
        msgLog.errorDatabaseException(e.getMessage());
        throw e;
    }

    /*
    Trigger should be removed from the alerts engine.
     */
    if (null != alertsEngine) {
        alertsEngine.removeTrigger(tenantId, triggerId);
    }

    notifyListeners(new DefinitionsEvent(Type.TRIGGER_REMOVE, tenantId, triggerId, trigger.getTags()));
}

From source file:com.facebook.buck.core.build.engine.impl.CachingBuildEngine.java

@Override
public int getNumRulesToBuild(Iterable<BuildRule> rules) {
    Set<BuildRule> seen = Sets.newConcurrentHashSet();
    ImmutableList.Builder<ListenableFuture<?>> results = ImmutableList.builder();
    for (BuildRule rule : rules) {
        if (seen.add(rule)) {
            results.add(walkRule(rule, seen));
        }
    }
    Futures.getUnchecked(Futures.allAsList(results.build()));
    return seen.size();
}