Example usage for com.google.common.util.concurrent Futures successfulAsList

Introduction

On this page you can find example usage of com.google.common.util.concurrent.Futures.successfulAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> successfulAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its successful input futures. The list of results is in the same order as the input list; if any input future fails or is cancelled, its corresponding position contains null instead.
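
A minimal, self-contained sketch (not taken from the examples below; the class name is illustrative) shows the defining behavior: a failed input does not fail the combined future, it simply appears as null in the result.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;

public class SuccessfulAsListDemo {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        ListenableFuture<String> ok = executor.submit(() -> "ok");
        ListenableFuture<String> failed =
                Futures.immediateFailedFuture(new RuntimeException("boom"));

        // Unlike allAsList, the combined future still succeeds;
        // the failed input becomes null, in input order.
        ListenableFuture<List<String>> combined =
                Futures.successfulAsList(Arrays.asList(ok, failed));
        System.out.println(combined.get()); // prints [ok, null]

        executor.shutdown();
    }
}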

Usage

From source file:com.microsoft.intellij.helpers.o365.Office365ManagerImpl.java

private <E extends DirectoryObject, F extends ODataEntityFetcher<E, ? extends DirectoryObjectOperations>, O extends ODataOperations> ListenableFuture<List<E>> getAllObjects(
        final ODataCollectionFetcher<E, F, O> fetcher) {

    return Futures.transform(fetcher.read(), new AsyncFunction<List<E>, List<E>>() {
        @Override
        public ListenableFuture<List<E>> apply(List<E> entities) throws Exception {
            return Futures.successfulAsList(
                    Lists.transform(entities, new Function<E, ListenableFuture<? extends E>>() {
                        @Override
                        public ListenableFuture<? extends E> apply(E e) {
                            return fetcher.getById(e.getobjectId()).read();
                        }
                    }));
        }
    });
}

From source file:com.netflix.metacat.connector.hive.sql.DirectSqlGetPartition.java

private List<PartitionHolder> getPartitions(final String databaseName, final String tableName,
        @Nullable final List<String> partitionIds, @Nullable final String filterExpression,
        @Nullable final Sort sort, @Nullable final Pageable pageable, final boolean includePartitionDetails,
        final boolean forceDisableAudit) {
    final FilterPartition filter = new FilterPartition();
    // batch exists
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression)
            && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression)
            && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetExtractor<List<PartitionHolder>> handler = rs -> {
        final List<PartitionHolder> result = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression)
                    || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));

                result.add(new PartitionHolder(id, sdId, serdeId,
                        PartitionInfo.builder()
                                .name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name))
                                .auditInfo(auditInfo).serde(storageInfo).build()));
            }
        }
        return result;
    };

    final List<PartitionHolder> partitions = this.getHandlerResults(databaseName, tableName, filterExpression,
            partitionIds, SQL.SQL_GET_PARTITIONS, handler, sort, pageable, forceDisableAudit);

    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionHolder partitionHolder : partitions) {
            partIds.add(partitionHolder.getId());
            sdIds.add(partitionHolder.getSdId());
            serdeIds.add(partitionHolder.getSerdeId());
        }
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(
                () -> populateParameters(partIds, SQL.SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));

        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor()
                    .submit(() -> populateParameters(sdIds, SQL.SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                    () -> populateParameters(serdeIds, SQL.SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        try {
            Futures.transform(Futures.successfulAsList(futures), Functions.constant(null)).get(1,
                    TimeUnit.HOURS);
        } catch (Exception e) {
            Throwables.propagate(e);
        }

        for (PartitionHolder partitionHolder : partitions) {
            partitionHolder.getPartitionInfo().setMetadata(partitionParams.get(partitionHolder.getId()));
            partitionHolder.getPartitionInfo().getSerde()
                    .setParameters(sdParams.get(partitionHolder.getSdId()));
            partitionHolder.getPartitionInfo().getSerde()
                    .setSerdeInfoParameters(serdeParams.get(partitionHolder.getSerdeId()));

        }
    }
    return partitions;
}
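
In the try block above, successfulAsList serves purely as a completion barrier: the parameter maps are filled as side effects of the submitted tasks, transform(..., Functions.constant(null)) discards the list of results, and get(1, TimeUnit.HOURS) bounds the wait while letting a failed lookup leave its map empty rather than abort the whole fetch. A stripped-down, hypothetical sketch of that idiom (dropping the transform step and blocking on the combined future directly):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CompletionBarrierSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(3));

        // Each task fills a shared map as a side effect, standing in for
        // the populateParameters(...) calls above.
        Map<Long, String> partitionParams = new ConcurrentHashMap<>();
        Map<Long, String> serdeParams = new ConcurrentHashMap<>();

        List<ListenableFuture<?>> futures = new ArrayList<>();
        futures.add(executor.submit(() -> partitionParams.put(1L, "p")));
        futures.add(executor.submit(() -> serdeParams.put(2L, "s")));

        // Completion barrier: returns once every task has finished,
        // successfully or not, bounded by a timeout.
        Futures.successfulAsList(futures).get(1, TimeUnit.HOURS);

        System.out.println(partitionParams + " " + serdeParams);
        executor.shutdown();
    }
}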

From source file:com.netflix.metacat.main.services.search.ElasticSearchMetacatRefresh.java

@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processCatalogs(final List<String> catalogNames) {
    log.info("Start: Full refresh of catalogs: {}", catalogNames);
    final List<ListenableFuture<CatalogDto>> getCatalogFutures = catalogNames.stream()
            .map(catalogName -> service.submit(() -> {
                CatalogDto result = null;
                try {
                    result = getCatalog(catalogName);
                } catch (Exception e) {
                    log.error("Failed to retrieve catalog: {}", catalogName);
                    elasticSearchUtil.log("ElasticSearchMetacatRefresh.getCatalog",
                            ElasticSearchDoc.Type.catalog.name(), catalogName, null, e.getMessage(), e, true);
                }
                return result;
            })).collect(Collectors.toList());
    return Futures.transformAsync(Futures.successfulAsList(getCatalogFutures), input -> {
        final List<ListenableFuture<Void>> processCatalogFutures = input.stream().filter(NOT_NULL)
                .map(catalogDto -> {
                    final List<QualifiedName> databaseNames = getDatabaseNamesToRefresh(catalogDto);
                    return _processDatabases(catalogDto.getName(), databaseNames);
                }).filter(NOT_NULL).collect(Collectors.toList());
        return Futures.transform(Futures.successfulAsList(processCatalogFutures), Functions.constant(null));
    });
}
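
Because failed fetches surface as null entries in the list produced by successfulAsList, the filter(NOT_NULL) steps above are what skip them. A distilled, hypothetical sketch of that null-filtering idiom, assuming the same Guava version as these examples (with the two-argument transform):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

public class FilterNullsSketch {
    public static void main(String[] args) throws Exception {
        List<ListenableFuture<String>> fetches = Arrays.asList(
                Futures.immediateFuture("catalog-a"),
                Futures.immediateFailedFuture(new RuntimeException("unreachable")),
                Futures.immediateFuture("catalog-b"));

        // Failed fetches appear as nulls; drop them before the next stage,
        // mirroring the filter(NOT_NULL) steps above.
        ListenableFuture<List<String>> usable = Futures.transform(
                Futures.successfulAsList(fetches),
                input -> input.stream().filter(Objects::nonNull).collect(Collectors.toList()));

        System.out.println(usable.get()); // prints [catalog-a, catalog-b]
    }
}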

From source file:org.thingsboard.server.dao.relation.BaseRelationService.java

@Override
public ListenableFuture<List<EntityRelationInfo>> findInfoByQuery(TenantId tenantId,
        EntityRelationsQuery query) {
    log.trace("Executing findInfoByQuery [{}]", query);
    ListenableFuture<List<EntityRelation>> relations = findByQuery(tenantId, query);
    EntitySearchDirection direction = query.getParameters().getDirection();
    return Futures.transformAsync(relations, relations1 -> {
        List<ListenableFuture<EntityRelationInfo>> futures = new ArrayList<>();
        relations1.forEach(relation -> futures.add(fetchRelationInfoAsync(tenantId, relation,
                relation2 -> direction == EntitySearchDirection.FROM ? relation2.getTo() : relation2.getFrom(),
                (EntityRelationInfo relationInfo, String entityName) -> {
                    if (direction == EntitySearchDirection.FROM) {
                        relationInfo.setToName(entityName);
                    } else {
                        relationInfo.setFromName(entityName);
                    }
                })));
        return Futures.successfulAsList(futures);
    });
}

From source file:org.rhq.enterprise.server.cloud.StorageNodeManagerBean.java

@Override
@RequiredPermission(Permission.MANAGE_SETTINGS)
public ListenableFuture<List<StorageNodeLoadComposite>> getLoadAsync(Subject subject, StorageNode node,
        long beginTime, long endTime) {
    Stopwatch stopwatch = stopwatchStart();
    final StorageNodeLoadComposite result = new StorageNodeLoadComposite(node, beginTime, endTime);
    try {
        if (!storageClientManager.isClusterAvailable()) {
            return Futures.successfulAsList(Lists.newArrayList(Futures.immediateFuture(result)));
        }
        int storageNodeResourceId;
        try {
            storageNodeResourceId = getResourceIdFromStorageNode(node);
        } catch (ResourceNotFoundException e) {
            log.warn(e.getMessage());
            return Futures.successfulAsList(Lists.newArrayList(Futures.immediateFuture(result)));
        }
        try {
            final String host = InetAddress.getByName(node.getAddress()).getCanonicalHostName();
            if (!node.getAddress().equals(host)) {
                result.setHostname(host + " (" + node.getAddress() + ")");
            }
        } catch (UnknownHostException e) {
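            // ignored: fall back to showing the node's raw address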
        }
        MetricsServer metricsServer = storageClientManager.getMetricsServer();
        Map<String, Integer> scheduleIdsMap = new HashMap<String, Integer>();

        for (Object[] tuple : getChildrenScheduleIds(storageNodeResourceId, true)) {
            String definitionName = (String) tuple[0];
            Integer scheduleId = (Integer) tuple[2];
            scheduleIdsMap.put(definitionName, scheduleId);
        }
        for (Object[] tuple : getGrandchildrenScheduleIds(storageNodeResourceId, true)) {
            String definitionName = (String) tuple[0];
            Integer scheduleId = (Integer) tuple[2];
            scheduleIdsMap.put(definitionName, scheduleId);
        }

        List<ListenableFuture<StorageNodeLoadComposite>> compositeFutures = new ArrayList<ListenableFuture<StorageNodeLoadComposite>>();
        final MeasurementAggregate totalDiskUsedAggregate = new MeasurementAggregate(0d, 0d, 0d);
        Integer scheduleId = null;

        // find the aggregates and enrich the result instance
        if (scheduleIdsMap.isEmpty()) {
            // no scheduled metrics yet
            return Futures.successfulAsList(Lists.newArrayList(Futures.immediateFuture(result)));
        }

        if ((scheduleId = scheduleIdsMap.get(METRIC_FREE_DISK_TO_DATA_RATIO)) != null) {
            ListenableFuture<AggregateNumericMetric> dataFuture = metricsServer
                    .getSummaryAggregateAsync(scheduleId, beginTime, endTime);
            ListenableFuture<StorageNodeLoadComposite> compositeFuture = Futures.transform(dataFuture,
                    new Function<AggregateNumericMetric, StorageNodeLoadComposite>() {
                        @Override
                        public StorageNodeLoadComposite apply(AggregateNumericMetric metric) {
                            result.setFreeDiskToDataSizeRatio(new MeasurementAggregate(metric.getMin(),
                                    metric.getAvg(), metric.getMax()));
                            return result;
                        }
                    });
            compositeFutures.add(wrapFuture(compositeFuture, result,
                    "Failed to retrieve metric [" + METRIC_FREE_DISK_TO_DATA_RATIO + "] data for " + node));
        }
        if ((scheduleId = scheduleIdsMap.get(METRIC_HEAP_USED_PERCENTAGE)) != null) {
            ListenableFuture<StorageNodeLoadComposite.MeasurementAggregateWithUnits> dataFuture = getMeasurementAggregateWithUnitsAsync(
                    scheduleId, MeasurementUnits.PERCENTAGE, beginTime, endTime);
            ListenableFuture<StorageNodeLoadComposite> compositeFuture = Futures.transform(dataFuture,
                    new Function<StorageNodeLoadComposite.MeasurementAggregateWithUnits, StorageNodeLoadComposite>() {
                        @Override
                        public StorageNodeLoadComposite apply(
                                StorageNodeLoadComposite.MeasurementAggregateWithUnits metric) {
                            result.setHeapPercentageUsed(metric);
                            return result;
                        }
                    });
            compositeFutures.add(wrapFuture(compositeFuture, result,
                    "Failed to retrieve metric [" + METRIC_HEAP_USED_PERCENTAGE + "] data for " + node));
        }

        return Futures.successfulAsList(compositeFutures);
    } finally {
        if (log.isDebugEnabled()) {
            stopwatchEnd(stopwatch, "Retrieved load metrics for " + node + " in ");
        }
    }
}

From source file:org.apache.brooklyn.core.mgmt.persist.BrooklynMementoPersisterToObjectStore.java

/** See {@link BrooklynPersistenceUtils} for conveniences for using this method. */
@Override
@Beta
public void checkpoint(BrooklynMementoRawData newMemento, PersistenceExceptionHandler exceptionHandler) {
    checkWritesAllowed();
    try {
        lock.writeLock().lockInterruptibly();
    } catch (InterruptedException e) {
        throw Exceptions.propagate(e);
    }

    try {
        objectStore.prepareForMasterUse();

        Stopwatch stopwatch = Stopwatch.createStarted();
        List<ListenableFuture<?>> futures = Lists.newArrayList();

        for (BrooklynObjectType type : BrooklynPersistenceUtils.STANDARD_BROOKLYN_OBJECT_TYPE_PERSISTENCE_ORDER) {
            for (Map.Entry<String, String> entry : newMemento.getObjectsOfType(type).entrySet()) {
                futures.add(asyncPersist(type.getSubPathName(), type, entry.getKey(), entry.getValue(),
                        exceptionHandler));
            }
        }

        try {
            // Wait for all the tasks to complete or fail, rather than aborting on the first failure.
            // But then propagate the failure if any task failed (hence the two calls).
            Futures.successfulAsList(futures).get();
            Futures.allAsList(futures).get();
        } catch (Exception e) {
            throw Exceptions.propagate(e);
        }
        if (LOG.isDebugEnabled())
            LOG.debug("Checkpointed entire memento in {}", Time.makeTimeStringRounded(stopwatch));
    } finally {
        lock.writeLock().unlock();
    }
}
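
The two-call pattern in checkpoint() above is worth isolating: the first get() waits for every persist task to settle, so none is abandoned mid-write when a sibling fails, and the second get() then propagates the first failure. A minimal sketch of that pairing (the class name and the failure are illustrative):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;

public class WaitAllThenPropagateSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        List<ListenableFuture<String>> futures = Arrays.asList(
                executor.submit(() -> "written"),
                Futures.immediateFailedFuture(new RuntimeException("disk full")));

        try {
            // Wait for all tasks to complete or fail, rather than
            // aborting on the first failure...
            Futures.successfulAsList(futures).get();
            // ...then propagate the failure, if any: this get() throws
            // an ExecutionException wrapping "disk full".
            Futures.allAsList(futures).get();
        } finally {
            executor.shutdown();
        }
    }
}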

From source file:com.netflix.metacat.main.services.search.ElasticSearchRefresh.java

@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processCatalogs(final List<String> catalogNames) {
    log.info("Start: Full refresh of catalogs: {}", catalogNames);
    final List<ListenableFuture<CatalogDto>> getCatalogFutures = catalogNames.stream()
            .map(catalogName -> service.submit(() -> {
                CatalogDto result = null;
                try {
                    result = getCatalog(catalogName);
                } catch (Exception e) {
                    log.error("Failed to retrieve catalog: {}", catalogName);
                    elasticSearchUtil.log("ElasticSearchRefresh.getCatalog",
                            ElasticSearchDoc.Type.catalog.name(), catalogName, null, e.getMessage(), e, true);
                }
                return result;
            })).collect(Collectors.toList());
    return Futures.transformAsync(Futures.successfulAsList(getCatalogFutures), input -> {
        final List<ListenableFuture<Void>> processCatalogFutures = input.stream().filter(NOT_NULL)
                .map(catalogDto -> {
                    final List<QualifiedName> databaseNames = getDatabaseNamesToRefresh(catalogDto);
                    return _processDatabases(catalogDto.getName(), databaseNames);
                }).filter(NOT_NULL).collect(Collectors.toList());
        return Futures.transform(Futures.successfulAsList(processCatalogFutures), Functions.constant(null));
    });
}

From source file:com.netflix.metacat.main.services.search.ElasticSearchMetacatRefresh.java

/**
 * Process the list of databases.
 *
 * @param catalogName   catalog name
 * @param databaseNames database names
 * @return future
 */
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processDatabases(final QualifiedName catalogName,
        final List<QualifiedName> databaseNames) {
    ListenableFuture<Void> resultFuture = null;
    log.info("Full refresh of catalog {} for databases({}): {}", catalogName, databaseNames.size(),
            databaseNames);
    final List<ListenableFuture<DatabaseDto>> getDatabaseFutures = databaseNames.stream()
            .map(databaseName -> service.submit(() -> {
                DatabaseDto result = null;
                try {
                    result = getDatabase(databaseName);
                } catch (Exception e) {
                    log.error("Failed to retrieve database: {}", databaseName);
                    elasticSearchUtil.log("ElasticSearchMetacatRefresh.getDatabase",
                            ElasticSearchDoc.Type.database.name(), databaseName.toString(), null,
                            e.getMessage(), e, true);
                }
                return result;
            })).collect(Collectors.toList());

    if (getDatabaseFutures != null && !getDatabaseFutures.isEmpty()) {
        resultFuture = Futures.transformAsync(Futures.successfulAsList(getDatabaseFutures), input -> {
            final ListenableFuture<Void> processDatabaseFuture = indexDatabaseDtos(catalogName, input);
            final List<ListenableFuture<Void>> processDatabaseFutures = input.stream().filter(NOT_NULL)
                    .map(databaseDto -> {
                        final List<QualifiedName> tableNames = databaseDto.getTables().stream()
                                .map(s -> QualifiedName.ofTable(databaseDto.getName().getCatalogName(),
                                        databaseDto.getName().getDatabaseName(), s))
                                .collect(Collectors.toList());
                        log.info("Full refresh of database {} for tables({}): {}",
                                databaseDto.getName().toString(), databaseDto.getTables().size(),
                                databaseDto.getTables());
                        return processTables(databaseDto.getName(), tableNames);
                    }).filter(NOT_NULL).collect(Collectors.toList());
            processDatabaseFutures.add(processDatabaseFuture);
            return Futures.transform(Futures.successfulAsList(processDatabaseFutures),
                    Functions.constant(null));
        });
    }

    return resultFuture;
}

From source file:org.thingsboard.server.dao.relation.BaseRelationService.java

private ListenableFuture<Set<EntityRelation>> findRelationsRecursively(final TenantId tenantId,
        final EntityId rootId, final EntitySearchDirection direction, RelationTypeGroup relationTypeGroup,
        int lvl, final ConcurrentHashMap<EntityId, Boolean> uniqueMap) throws Exception {
    if (lvl == 0) {
        return Futures.immediateFuture(Collections.emptySet());
    }
    lvl--;
    //TODO: try to remove this blocking operation
    Set<EntityRelation> children = new HashSet<>(
            findRelations(tenantId, rootId, direction, relationTypeGroup).get());
    Set<EntityId> childrenIds = new HashSet<>();
    for (EntityRelation childRelation : children) {
        log.trace("Found Relation: {}", childRelation);
        EntityId childId;
        if (direction == EntitySearchDirection.FROM) {
            childId = childRelation.getTo();
        } else {
            childId = childRelation.getFrom();
        }
        if (uniqueMap.putIfAbsent(childId, Boolean.TRUE) == null) {
            log.trace("Adding Relation: {}", childId);
            if (childrenIds.add(childId)) {
                log.trace("Added Relation: {}", childId);
            }
        }
    }
    List<ListenableFuture<Set<EntityRelation>>> futures = new ArrayList<>();
    for (EntityId entityId : childrenIds) {
        futures.add(findRelationsRecursively(tenantId, entityId, direction, relationTypeGroup, lvl, uniqueMap));
    }
    //TODO: try to remove this blocking operation
    List<Set<EntityRelation>> relations = Futures.successfulAsList(futures).get();
    relations.forEach(r -> r.forEach(children::add));
    return Futures.immediateFuture(children);
}

From source file:com.netflix.metacat.main.services.search.ElasticSearchMetacatRefresh.java

/**
 * Process the list of tables in batches.
 *
 * @param databaseName database name
 * @param tableNames   table names
 * @return A future containing the tasks
 */
private ListenableFuture<Void> processTables(final QualifiedName databaseName,
        final List<QualifiedName> tableNames) {
    final List<List<QualifiedName>> tableNamesBatches = Lists.partition(tableNames, 500);
    final List<ListenableFuture<Void>> processTablesBatchFutures = tableNamesBatches.stream()
            .map(subTableNames -> _processTables(databaseName, subTableNames)).collect(Collectors.toList());

    return Futures.transform(Futures.successfulAsList(processTablesBatchFutures), Functions.constant(null));
}