Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usage for com.google.common.util.concurrent Futures allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
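
Before the per-project examples below, here is a minimal self-contained sketch of the basic contract (the class name AllAsListDemo is illustrative, and Guava is assumed on the classpath):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class AllAsListDemo {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));
        try {
            // Submit two independent tasks.
            ListenableFuture<Integer> first = executor.submit(new Callable<Integer>() {
                public Integer call() {
                    return 1;
                }
            });
            ListenableFuture<Integer> second = executor.submit(new Callable<Integer>() {
                public Integer call() {
                    return 2;
                }
            });

            // The combined future succeeds only when every input succeeds;
            // its value preserves the order of the input futures.
            ListenableFuture<List<Integer>> all = Futures.allAsList(first, second);
            System.out.println(all.get()); // prints [1, 2]
        } finally {
            executor.shutdownNow();
        }
    }
}

If any input future fails or is cancelled, the combined future fails or is cancelled as well; the examples below rely on get() or Futures.getUnchecked(...) to surface such failures from a batch of parallel tasks.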

Usage

From source file:org.jclouds.openstack.swift.v1.blobstore.RegionScopedSwiftBlobStore.java

@Beta
protected String putMultipartBlob(String container, Blob blob, PutOptions overrides,
        ListeningExecutorService executor) {
    ArrayList<ListenableFuture<MultipartPart>> parts = new ArrayList<ListenableFuture<MultipartPart>>();

    long contentLength = checkNotNull(blob.getMetadata().getContentMetadata().getContentLength(),
            "must provide content-length to use multi-part upload");
    MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
            getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
    long partSize = algorithm.calculateChunkSize(contentLength);
    MultipartUpload mpu = initiateMultipartUpload(container, blob.getMetadata(), partSize, overrides);
    int partNumber = 0;

    for (Payload payload : slicer.slice(blob.getPayload(), partSize)) {
        BlobUploader b = new BlobUploader(mpu, partNumber++, payload);
        parts.add(executor.submit(b));
    }

    return completeMultipartUpload(mpu, Futures.getUnchecked(Futures.allAsList(parts)));
}
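
Note the Futures.getUnchecked call above: it blocks like get(), but rethrows a failed future's cause wrapped in an unchecked UncheckedExecutionException (or an ExecutionError if the cause was an Error), which suits methods that do not declare checked exceptions.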

From source file:org.hawkular.alerts.engine.impl.CassActionsServiceImpl.java

private boolean filterByResult(String tenantId, Set<ActionHistoryPK> actionPks, ActionsCriteria criteria)
        throws Exception {
    boolean filterByResult = false;
    if (criteria.getResult() != null || (criteria.getResults() != null && !criteria.getResults().isEmpty())) {
        filterByResult = true;

        PreparedStatement selectActionHistoryResult = CassStatement.get(session,
                CassStatement.SELECT_ACTION_HISTORY_RESULT);

        List<ResultSetFuture> futures = new ArrayList<>();
        if (criteria.getResult() != null) {
            futures.add(session.executeAsync(selectActionHistoryResult.bind(tenantId, criteria.getResult())));
        }
        if (criteria.getResults() != null && !criteria.getResults().isEmpty()) {
            for (String result : criteria.getResults()) {
                futures.add(session.executeAsync(selectActionHistoryResult.bind(tenantId, result)));
            }
        }

        List<ResultSet> rsActionHistory = Futures.allAsList(futures).get();
        rsActionHistory.stream().forEach(r -> {
            for (Row row : r) {
                ActionHistoryPK actionHistoryPK = new ActionHistoryPK();
                actionHistoryPK.tenantId = tenantId;
                actionHistoryPK.actionPlugin = row.getString("actionPlugin");
                actionHistoryPK.actionId = row.getString("actionId");
                actionHistoryPK.alertId = row.getString("alertId");
                actionHistoryPK.ctime = row.getLong("ctime");
                actionPks.add(actionHistoryPK);
            }
        });
    }
    return filterByResult;
}
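
This works because the Cassandra driver's ResultSetFuture implements ListenableFuture&lt;ResultSet&gt;, so the per-query futures can be handed straight to Futures.allAsList and the result sets drained once every query has returned.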

From source file:org.apache.brooklyn.core.mgmt.persist.BrooklynMementoPersisterToObjectStore.java

/**
 * Concurrent calls will queue up (the lock is "fair", which means an "approximately arrival-order policy").
 * Current usage is with the {@link PeriodicDeltaChangeListener} so we expect only one call at a time.
 *
 * TODO Longer term, if we care more about concurrent calls we could merge the queued deltas so that we
 * don't do unnecessary repeated writes of an entity.
 */
private Stopwatch deltaImpl(Delta delta, PersistenceExceptionHandler exceptionHandler) {
    try {
        lock.writeLock().lockInterruptibly();
    } catch (InterruptedException e) {
        throw Exceptions.propagate(e);
    }
    try {
        objectStore.prepareForMasterUse();

        Stopwatch stopwatch = Stopwatch.createStarted();
        List<ListenableFuture<?>> futures = Lists.newArrayList();

        for (BrooklynObjectType type : BrooklynPersistenceUtils.STANDARD_BROOKLYN_OBJECT_TYPE_PERSISTENCE_ORDER) {
            for (Memento entity : delta.getObjectsOfType(type)) {
                futures.add(asyncPersist(type.getSubPathName(), entity, exceptionHandler));
            }
        }
        for (BrooklynObjectType type : BrooklynPersistenceUtils.STANDARD_BROOKLYN_OBJECT_TYPE_PERSISTENCE_ORDER) {
            for (String id : delta.getRemovedIdsOfType(type)) {
                futures.add(asyncDelete(type.getSubPathName(), id, exceptionHandler));
            }
        }

        try {
            // Wait for all the tasks to complete or fail, rather than aborting on the first failure,
            // but then propagate a failure if any task failed (hence the two calls).
            Futures.successfulAsList(futures).get();
            Futures.allAsList(futures).get();
        } catch (Exception e) {
            throw Exceptions.propagate(e);
        }

        return stopwatch;
    } finally {
        lock.writeLock().unlock();
    }
}
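
The two-call idiom above, Futures.successfulAsList to let every task run to completion and then Futures.allAsList to propagate any failure, is worth isolating. Below is a minimal sketch of that pattern as a standalone helper; the class and method names are illustrative, not part of Brooklyn:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

import java.util.List;

final class FutureBarriers {

    // Waits for ALL futures to complete (successfully or not) before
    // propagating a failure, instead of giving up on the first failed task.
    static <V> List<V> awaitAllThenPropagate(List<ListenableFuture<? extends V>> futures)
            throws Exception {
        // successfulAsList's combined future does not fail when an input
        // fails (failed inputs become null), so this get() simply blocks
        // until every task has finished.
        Futures.successfulAsList(futures).get();
        // Every input is done now; if any of them failed, this get()
        // throws an ExecutionException wrapping an input's failure.
        return Futures.allAsList(futures).get();
    }
}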

From source file:org.voltdb.export.ExportGeneration.java

public void closeAndDelete() throws IOException {
    List<ListenableFuture<?>> tasks = new ArrayList<ListenableFuture<?>>();
    for (HashMap<String, ExportDataSource> map : m_dataSourcesByPartition.values()) {
        for (ExportDataSource source : map.values()) {
            tasks.add(source.closeAndDelete());
        }
    }
    try {
        Futures.allAsList(tasks).get();
    } catch (Exception e) {
        Throwables.propagateIfPossible(e, IOException.class);
    }
    shutdown = true;
    VoltFile.recursivelyDelete(m_directory);
}

From source file:org.jclouds.openstack.swift.v1.blobstore.RegionScopedSwiftBlobStore.java

@Override
@Beta
public void downloadBlob(String container, String name, File destination, ExecutorService executor) {

    ListeningExecutorService listeningExecutor = MoreExecutors.listeningDecorator(executor);
    RandomAccessFile raf = null;
    File tempFile = new File(destination.getName() + "." + UUID.randomUUID());
    try {
        long contentLength = api.getObjectApi(regionId, container).getWithoutBody(name).getPayload()
                .getContentMetadata().getContentLength();

        // Reserve space for performance reasons
        raf = new RandomAccessFile(tempFile, "rw");
        raf.seek(contentLength - 1);
        raf.write(0);

        // Determine the download part size: smaller means less memory usage; larger is faster as long as the threads stay saturated
        long partSize = getMinimumMultipartPartSize();

        // Loop through ranges within the file
        long from;
        long to;
        List<ListenableFuture<Void>> results = new ArrayList<ListenableFuture<Void>>();

        for (from = 0; from < contentLength; from = from + partSize) {
            to = (from + partSize >= contentLength) ? contentLength - 1 : from + partSize - 1;
            BlobDownloader b = new BlobDownloader(regionId, container, name, raf, from, to);
            results.add(listeningExecutor.submit(b));
        }

        Futures.getUnchecked(Futures.allAsList(results));

        raf.getChannel().force(true);
        raf.getChannel().close();
        raf.close();

        if (destination.exists()) {
            destination.delete();
        }
        if (!tempFile.renameTo(destination)) {
            throw new RuntimeException(
                    "Could not move temporary downloaded file to destination " + destination);
        }
        tempFile = null;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        Closeables2.closeQuietly(raf);
        if (tempFile != null) {
            tempFile.delete();
        }
    }
}
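
Here the per-range futures carry no useful value (ListenableFuture&lt;Void&gt;); Futures.allAsList(results) serves purely as a completion barrier, with Futures.getUnchecked surfacing the first failed range download as an unchecked exception before the channel is forced and the temporary file is moved into place.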

From source file:org.apache.druid.segment.realtime.appenderator.AppenderatorImpl.java

@Override
public void close() {
    if (!closed.compareAndSet(false, true)) {
        log.info("Appenderator already closed");
        return;
    }

    log.info("Shutting down...");

    final List<ListenableFuture<?>> futures = Lists.newArrayList();
    for (Map.Entry<SegmentIdentifier, Sink> entry : sinks.entrySet()) {
        futures.add(abandonSegment(entry.getKey(), entry.getValue(), false));
    }

    try {
        Futures.allAsList(futures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        log.warn(e, "Interrupted during close()");
    } catch (ExecutionException e) {
        log.warn(e, "Unable to abandon existing segments during close()");
    }

    try {
        shutdownExecutors();
        Preconditions.checkState(
                persistExecutor == null || persistExecutor.awaitTermination(365, TimeUnit.DAYS),
                "persistExecutor not terminated");
        Preconditions.checkState(pushExecutor == null || pushExecutor.awaitTermination(365, TimeUnit.DAYS),
                "pushExecutor not terminated");
        Preconditions.checkState(
                intermediateTempExecutor == null
                        || intermediateTempExecutor.awaitTermination(365, TimeUnit.DAYS),
                "intermediateTempExecutor not terminated");
        persistExecutor = null;
        pushExecutor = null;
        intermediateTempExecutor = null;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new ISE("Failed to shutdown executors during close()");
    }

    // Only unlock if executors actually shut down.
    unlockBasePersistDirectory();
}

From source file:org.voltdb.export.ExportGeneration.java

public void truncateExportToTxnId(long txnId, long[] perPartitionTxnIds) {
    // create an easy partitionId:txnId lookup.
    HashMap<Integer, Long> partitionToTxnId = new HashMap<Integer, Long>();
    for (long tid : perPartitionTxnIds) {
        partitionToTxnId.put(TxnEgo.getPartitionId(tid), tid);
    }

    List<ListenableFuture<?>> tasks = new ArrayList<ListenableFuture<?>>();

    // Pre-iv2, the truncation point is the snapshot transaction id.
    // In iv2, truncate at the per-partition txn id recorded in the snapshot.
    for (HashMap<String, ExportDataSource> dataSources : m_dataSourcesByPartition.values()) {
        for (ExportDataSource source : dataSources.values()) {
            if (VoltDB.instance().isIV2Enabled()) {
                Long truncationPoint = partitionToTxnId.get(source.getPartitionId());
                if (truncationPoint == null) {
                    exportLog.error("Snapshot " + txnId + " does not include truncation point for partition "
                            + source.getPartitionId());
                } else {
                    tasks.add(source.truncateExportToTxnId(truncationPoint));
                }
            } else {
                tasks.add(source.truncateExportToTxnId(txnId));
            }
        }
    }
    try {
        Futures.allAsList(tasks).get();
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Unexpected exception truncating export data during snapshot restore. "
                + "You can back up export overflow data and start the "
                + "DB without it to get past this error", true, e);
    }
}

From source file:org.voltdb.export.ExportGeneration.java

public void close() {
    List<ListenableFuture<?>> tasks = new ArrayList<ListenableFuture<?>>();
    for (HashMap<String, ExportDataSource> sources : m_dataSourcesByPartition.values()) {
        for (ExportDataSource source : sources.values()) {
            tasks.add(source.close());
        }
    }
    try {
        Futures.allAsList(tasks).get();
    } catch (Exception e) {
        // Logging of errors is done inside the tasks, so there is nothing to do here;
        // intentionally not failing if there is an issue with close.
        exportLog.error("Error closing export data sources", e);
    }
    shutdown = true;
}

From source file:org.hawkular.alerts.engine.impl.CassAlertsServiceImpl.java

private List<Alert> getAlerts(String tenantId, AlertsCriteria criteria) throws Exception {
    boolean filter = (null != criteria && criteria.hasCriteria());
    boolean thin = (null != criteria && criteria.isThin());

    if (filter && log.isDebugEnabled()) {
        log.debug("getAlerts criteria: " + criteria.toString());
    }

    List<Alert> alerts = new ArrayList<>();
    Set<String> alertIds = new HashSet<>();
    boolean activeFilter = false;

    try {
        if (filter) {
            /*
            Get alertIds explicitly added into the criteria. Start with these as there is no query involved
             */
            if (criteria.hasAlertIdCriteria()) {
                Set<String> alertIdsFilteredByAlerts = filterByAlerts(criteria);
                if (activeFilter) {
                    alertIds.retainAll(alertIdsFilteredByAlerts);
                } else {
                    alertIds.addAll(alertIdsFilteredByAlerts);
                }
                if (alertIds.isEmpty()) {
                    return alerts;
                }
                activeFilter = true;
            }

            /*
            Get alertIds via tagQuery
             */
            if (criteria.hasTagQueryCriteria()) {
                Set<String> alertIdsFilteredByTagQuery = getIdsByTagQuery(tenantId, TagType.ALERT,
                        criteria.getTagQuery());
                if (activeFilter) {
                    alertIds.retainAll(alertIdsFilteredByTagQuery);
                } else {
                    alertIds.addAll(alertIdsFilteredByTagQuery);
                }
                if (alertIds.isEmpty()) {
                    return alerts;
                }
                activeFilter = true;
            }

            /*
            Get alertIds filtered by triggerIds clause
             */
            if (criteria.hasTriggerIdCriteria()) {
                Set<String> alertIdsFilteredByTriggers = filterByTriggers(tenantId, criteria);
                if (activeFilter) {
                    alertIds.retainAll(alertIdsFilteredByTriggers);
                } else {
                    alertIds.addAll(alertIdsFilteredByTriggers);
                }
                if (alertIds.isEmpty()) {
                    return alerts;
                }
                activeFilter = true;
            }

            /*
            Get alertIds filtered by time clause
             */
            if (criteria.hasCTimeCriteria()) {
                Set<String> alertIdsFilteredByTime = filterByCTime(tenantId, criteria);
                if (activeFilter) {
                    alertIds.retainAll(alertIdsFilteredByTime);
                } else {
                    alertIds.addAll(alertIdsFilteredByTime);
                }
                if (alertIds.isEmpty()) {
                    return alerts;
                }
                activeFilter = true;
            }

            /*
            Get alertIds filtered by resolved time clause
             */
            if (criteria.hasResolvedTimeCriteria()) {
                Set<String> alertIdsFilteredByResolvedTime = filterByResolvedTime(tenantId, criteria);
                if (activeFilter) {
                    alertIds.retainAll(alertIdsFilteredByResolvedTime);
                } else {
                    alertIds.addAll(alertIdsFilteredByResolvedTime);
                }
                if (alertIds.isEmpty()) {
                    return alerts;
                }
                activeFilter = true;
            }

            /*
            Get alertIds filtered by ack time clause
             */
            if (criteria.hasAckTimeCriteria()) {
                Set<String> alertIdsFilteredByAckTime = filterByAckTime(tenantId, criteria);
                if (activeFilter) {
                    alertIds.retainAll(alertIdsFilteredByAckTime);
                } else {
                    alertIds.addAll(alertIdsFilteredByAckTime);
                }
                if (alertIds.isEmpty()) {
                    return alerts;
                }
                activeFilter = true;
            }

            /*
            Get alertIds filtered by status time clause
             */
            if (criteria.hasStatusTimeCriteria()) {
                Set<String> alertIdsFilteredByStatusTime = filterByStatusTime(tenantId, criteria);
                if (activeFilter) {
                    alertIds.retainAll(alertIdsFilteredByStatusTime);
                } else {
                    alertIds.addAll(alertIdsFilteredByStatusTime);
                }
                if (alertIds.isEmpty()) {
                    return alerts;
                }
                activeFilter = true;
            }

            /*
            Below this point we filter manually, because the remaining filters have a low cardinality of
            values and are not handled efficiently by database indexes or the intersection-based approach.
            Fetch the alerts now and proceed.
             */
            if (activeFilter) {
                PreparedStatement selectAlertsByTenantAndAlert = CassStatement.get(session,
                        CassStatement.SELECT_ALERT);
                List<ResultSetFuture> futures = alertIds.stream().map(
                        alertId -> session.executeAsync(selectAlertsByTenantAndAlert.bind(tenantId, alertId)))
                        .collect(Collectors.toList());
                List<ResultSet> rsAlerts = Futures.allAsList(futures).get();
                rsAlerts.stream().forEach(r -> {
                    for (Row row : r) {
                        String payload = row.getString("payload");
                        Alert alert = JsonUtil.fromJson(payload, Alert.class, thin);
                        alerts.add(alert);
                    }
                });
            } else {
                // This is the worst-case scenario of criteria featuring only manual filtering.  Generate a
                // warning because clients should be discouraged from using such vague criteria.
                log.warnf("Only supplying Severity and/or Status can be slow and return large Sets: %s",
                        criteria);
                fetchAllAlerts(tenantId, thin, alerts);
            }

            /*
             filter by severities
             */
            if (criteria.hasSeverityCriteria()) {
                filterBySeverities(tenantId, criteria, alerts);
                if (alerts.isEmpty()) {
                    return alerts;
                }
            }

            /*
            filter by statuses
             */
            if (criteria.hasStatusCriteria()) {
                filterByStatuses(tenantId, criteria, alerts);
                if (alerts.isEmpty()) {
                    return alerts;
                }
            }
        } else {
            /*
            Get all alerts - Single query
             */
            fetchAllAlerts(tenantId, thin, alerts);
        }

    } catch (Exception e) {
        msgLog.errorDatabaseException(e.getMessage());
        throw e;
    }

    return alerts;
}

From source file:brooklyn.entity.basic.Entities.java

/**
 * Stops, destroys, and unmanages all apps in the given context, and then terminates the management context.
 *
 * Apps will be stopped+destroyed+unmanaged concurrently, waiting for all to complete.
 */
public static void destroyAll(final ManagementContext mgmt) {
    if (mgmt instanceof NonDeploymentManagementContext) {
        // log here because it is easy for tests to destroyAll(app.getMgmtContext())
        // which will *not* destroy the mgmt context if the app has been stopped!
        log.warn("Entities.destroyAll invoked on non-deployment " + mgmt + " - not likely to have much effect! "
                + "(This usually means the mgmt context has been taken from an entity that has been destroyed. "
                + "To destroy other things on the management context ensure you keep a handle to the context "
                + "before the entity is destroyed, such as by creating the management context first.)");
    }
    if (!mgmt.isRunning())
        return;

    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    List<ListenableFuture<?>> futures = Lists.newArrayList();
    final AtomicReference<Exception> error = Atomics.newReference();
    try {
        log.debug("destroying all apps in " + mgmt + ": " + mgmt.getApplications());
        for (final Application app : mgmt.getApplications()) {
            futures.add(executor.submit(new Runnable() {
                public void run() {
                    log.debug("destroying app " + app + " (managed? " + isManaged(app) + "; mgmt is " + mgmt
                            + ")");
                    try {
                        destroy(app);
                        log.debug("destroyed app " + app + "; mgmt now " + mgmt);
                    } catch (Exception e) {
                        log.warn("problems destroying app " + app + " (mgmt now " + mgmt
                                + ", will rethrow at least one exception): " + e);
                        error.compareAndSet(null, e);
                    }
                }
            }));
        }
        Futures.allAsList(futures).get();

        for (Location loc : mgmt.getLocationManager().getLocations()) {
            destroyCatching(loc);
        }
        if (mgmt instanceof ManagementContextInternal) {
            ((ManagementContextInternal) mgmt).terminate();
        }
        if (error.get() != null)
            throw Exceptions.propagate(error.get());
    } catch (InterruptedException e) {
        throw Exceptions.propagate(e);
    } catch (ExecutionException e) {
        throw Exceptions.propagate(e);
    } finally {
        executor.shutdownNow();
    }
}
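
Note that each Runnable catches its own exceptions and records the first one in the AtomicReference instead of letting its future fail, so Futures.allAsList(futures).get() acts purely as a barrier here; the recorded error is rethrown only after the locations and the management context have been cleaned up.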