Example usage for com.google.common.util.concurrent Futures allAsList

Introduction

On this page you can find example usages of com.google.common.util.concurrent Futures allAsList.

Prototype

@Beta
@CheckReturnValue
public static <V> ListenableFuture<List<V>> allAsList(
        Iterable<? extends ListenableFuture<? extends V>> futures) 

Document

Creates a new ListenableFuture whose value is a list containing the values of all its input futures, if all succeed.
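
Before the project examples below, here is a minimal, hypothetical sketch of the basic pattern (not taken from any of the projects on this page): submit a few tasks to a ListeningExecutorService, combine the resulting futures with Futures.allAsList, and block for the combined list. The class name AllAsListSketch and the squared-integer tasks are invented for illustration; the sketch only assumes a standard Guava dependency on the classpath.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class AllAsListSketch {
    public static void main(String[] args) throws Exception {
        // Decorate a plain thread pool so that submit() returns ListenableFutures.
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));

        // Submit a handful of independent tasks; each returns an Integer.
        List<ListenableFuture<Integer>> futures = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            final int n = i;
            futures.add(executor.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    return n * n;
                }
            }));
        }

        // allAsList succeeds only if every input future succeeds; if any input
        // fails or is cancelled, the combined future fails as well.
        ListenableFuture<List<Integer>> combined = Futures.allAsList(futures);
        List<Integer> results = combined.get(); // blocks until all tasks finish
        System.out.println(results);            // [0, 1, 4, 9]

        executor.shutdown();
    }
}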

Usage

From source file:org.apache.druid.query.groupby.epinephelinae.ConcurrentGrouper.java

private List<CloseableIterator<Entry<KeyType>>> parallelSortAndGetGroupersIterator() {
    // The number of groupers is the same as the number of processing threads in the executor
    final ListenableFuture<List<CloseableIterator<Entry<KeyType>>>> future = Futures
            .allAsList(groupers.stream().map(grouper -> executor
                    .submit(new AbstractPrioritizedCallable<CloseableIterator<Entry<KeyType>>>(priority) {
                        @Override
                        public CloseableIterator<Entry<KeyType>> call() {
                            return grouper.iterator(true);
                        }
                    })).collect(Collectors.toList()));

    try {
        final long timeout = queryTimeoutAt - System.currentTimeMillis();
        return hasQueryTimeout ? future.get(timeout, TimeUnit.MILLISECONDS) : future.get();
    } catch (InterruptedException | TimeoutException e) {
        future.cancel(true);
        throw new QueryInterruptedException(e);
    } catch (CancellationException e) {
        throw new QueryInterruptedException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e.getCause());
    }
}

From source file:org.apache.cassandra.repair.RepairJob.java

/**
 * Creates {@link ValidationTask}s and submits them to the task executor so that tasks run sequentially within each dc.
 */
private ListenableFuture<List<TreeResponse>> sendDCAwareValidationRequest(Collection<InetAddress> endpoints) {
    int gcBefore = Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily)
            .gcBefore(System.currentTimeMillis());
    List<ListenableFuture<TreeResponse>> tasks = new ArrayList<>(endpoints.size());

    Map<String, Queue<InetAddress>> requestsByDatacenter = new HashMap<>();
    for (InetAddress endpoint : endpoints) {
        String dc = DatabaseDescriptor.getEndpointSnitch().getDatacenter(endpoint);
        Queue<InetAddress> queue = requestsByDatacenter.get(dc);
        if (queue == null) {
            queue = new LinkedList<>();
            requestsByDatacenter.put(dc, queue);
        }
        queue.add(endpoint);
    }

    for (Map.Entry<String, Queue<InetAddress>> entry : requestsByDatacenter.entrySet()) {
        Queue<InetAddress> requests = entry.getValue();
        InetAddress address = requests.poll();
        ValidationTask firstTask = new ValidationTask(desc, address, gcBefore);
        logger.info("Validating {}", address);
        session.waitForValidation(Pair.create(desc, address), firstTask);
        tasks.add(firstTask);
        ValidationTask currentTask = firstTask;
        while (requests.size() > 0) {
            final InetAddress nextAddress = requests.poll();
            final ValidationTask nextTask = new ValidationTask(desc, nextAddress, gcBefore);
            tasks.add(nextTask);
            Futures.addCallback(currentTask, new FutureCallback<TreeResponse>() {
                public void onSuccess(TreeResponse result) {
                    logger.info("Validating {}", nextAddress);
                    session.waitForValidation(Pair.create(desc, nextAddress), nextTask);
                    taskExecutor.execute(nextTask);
                }

                // failure is handled at root of job chain
                public void onFailure(Throwable t) {
                }
            });
            currentTask = nextTask;
        }
        // start running tasks
        taskExecutor.execute(firstTask);
    }
    return Futures.allAsList(tasks);
}
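
The example above chains the validation tasks so that they run one at a time within each datacenter while allAsList still gathers every result. Below is a minimal, hypothetical reduction of that chaining idea, not taken from the Cassandra code: each stand-in task is a SettableFuture that is "started" from the previous task's success callback. The class SequentialChainSketch and the task-N values are invented for illustration; the sketch assumes a Guava version that provides MoreExecutors.directExecutor().

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SequentialChainSketch {
    public static void main(String[] args) throws Exception {
        final ExecutorService taskExecutor = Executors.newSingleThreadExecutor();

        // Stand-in tasks: each SettableFuture is "run" by simply setting its value.
        final List<SettableFuture<String>> tasks = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            tasks.add(SettableFuture.<String>create());
        }

        // Chain: when task i succeeds, start task i + 1 on the executor.
        for (int i = 0; i < tasks.size() - 1; i++) {
            final SettableFuture<String> next = tasks.get(i + 1);
            final int nextIndex = i + 1;
            Futures.addCallback(tasks.get(i), new FutureCallback<String>() {
                @Override
                public void onSuccess(String result) {
                    taskExecutor.execute(new Runnable() {
                        @Override
                        public void run() {
                            next.set("task-" + nextIndex);
                        }
                    });
                }

                @Override
                public void onFailure(Throwable t) {
                    // Failure is surfaced through the combined future below.
                }
            }, MoreExecutors.directExecutor());
        }

        // Kick off the first task; the callbacks start the rest in order.
        taskExecutor.execute(new Runnable() {
            @Override
            public void run() {
                tasks.get(0).set("task-0");
            }
        });

        // allAsList completes only after every chained task has completed.
        ListenableFuture<List<String>> all = Futures.allAsList(tasks);
        System.out.println(all.get()); // [task-0, task-1, task-2]

        taskExecutor.shutdown();
    }
}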

From source file:org.opendaylight.vbd.impl.VbdBridgeDomain.java

private ListenableFuture<Void> handleNewTopology(final DataObjectModification<Topology> modification) {
    Preconditions.checkNotNull(modification.getDataAfter());
    final Topology data = modification.getDataAfter();
    // Handle VBridge augmentation
    final TopologyVbridgeAugment vbdConfiguration = data.getAugmentation(TopologyVbridgeAugment.class);
    if (vbdConfiguration != null) {
        // Spread configuration
        setConfiguration(vbdConfiguration);
        vppModifier.setConfig(vbdConfiguration);
    } else {
        LOG.error("Topology {} has no configuration", PPrint.topology(topology));
    }
    // Handle new nodes
    final Collection<DataObjectModification<? extends DataObject>> modifiedChildren = modification
            .getModifiedChildren();
    final List<ListenableFuture<Void>> newCumulativeTopologyResult = new ArrayList<>();
    for (final DataObjectModification<? extends DataObject> childNode : modifiedChildren) {
        LOG.debug("Processing created child {} from topology {}", childNode, PPrint.topology(topology));
        if (Node.class.isAssignableFrom(childNode.getDataType())) {
            newCumulativeTopologyResult.add(handleModifiedNode(childNode));
        }
    }
    final ListenableFuture<List<Void>> newTopologyResult = Futures.allAsList(newCumulativeTopologyResult);
    return transform(newTopologyResult);
}

From source file:com.yahoo.yqlplus.engine.internal.java.sequences.Sequences.java

public static <ROW, SEQUENCE extends Iterable<ROW>, SET> ListenableFuture<List<ROW>> invokeAsyncBatchSet(
        final Executor executor, final AsyncFunction<List<SET>, SEQUENCE> source, final Tracer tracer,
        final Timeout timeout, final TimeoutHandler handler, final List<ROW>... inputs) throws Exception {
    final List<ListenableFuture<SEQUENCE>> results = Lists.newArrayList();
    final Tracer childTracer = tracer.start(tracer.getGroup(), tracer.getName());
    for (List<ROW> input : inputs) {
        for (int i = 0; i < input.size(); i++) {
            Record record = (Record) input.get(i);
            List methodArgs = Lists.newArrayList();
            Iterable<String> fieldNames = record.getFieldNames();
            for (String fieldName : fieldNames) {
                methodArgs.add(record.get(fieldName));
            }
            ListenableFuture<SEQUENCE> result = source.apply(methodArgs);
            results.add(result);
            result.addListener(new Runnable() {
                @Override
                public void run() {
                    childTracer.end();
                }
            }, MoreExecutors.sameThreadExecutor());
        }
    }
    final ListenableFuture<List<SEQUENCE>> gather = Futures.allAsList(results);
    return handler.withTimeout(gatherResults(executor, gather, 1), timeout.verify(), timeout.getTickUnits());
}

From source file:com.facebook.presto.hive.HiveSplitSourceProvider.java

private void loadPartitionSplits(final HiveSplitSource hiveSplitSource, SuspendingExecutor suspendingExecutor,
        final ConnectorSession session) {
    final Semaphore semaphore = new Semaphore(maxPartitionBatchSize);
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        ImmutableList.Builder<ListenableFuture<Void>> futureBuilder = ImmutableList.builder();

        for (HivePartitionMetadata partition : partitions) {
            final String partitionName = partition.getHivePartition().getPartitionId();
            final Properties schema = getPartitionSchema(table, partition.getPartition());
            final List<HivePartitionKey> partitionKeys = getPartitionKeys(table, partition.getPartition());
            final TupleDomain<HiveColumnHandle> effectivePredicate = partition.getHivePartition()
                    .getEffectivePredicate();

            Path path = new Path(getPartitionLocation(table, partition.getPartition()));
            Configuration configuration = hdfsEnvironment.getConfiguration(path);
            final InputFormat<?, ?> inputFormat = getInputFormat(configuration, schema, false);

            if (inputFormat instanceof SymlinkTextInputFormat) {
                JobConf jobConf = new JobConf(configuration);
                FileInputFormat.setInputPaths(jobConf, path);
                InputSplit[] splits = inputFormat.getSplits(jobConf, 0);
                for (InputSplit rawSplit : splits) {
                    FileSplit split = ((SymlinkTextInputFormat.SymlinkTextInputSplit) rawSplit)
                            .getTargetSplit();

                    // get the filesystem for the target path -- it may be a different hdfs instance
                    FileSystem targetFilesystem = hdfsEnvironment.getFileSystem(split.getPath());
                    FileStatus fileStatus = targetFilesystem.getFileStatus(split.getPath());
                    hiveSplitSource.addToQueue(createHiveSplits(partitionName, fileStatus,
                            targetFilesystem.getFileBlockLocations(fileStatus, split.getStart(),
                                    split.getLength()),
                            split.getStart(), split.getLength(), schema, partitionKeys, false, session,
                            effectivePredicate));
                }
                continue;
            }

            // TODO: this is currently serial across all partitions and should be done in suspendingExecutor
            FileSystem fs = hdfsEnvironment.getFileSystem(path);
            if (bucket.isPresent()) {
                Optional<FileStatus> bucketFile = getBucketFile(bucket.get(), fs, path);
                if (bucketFile.isPresent()) {
                    FileStatus file = bucketFile.get();
                    BlockLocation[] blockLocations = fs.getFileBlockLocations(file, 0, file.getLen());
                    boolean splittable = isSplittable(inputFormat, fs, file.getPath());

                    hiveSplitSource.addToQueue(createHiveSplits(partitionName, file, blockLocations, 0,
                            file.getLen(), schema, partitionKeys, splittable, session, effectivePredicate));

                    continue;
                }
            }

            // Acquire semaphore so that we only have a fixed number of outstanding partitions being processed asynchronously
            // NOTE: there must not be any calls that throw in the space between acquiring the semaphore and setting the Future
            // callback to release it. Otherwise, we will need a try-finally block around this section.
            try {
                semaphore.acquire();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }

            ListenableFuture<Void> partitionFuture = createAsyncWalker(fs, suspendingExecutor).beginWalk(path,
                    new FileStatusCallback() {
                        @Override
                        public void process(FileStatus file, BlockLocation[] blockLocations) {
                            try {
                                boolean splittable = isSplittable(inputFormat,
                                        hdfsEnvironment.getFileSystem(file.getPath()), file.getPath());

                                hiveSplitSource.addToQueue(createHiveSplits(partitionName, file, blockLocations,
                                        0, file.getLen(), schema, partitionKeys, splittable, session,
                                        effectivePredicate));
                            } catch (IOException e) {
                                hiveSplitSource.fail(e);
                            }
                        }
                    });

            // release the semaphore when the partition finishes
            Futures.addCallback(partitionFuture, new FutureCallback<Void>() {
                @Override
                public void onSuccess(Void result) {
                    semaphore.release();
                }

                @Override
                public void onFailure(Throwable t) {
                    semaphore.release();
                }
            });

            futureBuilder.add(partitionFuture);
        }

        // when all partitions finish, mark the queue as finished
        Futures.addCallback(Futures.allAsList(futureBuilder.build()), new FutureCallback<List<Void>>() {
            @Override
            public void onSuccess(List<Void> result) {
                hiveSplitSource.finished();
            }

            @Override
            public void onFailure(Throwable t) {
                hiveSplitSource.fail(t);
            }
        });
    } catch (Throwable e) {
        hiveSplitSource.fail(e);
        Throwables.propagateIfInstanceOf(e, Error.class);
    }
}

From source file:io.druid.server.lookup.cache.LookupCoordinatorManager.java

void deleteAllOnTier(final String tier, final Collection<String> dropLookups)
        throws ExecutionException, InterruptedException, IOException {
    if (dropLookups.isEmpty()) {
        LOG.debug("Nothing to drop");
        return;
    }
    final Collection<URL> urls = getAllHostsAnnounceEndpoint(tier);
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {
            @Override
            public void run() {
                for (final String drop : dropLookups) {
                    final URL lookupURL;
                    try {
                        lookupURL = new URL(url.getProtocol(), url.getHost(), url.getPort(),
                                String.format("%s/%s", url.getFile(), drop));
                    } catch (MalformedURLException e) {
                        throw new ISE(e, "Error creating url for [%s]/[%s]", url, drop);
                    }
                    try {
                        deleteOnHost(lookupURL);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        LOG.warn("Delete [%s] interrupted", lookupURL);
                        throw Throwables.propagate(e);
                    } catch (IOException | ExecutionException e) {
                        // Don't raise as ExecutionException. Just log and continue
                        LOG.makeAlert(e, "Error deleting [%s]", lookupURL).emit();
                    }
                }
            }
        }));
    }
    final ListenableFuture allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
        throw new ExecutionException("Timeout in updating hosts! Attempting to cancel", e);
    }
}

From source file:com.datastax.driver.core.SessionManager.java

void updateCreatedPools(ListeningExecutorService executor) {
    try {
        // We do 2 iterations, so that we add missing pools first, and then remove all unnecessary pools second.
        // That way, we avoid a situation where we would temporarily lose connectivity.
        List<Host> toRemove = new ArrayList<Host>();
        List<ListenableFuture<?>> poolCreationFutures = new ArrayList<ListenableFuture<?>>();

        for (Host h : cluster.getMetadata().allHosts()) {
            HostDistance dist = loadBalancingPolicy().distance(h);
            HostConnectionPool pool = pools.get(h);

            if (pool == null) {
                if (dist != HostDistance.IGNORED && h.isUp())
                    poolCreationFutures.add(maybeAddPool(h, executor));
            } else if (dist != pool.hostDistance) {
                if (dist == HostDistance.IGNORED) {
                    toRemove.add(h);
                } else {
                    pool.hostDistance = dist;
                    pool.ensureCoreConnections();
                }
            }
        }

        // Wait for pool creation before removing, so we don't lose connectivity
        Futures.allAsList(poolCreationFutures).get();

        List<ListenableFuture<?>> poolRemovalFutures = new ArrayList<ListenableFuture<?>>(toRemove.size());
        for (Host h : toRemove)
            poolRemovalFutures.add(removePool(h));

        Futures.allAsList(poolRemovalFutures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
        logger.error("Unexpected error while refreshing connection pools", e.getCause());
    }
}

From source file:com.facebook.buck.rules.CassandraArtifactCache.java

@Override
@SuppressWarnings("PMD.EmptyCatchBlock")
public void close() {
    isWaitingToClose.set(true);
    ListenableFuture<List<OperationResult<Void>>> future = Futures.allAsList(futures);
    try {
        future.get();
    } catch (InterruptedException | ExecutionException e) {
        // Swallow exception and move on.
    }
}

From source file:org.apache.streams.twitter.provider.TwitterTimelineProvider.java

@Override
public boolean isRunning() {
    if (providerQueue.isEmpty() && executor.isTerminated() && Futures.allAsList(futures).isDone()) {
        LOGGER.info("Completed");
        running.set(false);
        LOGGER.info("Exiting");
    }
    return running.get();
}

From source file:org.apache.beam.runners.dataflow.util.PackageUtil.java

static List<DataflowPackage> stageClasspathElements(Collection<String> classpathElements,
        final String stagingPath, final Sleeper retrySleeper, ListeningExecutorService executorService,
        final CreateOptions createOptions) {
    LOG.info("Uploading {} files from PipelineOptions.filesToStage to staging location to "
            + "prepare for execution.", classpathElements.size());

    if (classpathElements.size() > SANE_CLASSPATH_SIZE) {
        LOG.warn(
                "Your classpath contains {} elements, which Google Cloud Dataflow automatically "
                        + "copies to all workers. Having this many entries on your classpath may be indicative "
                        + "of an issue in your pipeline. You may want to consider trimming the classpath to "
                        + "necessary dependencies only, using --filesToStage pipeline option to override "
                        + "what files are being staged, or bundling several dependencies into one.",
                classpathElements.size());
    }

    checkArgument(stagingPath != null,
            "Can't stage classpath elements because no staging location has been provided");

    // Inline a copy here because the inner code returns an immutable list and we want to mutate it.
    List<PackageAttributes> packageAttributes = new LinkedList<>(
            computePackageAttributes(classpathElements, stagingPath, executorService));

    // Compute the returned list of DataflowPackage objects here so that they are returned in the
    // same order as on the classpath.
    List<DataflowPackage> packages = Lists.newArrayListWithExpectedSize(packageAttributes.size());
    for (final PackageAttributes attributes : packageAttributes) {
        packages.add(attributes.getDataflowPackage());
    }

    // Order package attributes in descending size order so that we upload the largest files first.
    Collections.sort(packageAttributes, new PackageUploadOrder());
    final AtomicInteger numUploaded = new AtomicInteger(0);
    final AtomicInteger numCached = new AtomicInteger(0);

    List<ListenableFuture<?>> futures = new LinkedList<>();
    for (final PackageAttributes attributes : packageAttributes) {
        futures.add(executorService.submit(new Runnable() {
            @Override
            public void run() {
                stageOnePackage(attributes, numUploaded, numCached, retrySleeper, createOptions);
            }
        }));
    }
    try {
        Futures.allAsList(futures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted while staging packages", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Error while staging packages", e.getCause());
    }

    LOG.info("Staging files complete: {} files cached, {} files newly uploaded", numCached.get(),
            numUploaded.get());

    return packages;
}