Example usage for com.google.common.util.concurrent ListeningExecutorService submit

Introduction

This page collects example usages of com.google.common.util.concurrent.ListeningExecutorService.submit, taken from the source files listed under Usage below.

Prototype

@Override
ListenableFuture<?> submit(Runnable task);
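
A minimal, self-contained sketch (not taken from the projects below; the class name SubmitRunnableExample and the printed messages are illustrative) showing what submit(Runnable) returns and how a completion callback can be attached:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Executors;

public class SubmitRunnableExample {
    public static void main(String[] args) throws Exception {
        // Wrap a plain ExecutorService so that submit() returns ListenableFuture instead of Future.
        ListeningExecutorService pool = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));
        try {
            // submit(Runnable) returns a ListenableFuture<?> that completes with a null result.
            ListenableFuture<?> future = pool
                    .submit(() -> System.out.println("running on " + Thread.currentThread().getName()));

            // A listener (here via Futures.addCallback) runs once the task has finished.
            Futures.addCallback(future, new FutureCallback<Object>() {
                @Override
                public void onSuccess(Object result) {
                    System.out.println("task completed");
                }

                @Override
                public void onFailure(Throwable t) {
                    t.printStackTrace();
                }
            }, MoreExecutors.directExecutor());

            future.get(); // block until the task completes
        } finally {
            pool.shutdown();
        }
    }
}

Most of the examples below follow the same pattern: decorate an ExecutorService with MoreExecutors.listeningDecorator, collect the futures returned by submit, and wait on them with Futures.allAsList(...).get().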

Usage

From source file:com.google.cloud.bigtable.grpc.async.ResourceLimiterPerf.java

/**
 * @param pool
 * @throws InterruptedException
 * @throws ExecutionException
 * @throws TimeoutException
 */
private static void test(ListeningExecutorService pool)
        throws InterruptedException, ExecutionException, TimeoutException {
    final ResourceLimiter underTest = new ResourceLimiter(SIZE, (int) SIZE);
    final LinkedBlockingQueue<Long> registeredEvents = new LinkedBlockingQueue<>();

    final int readerCount = 20;
    final int writerCount = 1;
    Runnable writePerfRunnable = new Runnable() {
        @Override
        public void run() {
            long startReg = System.nanoTime();
            int offerCount = REGISTER_COUNT / writerCount;
            try {
                for (int i = 0; i < offerCount; i++) {
                    registeredEvents.offer(underTest.registerOperationWithHeapSize(1));
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            } finally {
                long totalTime = System.nanoTime() - startReg;
                System.out.println(String.format("Registered %d in %d ms.  %d nanos/reg.  %f offer/sec",
                        offerCount, totalTime / 1000000, totalTime / offerCount,
                        offerCount * 1000000000.0 / totalTime));
            }
        }
    };
    Runnable readPerfRunnable = new Runnable() {
        @Override
        public void run() {
            long startComplete = System.nanoTime();
            int regCount = REGISTER_COUNT / readerCount;
            try {
                for (int i = 0; i < regCount; i++) {
                    Long registeredId = registeredEvents.poll(1, TimeUnit.SECONDS);
                    if (registeredId == null) {
                        i--;
                    } else {
                        underTest.markCanBeCompleted(registeredId);
                    }
                }
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            } finally {
                long totalTime = System.nanoTime() - startComplete;
                System.out.println(String.format(
                        "markCanBeCompleted %d in %d.  %d nanos/complete.  %f unreg/sec", regCount,
                        totalTime / 1000000, totalTime / regCount, regCount * 1000000000.0 / totalTime));
            }
        }
    };

    List<ListenableFuture<?>> writerFutures = new ArrayList<>();
    List<ListenableFuture<?>> readerFutures = new ArrayList<>();

    for (int i = 0; i < writerCount; i++) {
        writerFutures.add(pool.submit(writePerfRunnable));
    }
    Thread.sleep(10);
    for (int i = 0; i < readerCount; i++) {
        readerFutures.add(pool.submit(readPerfRunnable));
    }
    Futures.allAsList(writerFutures).get(300, TimeUnit.MINUTES);
    Futures.allAsList(readerFutures).get(300, TimeUnit.MINUTES);
}

From source file:com.google.cloud.bigtable.grpc.async.RpcThrottlerPerf.java

/**
 * @param pool
 * @throws InterruptedException
 * @throws ExecutionException
 * @throws TimeoutException
 */
private static void test(ListeningExecutorService pool)
        throws InterruptedException, ExecutionException, TimeoutException {
    final RpcThrottler underTest = new RpcThrottler(new ResourceLimiter(SIZE, (int) SIZE));
    final LinkedBlockingQueue<Long> registeredEvents = new LinkedBlockingQueue<>();

    final int writerCount = 1;
    final int readerCount = 20;
    Runnable writePerfRunnable = new Runnable() {
        @Override
        public void run() {
            long startReg = System.nanoTime();
            int offerCount = REGISTER_COUNT / writerCount;
            try {
                for (int i = 0; i < offerCount; i++) {
                    registeredEvents.add(underTest.registerOperationWithHeapSize(1));
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            } finally {
                long totalTime = System.nanoTime() - startReg;
                System.out.println(String.format("Registered %d in %d ms.  %d nanos/reg.  %f offer/sec",
                        offerCount, totalTime / 1000000, totalTime / offerCount,
                        offerCount * 1000000000.0 / totalTime));
            }
        }
    };
    Runnable readPerfRunnable = new Runnable() {
        @Override
        public void run() {
            long startComplete = System.nanoTime();
            int regCount = REGISTER_COUNT / readerCount;
            try {
                for (int i = 0; i < regCount; i++) {
                    Long registeredId = registeredEvents.poll(1, TimeUnit.SECONDS);
                    if (registeredId == null) {
                        i--;
                    } else {
                        underTest.onRpcCompletion(registeredId);
                    }
                }
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            } finally {
                long totalTime = System.nanoTime() - startComplete;
                System.out.println(String.format("onRpcCompletion %d in %d.  %d nanos/complete.  %f unreg/sec",
                        regCount, totalTime / 1000000, totalTime / regCount,
                        regCount * 1000000000.0 / totalTime));
            }
        }
    };

    List<ListenableFuture<?>> writerFutures = new ArrayList<>();
    List<ListenableFuture<?>> readerFutures = new ArrayList<>();

    for (int i = 0; i < writerCount; i++) {
        writerFutures.add(pool.submit(writePerfRunnable));
    }
    Thread.sleep(10);
    for (int i = 0; i < readerCount; i++) {
        readerFutures.add(pool.submit(readPerfRunnable));
    }
    Futures.allAsList(writerFutures).get(300, TimeUnit.MINUTES);
    underTest.awaitCompletion();
}

From source file:com.google.devtools.build.android.AndroidResourceProcessor.java

@Nullable
public SymbolLoader loadResourceSymbolTable(List<SymbolFileProvider> libraries, String appPackageName,
        Path primaryRTxt, Multimap<String, SymbolLoader> libMap) throws IOException {
    // The reported availableProcessors may be higher than the actual resources
    // (on a shared system). On the other hand, a lot of the work is I/O, so it's not completely
    // CPU bound. As a compromise, divide the reported availableProcessors by 2.
    int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
    ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(numThreads));
    try (Closeable closeable = ExecutorServiceCloser.createWith(executorService)) {
        // Load the package names from the manifest files.
        Map<SymbolFileProvider, ListenableFuture<String>> packageJobs = new HashMap<>();
        for (final SymbolFileProvider lib : libraries) {
            packageJobs.put(lib, executorService.submit(new PackageParsingTask(lib.getManifest())));
        }
        Map<SymbolFileProvider, String> packageNames = new HashMap<>();
        try {
            for (Map.Entry<SymbolFileProvider, ListenableFuture<String>> entry : packageJobs.entrySet()) {
                packageNames.put(entry.getKey(), entry.getValue().get());
            }
        } catch (InterruptedException | ExecutionException e) {
            throw new IOException("Failed to load package name: ", e);
        }
        // Associate the packages with symbol files.
        for (SymbolFileProvider lib : libraries) {
            String packageName = packageNames.get(lib);
            // If the library package matches the app package skip -- the final app resource IDs are
            // stored in the primaryRTxt file.
            if (appPackageName.equals(packageName)) {
                continue;
            }
            File rFile = lib.getSymbolFile();
            // If the library has no resource, this file won't exist.
            if (rFile.isFile()) {
                SymbolLoader libSymbols = new SymbolLoader(rFile, stdLogger);
                libMap.put(packageName, libSymbols);
            }
        }
        // Even if there are no libraries, load fullSymbolValues, in case we only have resources
        // defined for the binary.
        File primaryRTxtFile = primaryRTxt.toFile();
        SymbolLoader fullSymbolValues = null;
        if (primaryRTxtFile.isFile()) {
            fullSymbolValues = new SymbolLoader(primaryRTxtFile, stdLogger);
        }
        // Now load the symbol files in parallel.
        List<ListenableFuture<?>> loadJobs = new ArrayList<>();
        Iterable<SymbolLoader> toLoad = fullSymbolValues != null
                ? Iterables.concat(libMap.values(), ImmutableList.of(fullSymbolValues))
                : libMap.values();
        for (final SymbolLoader loader : toLoad) {
            loadJobs.add(executorService.submit(new SymbolLoadingTask(loader)));
        }
        try {
            Futures.allAsList(loadJobs).get();
        } catch (InterruptedException | ExecutionException e) {
            throw new IOException("Failed to load SymbolFile: ", e);
        }
        return fullSymbolValues;
    }
}

From source file:org.jclouds.openstack.swift.v1.blobstore.RegionScopedSwiftBlobStore.java

@Beta
protected String putMultipartBlob(String container, Blob blob, PutOptions overrides,
        ListeningExecutorService executor) {
    ArrayList<ListenableFuture<MultipartPart>> parts = new ArrayList<ListenableFuture<MultipartPart>>();

    long contentLength = checkNotNull(blob.getMetadata().getContentMetadata().getContentLength(),
            "must provide content-length to use multi-part upload");
    MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
            getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
    long partSize = algorithm.calculateChunkSize(contentLength);
    MultipartUpload mpu = initiateMultipartUpload(container, blob.getMetadata(), partSize, overrides);
    int partNumber = 0;

    for (Payload payload : slicer.slice(blob.getPayload(), partSize)) {
        BlobUploader b = new BlobUploader(mpu, partNumber++, payload);
        parts.add(executor.submit(b));
    }

    return completeMultipartUpload(mpu, Futures.getUnchecked(Futures.allAsList(parts)));
}

From source file:io.druid.query.groupby.GroupByQueryRunnerFactory.java

@Override
public QueryRunner<Row> mergeRunners(final ExecutorService exec, Iterable<QueryRunner<Row>> queryRunners) {
    // mergeRunners should take ListeningExecutorService at some point
    final ListeningExecutorService queryExecutor = MoreExecutors.listeningDecorator(exec);

    if (config.get().isSingleThreaded()) {
        return new ConcatQueryRunner<>(Sequences.map(Sequences.simple(queryRunners),
                new Function<QueryRunner<Row>, QueryRunner<Row>>() {
                    @Override
                    public QueryRunner<Row> apply(final QueryRunner<Row> input) {
                        return new QueryRunner<Row>() {
                            @Override
                            public Sequence<Row> run(final Query<Row> query,
                                    final Map<String, Object> responseContext) {
                                final GroupByQuery queryParam = (GroupByQuery) query;
                                final Pair<IncrementalIndex, Accumulator<IncrementalIndex, Row>> indexAccumulatorPair = GroupByQueryHelper
                                        .createIndexAccumulatorPair(queryParam, config.get(),
                                                computationBufferPool);
                                final Pair<Queue, Accumulator<Queue, Row>> bySegmentAccumulatorPair = GroupByQueryHelper
                                        .createBySegmentAccumulatorPair();
                                final int priority = query.getContextPriority(0);
                                final boolean bySegment = query.getContextBySegment(false);

                                final ListenableFuture<Void> future = queryExecutor
                                        .submit(new AbstractPrioritizedCallable<Void>(priority) {
                                            @Override
                                            public Void call() throws Exception {
                                                if (bySegment) {
                                                    input.run(queryParam, responseContext).accumulate(
                                                            bySegmentAccumulatorPair.lhs,
                                                            bySegmentAccumulatorPair.rhs);
                                                } else {
                                                    input.run(query, responseContext).accumulate(
                                                            indexAccumulatorPair.lhs, indexAccumulatorPair.rhs);
                                                }

                                                return null;
                                            }
                                        });
                                try {
                                    queryWatcher.registerQuery(query, future);
                                    final Number timeout = query.getContextValue(QueryContextKeys.TIMEOUT,
                                            (Number) null);
                                    if (timeout == null) {
                                        future.get();
                                    } else {
                                        future.get(timeout.longValue(), TimeUnit.MILLISECONDS);
                                    }
                                } catch (InterruptedException e) {
                                    log.warn(e, "Query interrupted, cancelling pending results, query id [%s]",
                                            query.getId());
                                    future.cancel(true);
                                    throw new QueryInterruptedException("Query interrupted");
                                } catch (CancellationException e) {
                                    throw new QueryInterruptedException("Query cancelled");
                                } catch (TimeoutException e) {
                                    log.info("Query timeout, cancelling pending results for query id [%s]",
                                            query.getId());
                                    future.cancel(true);
                                    throw new QueryInterruptedException("Query timeout");
                                } catch (ExecutionException e) {
                                    throw Throwables.propagate(e.getCause());
                                }

                                if (bySegment) {
                                    return Sequences.simple(bySegmentAccumulatorPair.lhs);
                                }

                                return Sequences
                                        .simple(indexAccumulatorPair.lhs.iterableWithPostAggregations(null));
                            }
                        };
                    }
                }));
    } else {

        return new GroupByParallelQueryRunner(queryExecutor, config, queryWatcher, computationBufferPool,
                queryRunners);
    }
}

From source file:com.google.gerrit.server.git.BatchUpdate.java

private void executeChangeOps(boolean parallel) throws UpdateException, RestApiException {
    ListeningExecutorService executor = parallel ? changeUpdateExector
            : MoreExecutors.newDirectExecutorService();

    List<ChangeTask> tasks = new ArrayList<>(ops.keySet().size());
    try {
        if (notesMigration.commitChangeWrites() && repo != null) {
            // A NoteDb change may have been rebuilt since the repo was originally
            // opened, so make sure we see that.
            repo.scanForRepoChanges();
        }
        if (!ops.isEmpty() && notesMigration.failChangeWrites()) {
            // Fail fast before attempting any writes if changes are read-only, as
            // this is a programmer error.
            throw new OrmException(NoteDbUpdateManager.CHANGES_READ_ONLY);
        }
        List<ListenableFuture<?>> futures = new ArrayList<>(ops.keySet().size());
        for (Map.Entry<Change.Id, Collection<Op>> e : ops.asMap().entrySet()) {
            ChangeTask task = new ChangeTask(e.getKey(), e.getValue(), Thread.currentThread());
            tasks.add(task);
            futures.add(executor.submit(task));
        }
        long startNanos = System.nanoTime();
        Futures.allAsList(futures).get();
        maybeLogSlowUpdate(startNanos, "change");

        if (notesMigration.commitChangeWrites()) {
            startNanos = System.nanoTime();
            executeNoteDbUpdates(tasks);
            maybeLogSlowUpdate(startNanos, "NoteDb");
        }
    } catch (ExecutionException | InterruptedException e) {
        Throwables.propagateIfInstanceOf(e.getCause(), UpdateException.class);
        Throwables.propagateIfInstanceOf(e.getCause(), RestApiException.class);
        throw new UpdateException(e);
    } catch (OrmException | IOException e) {
        throw new UpdateException(e);
    }

    // Reindex changes.
    for (ChangeTask task : tasks) {
        if (task.deleted) {
            indexFutures.add(indexer.deleteAsync(task.id));
        } else if (task.dirty) {
            indexFutures.add(indexer.indexAsync(project, task.id));
        }
    }
}

From source file:org.jclouds.openstack.swift.v1.blobstore.RegionScopedSwiftBlobStore.java

@Override
@Beta
public void downloadBlob(String container, String name, File destination, ExecutorService executor) {

    ListeningExecutorService listeningExecutor = MoreExecutors.listeningDecorator(executor);
    RandomAccessFile raf = null;
    File tempFile = new File(destination.getName() + "." + UUID.randomUUID());
    try {
        long contentLength = api.getObjectApi(regionId, container).getWithoutBody(name).getPayload()
                .getContentMetadata().getContentLength();

        // Reserve space for performance reasons
        raf = new RandomAccessFile(tempFile, "rw");
        raf.seek(contentLength - 1);
        raf.write(0);

        // Determine download buffer size, smaller means less memory usage; larger is faster as long as threads are saturated
        long partSize = getMinimumMultipartPartSize();

        // Loop through ranges within the file
        long from;
        long to;
        List<ListenableFuture<Void>> results = new ArrayList<ListenableFuture<Void>>();

        for (from = 0; from < contentLength; from = from + partSize) {
            to = (from + partSize >= contentLength) ? contentLength - 1 : from + partSize - 1;
            BlobDownloader b = new BlobDownloader(regionId, container, name, raf, from, to);
            results.add(listeningExecutor.submit(b));
        }

        Futures.getUnchecked(Futures.allAsList(results));

        raf.getChannel().force(true);
        raf.getChannel().close();
        raf.close();

        if (destination.exists()) {
            destination.delete();
        }
        if (!tempFile.renameTo(destination)) {
            throw new RuntimeException(
                    "Could not move temporary downloaded file to destination " + destination);
        }
        tempFile = null;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        Closeables2.closeQuietly(raf);
        if (tempFile != null) {
            tempFile.delete();
        }
    }
}

From source file:org.jclouds.openstack.swift.v1.blobstore.RegionScopedSwiftBlobStore.java

@Beta
@Override
public InputStream streamBlob(final String container, final String name, final ExecutorService executor) {

    final ListeningExecutorService listeningExecutor = MoreExecutors.listeningDecorator(executor);
    // User will receive the Input end of the piped stream
    final PipedOutputStream output;
    final PipedInputStream input;
    try {
        output = new PipedOutputStream();
        input = new PipedInputStream(output,
                getMinimumMultipartPartSize() * 5 > Integer.MAX_VALUE ? Integer.MAX_VALUE
                        : (int) getMinimumMultipartPartSize() * 5);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // The total length of the file to download is needed to determine ranges
    // It has to be obtainable without downloading the whole file
    final long contentLength = api.getObjectApi(regionId, container).getWithoutBody(name).getPayload()
            .getContentMetadata().getContentLength();

    // Determine download buffer size, smaller means less memory usage; larger is faster as long as threads are saturated
    final long partSize = getMinimumMultipartPartSize();

    // Used to communicate between the producer and consumer threads
    final LinkedBlockingQueue<ListenableFuture<byte[]>> results = new LinkedBlockingQueue<ListenableFuture<byte[]>>();

    listeningExecutor.submit(new Runnable() {
        @Override
        public void run() {
            ListenableFuture<byte[]> result;
            long from;
            try {
                for (from = 0; from < contentLength; from = from + partSize) {
                    logger.debug(Thread.currentThread() + " writing to output");
                    result = results.take();
                    if (result == null) {
                        output.close();
                        input.close();
                        throw new RuntimeException("Error downloading file part to stream");
                    }
                    output.write(result.get());
                }
            } catch (Exception e) {
                logger.debug(e.toString());
                // Close pipe so client is notified of an exception
                Closeables2.closeQuietly(input);
                throw new RuntimeException(e);
            } finally {
                // Finished writing results to stream
                Closeables2.closeQuietly(output);
            }
        }
    });

    listeningExecutor.submit(new Runnable() {
        @Override
        public void run() {
            long from;
            long to;
            // Loop through ranges within the file
            for (from = 0; from < contentLength; from = from + partSize) {
                to = (from + partSize >= contentLength) ? contentLength - 1 : from + partSize - 1;
                BlobStreamDownloader b = new BlobStreamDownloader(container, name, from, to);
                results.add(listeningExecutor.submit(b));
            }
        }
    });
    return input;
}

From source file:com.google.gerrit.server.update.ReviewDbBatchUpdate.java

private List<ChangeTask> executeChangeOps(boolean parallel, boolean dryrun)
        throws UpdateException, RestApiException {
    List<ChangeTask> tasks;
    boolean success = false;
    Stopwatch sw = Stopwatch.createStarted();
    try {
        logDebug("Executing change ops (parallel? {})", parallel);
        ListeningExecutorService executor = parallel ? changeUpdateExector
                : MoreExecutors.newDirectExecutorService();

        tasks = new ArrayList<>(ops.keySet().size());
        try {
            if (notesMigration.commitChangeWrites() && repoView != null) {
                // A NoteDb change may have been rebuilt since the repo was originally
                // opened, so make sure we see that.
                logDebug("Preemptively scanning for repo changes");
                repoView.getRepository().scanForRepoChanges();
            }
            if (!ops.isEmpty() && notesMigration.failChangeWrites()) {
                // Fail fast before attempting any writes if changes are read-only, as
                // this is a programmer error.
                logDebug("Failing early due to read-only Changes table");
                throw new OrmException(NoteDbUpdateManager.CHANGES_READ_ONLY);
            }
            List<ListenableFuture<?>> futures = new ArrayList<>(ops.keySet().size());
            for (Map.Entry<Change.Id, Collection<BatchUpdateOp>> e : ops.asMap().entrySet()) {
                ChangeTask task = new ChangeTask(e.getKey(), e.getValue(), Thread.currentThread(), dryrun);
                tasks.add(task);
                if (!parallel) {
                    logDebug("Direct execution of task for ops: {}", ops);
                }
                futures.add(executor.submit(task));
            }
            if (parallel) {
                logDebug("Waiting on futures for {} ops spanning {} changes", ops.size(), ops.keySet().size());
            }
            Futures.allAsList(futures).get();

            if (notesMigration.commitChangeWrites()) {
                if (!dryrun) {
                    executeNoteDbUpdates(tasks);
                }
            }
            success = true;
        } catch (ExecutionException | InterruptedException e) {
            Throwables.throwIfInstanceOf(e.getCause(), UpdateException.class);
            Throwables.throwIfInstanceOf(e.getCause(), RestApiException.class);
            throw new UpdateException(e);
        } catch (OrmException | IOException e) {
            throw new UpdateException(e);
        }
    } finally {
        metrics.executeChangeOpsLatency.record(success, sw.elapsed(NANOSECONDS), NANOSECONDS);
    }
    return tasks;
}

From source file:com.facebook.buck.distributed.build_client.PreBuildPhase.java

/** Run all steps required before the build. */
public Pair<StampedeId, ListenableFuture<Void>> runPreDistBuildLocalStepsAsync(
        ListeningExecutorService networkExecutorService, ProjectFilesystem projectFilesystem,
        FileHashCache fileHashCache, BuckEventBus eventBus, BuildId buildId, BuildMode buildMode,
        MinionRequirements minionRequirements, String repository, String tenantId,
        ListenableFuture<ParallelRuleKeyCalculator<RuleKey>> localRuleKeyCalculatorFuture)
        throws IOException, DistBuildRejectedException {
    ConsoleEventsDispatcher consoleEventsDispatcher = new ConsoleEventsDispatcher(eventBus);

    distBuildClientStats.startTimer(CREATE_DISTRIBUTED_BUILD);
    List<String> buildTargets = topLevelTargets.stream().map(BuildTarget::getFullyQualifiedName).sorted()
            .collect(Collectors.toList());
    BuildJob job = distBuildService.createBuild(buildId, buildMode, minionRequirements, repository, tenantId,
            buildTargets, buildLabel);
    distBuildClientStats.stopTimer(CREATE_DISTRIBUTED_BUILD);

    if (job.getBuildLabel() != null) {
        // Override the build label with the server-side inferred label.
        this.buildLabel = job.getBuildLabel();
        distBuildClientStats.setUserOrInferredBuildLabel(buildLabel);
    }

    StampedeId stampedeId = job.getStampedeId();
    eventBus.post(new DistBuildCreatedEvent(stampedeId));

    LOG.info("Created job. StampedeId = " + stampedeId.getId());

    consoleEventsDispatcher.postDistBuildStatusEvent(job, ImmutableList.of(), "SERIALIZING AND UPLOADING DATA");

    List<ListenableFuture<?>> asyncJobs = new LinkedList<>();

    asyncJobs.add(Futures.transformAsync(asyncJobState, jobState -> {
        LOG.info("Uploading local changes.");
        return distBuildService.uploadMissingFilesAsync(distBuildCellIndexer.getLocalFilesystemsByCellIndex(),
                jobState.fileHashes, distBuildClientStats, networkExecutorService);
    }, networkExecutorService));

    asyncJobs.add(Futures.transform(asyncJobState, jobState -> {
        LOG.info("Uploading target graph.");
        try {
            distBuildService.uploadTargetGraph(jobState, stampedeId, distBuildClientStats);
        } catch (IOException e) {
            throw new RuntimeException("Failed to upload target graph with exception.", e);
        }
        return null;
    }, networkExecutorService));

    LOG.info("Uploading buck dot-files.");
    asyncJobs.add(distBuildService.uploadBuckDotFilesAsync(stampedeId, projectFilesystem, fileHashCache,
            distBuildClientStats, networkExecutorService));

    asyncJobs.add(networkExecutorService.submit(() -> {
        LOG.info("Setting buck version.");
        try {
            distBuildService.setBuckVersion(stampedeId, buckVersion, distBuildClientStats);
        } catch (IOException e) {
            throw new RuntimeException("Failed to set buck-version with exception.", e);
        }
    }));

    DistBuildConfig distBuildConfig = new DistBuildConfig(buildExecutorArgs.getBuckConfig());

    if (distBuildConfig.isUploadFromLocalCacheEnabled()) {
        asyncJobs.add(Futures.transformAsync(localRuleKeyCalculatorFuture, localRuleKeyCalculator -> {
            try (ArtifactCacheByBuildRule artifactCache = new DistBuildArtifactCacheImpl(
                    actionAndTargetGraphs.getActionGraphAndBuilder().getActionGraphBuilder(),
                    networkExecutorService,
                    buildExecutorArgs.getArtifactCacheFactory().remoteOnlyInstance(true, false), eventBus,
                    localRuleKeyCalculator,
                    Optional.of(buildExecutorArgs.getArtifactCacheFactory().localOnlyInstance(true, false)))) {

                return new CacheOptimizedBuildTargetsQueueFactory(
                        actionAndTargetGraphs.getActionGraphAndBuilder().getActionGraphBuilder(), artifactCache,
                        /* isDeepRemoteBuild */ false, localRuleKeyCalculator.getRuleDepsCache(),
                        /* shouldBuildSelectedTargetsLocally */ false)
                                .uploadCriticalNodesFromLocalCache(topLevelTargets, distBuildClientStats);

            } catch (Exception e) {
                LOG.error(e, "Failed to create BuildTargetsQueue.");
                throw new RuntimeException(e);
            }
        }, networkExecutorService));
    }

    ListenableFuture<Void> asyncPrep = Futures.transform(Futures.allAsList(asyncJobs), results -> {
        LOG.info("Finished async preparation of stampede job.");
        consoleEventsDispatcher.postDistBuildStatusEvent(job, ImmutableList.of(), "STARTING REMOTE BUILD");

        // Everything is now setup remotely to run the distributed build. No more local prep.
        this.distBuildClientStats.stopTimer(LOCAL_PREPARATION);
        return null;
    }, MoreExecutors.directExecutor());

    return new Pair<StampedeId, ListenableFuture<Void>>(stampedeId, asyncPrep);
}