List of usage examples for com.google.common.util.concurrent Futures transformAsync
public static <I, O> ListenableFuture<O> transformAsync(ListenableFuture<I> input, AsyncFunction<? super I, ? extends O> function, Executor executor)
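Before the project snippets, here is a minimal self-contained sketch of the call shape; the class, executor, and variable names are illustrative assumptions, not taken from the source files below.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class TransformAsyncSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        // Stage 1: an asynchronous computation producing an intermediate value.
        ListenableFuture<Integer> userId = pool.submit(() -> 42);

        // Stage 2: transformAsync chains a second asynchronous step. The AsyncFunction
        // returns a ListenableFuture rather than a plain value, so the follow-up lookup
        // can itself run asynchronously instead of blocking.
        ListenableFuture<String> userName = Futures.transformAsync(userId,
                id -> pool.submit(() -> "user-" + id),  // AsyncFunction<Integer, String>
                MoreExecutors.directExecutor());        // executor that runs the function

        System.out.println(userName.get());  // prints "user-42"
        pool.shutdown();
    }
}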
From source file: com.orangerhymelabs.helenus.cassandra.document.historical.HistoricalDocumentRepository.java
public ListenableFuture<HistoricalDocument> upsert(HistoricalDocument entity) {
    ListenableFuture<ResultSet> future = submitUpsert(entity);
    return Futures.transformAsync(future, new AsyncFunction<ResultSet, HistoricalDocument>() {
        @Override
        public ListenableFuture<HistoricalDocument> apply(ResultSet result) throws Exception {
            if (result.wasApplied()) {
                return Futures.immediateFuture(entity);
            }

            //TODO: This doesn't provide any informational value... what should it be?
            return Futures.immediateFailedFuture(new StorageException(String.format(
                "Table %s failed to store document: %s", table.toDbTable(), entity.toString())));
        }
    }, MoreExecutors.directExecutor());
}
From source file: com.facebook.buck.core.build.distributed.synchronization.impl.RemoteBuildRuleSynchronizer.java
private synchronized ListenableFuture<CacheResult> getNextCacheCheckResult(CacheResult result,
    Supplier<ListenableFuture<CacheResult>> cacheCheck, String buildTarget, int backOffNumber) {
  if (cancelAllCacheSyncBackOffs || unlockedRules.contains(buildTarget)) {
    LOG.info("Rule [%s] - cannot back off as rule unlocked or all backofffs cancelled.", buildTarget);
    return Futures.immediateFuture(result);
  }

  // Do not allow backoffs if completion happened longer than limit ago.
  long now = clock.currentTimeMillis();
  long completionTimestamp = completionTimestampsByBuildTarget.getOrDefault(buildTarget, now);
  // If entire build was completed and it was already signalled - use it for completion timestamp.
  completionTimestamp = remoteBuildFinished
      ? Math.min(completionTimestamp, remoteBuildFinishedTimestamp)
      : completionTimestamp;
  long maxBackoffTimestamp = completionTimestamp + cacheSyncMaxTotalBackoffMillis;
  if (maxBackoffTimestamp < now) {
    LOG.info("Rule [%s] was completed %d millis ago - cannot back off (max total back off: %d millis).",
        buildTarget, now - completionTimestamp, cacheSyncMaxTotalBackoffMillis);
    return Futures.immediateFuture(result);
  }

  long backOffMillis = Math.min(backOffsMillis[backOffNumber], maxBackoffTimestamp - now);
  LOG.info("Rule [%s], completed %d millis ago, not ready after %d attempts. Backing off %d millis.",
      buildTarget, now - completionTimestamp, backOffNumber + 1, backOffMillis);

  // Register timestamped settable future for the rule.
  SettableFuture<Void> backOffFuture = SettableFuture.create();
  backedOffBuildRulesWaitingForCacheSync.add(
      new TimestampedBuildRuleCacheSyncFuture(now + backOffMillis, backOffFuture));

  // Use 'scheduler' as executor instead of direct executor so that when
  // triggerCacheChecksForBackedOffBuildRulesWithSyncedCache() runs, it only does unlocking of
  // ready futures and doesn't execute any code of cacheCheck -> it will execute quickly.
  return Futures.transformAsync(backOffFuture, (Void v) -> cacheCheck.get(), scheduler);
}
From source file: com.orangerhymelabs.helenus.cassandra.document.DocumentService.java
public ListenableFuture<Document> update(String database, String table, Document document) {
    ListenableFuture<AbstractDocumentRepository> docs = acquireRepositoryFor(database, table);
    return Futures.transformAsync(docs, new AsyncFunction<AbstractDocumentRepository, Document>() {
        @Override
        public ListenableFuture<Document> apply(AbstractDocumentRepository input) throws Exception {
            try {
                ValidationEngine.validateAndThrow(document);
                return input.update(document);
            } catch (ValidationException e) {
                return Futures.immediateFailedFuture(e);
            }
        }
    }, MoreExecutors.directExecutor());
}
From source file: com.facebook.buck.util.concurrent.ResourcePool.java
@Override
public synchronized void close() {
  Preconditions.checkState(!closing.get());
  closing.set(true);

  // Unblock all waiting requests.
  for (SettableFuture<Void> request : resourceRequests) {
    request.set(null);
  }
  resourceRequests.clear();

  // Any processing that is currently taking place will be allowed to complete (as it won't notice
  // `closing` is true.
  // Any scheduled (but not executing) resource requests should notice `closing` is true and
  // mark themselves as cancelled.
  // Therefore `closeFuture` should allow us to wait for any resources that are in use.
  ListenableFuture<List<Object>> closeFuture = Futures.successfulAsList(pendingWork);

  // As silly as it seems this is the only reliable way to make sure we run the shutdown code.
  // Reusing an external executor means we run the risk of it being shut down before the cleanup
  // future is ready to run (which causes it to never run).
  // Using a direct executor means we run the chance of executing shutdown synchronously (which
  // we try to avoid).
  final ExecutorService executorService = MostExecutors.newSingleThreadExecutor("resource shutdown");

  // It is possible that more requests for work are scheduled at this point, however they should
  // all early-out due to `closing` being set to true, so we don't really care about those.
  shutdownFuture = Futures.transformAsync(closeFuture, new AsyncFunction<List<Object>, Void>() {
    @Override
    public ListenableFuture<Void> apply(List<Object> input) throws Exception {
      synchronized (ResourcePool.this) {
        if (parkedResources.size() != createdResources.size()) {
          LOG.error("Whoops! Some resource are still in use during shutdown.");
        }
        // Now that pending work is done we can close all resources.
        for (R resource : createdResources) {
          resource.close();
        }
        if (!resourceRequests.isEmpty()) {
          LOG.error("Error shutting down ResourcePool: "
              + "there should be no enqueued resource requests.");
        }
      }
      executorService.shutdown();
      return Futures.immediateFuture(null);
    }
  }, executorService);
}
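A recurring design decision in these examples is which Executor to pass as the third argument: MoreExecutors.directExecutor() is reasonable for cheap glue code, while continuations that do real work (the shutdown logic above, or the back-off cache check in the Buck synchronizer earlier) are handed a dedicated executor so they do not run on whichever thread happens to complete the input future. Below is a minimal sketch of the latter pattern; the ioPool executor and expensiveParse helper are hypothetical and not part of any source file quoted here.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class TransformAsyncExecutorSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService ioPool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));

        ListenableFuture<byte[]> download = ioPool.submit(() -> new byte[] { 1, 2, 3 });

        // Because ioPool (not directExecutor()) is passed as the executor, the
        // AsyncFunction body -- including the expensive parse -- runs on a pool
        // thread rather than on whichever thread completed `download`.
        ListenableFuture<Integer> parsed = Futures.transformAsync(download,
                bytes -> Futures.immediateFuture(expensiveParse(bytes)),
                ioPool);

        System.out.println(parsed.get());  // prints "3"
        ioPool.shutdown();
    }

    // Stand-in for a CPU-heavy transformation.
    private static int expensiveParse(byte[] bytes) {
        return bytes.length;
    }
}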
From source file: com.orangerhymelabs.helenus.cassandra.document.AbstractDocumentRepository.java
public ListenableFuture<Document> upsert(Document entity) {
    ListenableFuture<ResultSet> future = submitUpsert(entity);
    return Futures.transformAsync(future, new AsyncFunction<ResultSet, Document>() {
        @Override
        public ListenableFuture<Document> apply(ResultSet result) throws Exception {
            if (result.wasApplied()) {
                return Futures.immediateFuture(entity);
            }

            //TODO: This doesn't provide any informational value... what should it be?
            return Futures.immediateFailedFuture(new StorageException(String.format(
                "Table %s failed to store document: %s", tableName, entity.toString())));
        }
    }, MoreExecutors.directExecutor());
}
From source file: com.facebook.buck.rules.CachingBuildRuleBuilder.java
ListenableFuture<BuildResult> build() {
  final AtomicReference<Long> outputSize = Atomics.newReference();

  ListenableFuture<List<BuildResult>> depResults = Futures.immediateFuture(Collections.emptyList());

  // If we're performing a deep build, guarantee that all dependencies will *always* get
  // materialized locally
  if (buildMode == CachingBuildEngine.BuildMode.DEEP
      || buildMode == CachingBuildEngine.BuildMode.POPULATE_FROM_REMOTE_CACHE) {
    depResults = buildRuleBuilderDelegate.getDepResults(rule, buildContext, executionContext);
  }

  ListenableFuture<BuildResult> buildResult = Futures.transformAsync(depResults,
      input -> buildOrFetchFromCache(),
      serviceByAdjustingDefaultWeightsTo(CachingBuildEngine.SCHEDULING_MORE_WORK_RESOURCE_AMOUNTS));

  // Check immediately (without posting a new task) for a failure so that we can short-circuit
  // pending work. Use .catchingAsync() instead of .catching() so that we can propagate unchecked
  // exceptions.
  buildResult = Futures.catchingAsync(buildResult, Throwable.class, throwable -> {
    Preconditions.checkNotNull(throwable);
    buildRuleBuilderDelegate.setFirstFailure(throwable);
    Throwables.throwIfInstanceOf(throwable, Exception.class);
    throw new RuntimeException(throwable);
  });

  buildResult = Futures.transform(buildResult, (result) -> {
    buildRuleBuilderDelegate.markRuleAsUsed(rule, buildContext.getEventBus());
    return result;
  }, MoreExecutors.directExecutor());

  // Setup a callback to handle either the cached or built locally cases.
  AsyncFunction<BuildResult, BuildResult> callback = input -> {

    // If we weren't successful, exit now.
    if (input.getStatus() != BuildRuleStatus.SUCCESS) {
      return Futures.immediateFuture(input);
    }

    try (Scope scope = LeafEvents.scope(buildContext.getEventBus(), "finalizing_build_rule")) {
      // We shouldn't see any build fail result at this point.
      BuildRuleSuccessType success = Preconditions.checkNotNull(input.getSuccess());

      // If we didn't build the rule locally, reload the recorded paths from the build
      // metadata.
      if (success != BuildRuleSuccessType.BUILT_LOCALLY) {
        try {
          for (String str : onDiskBuildInfo.getValuesOrThrow(BuildInfo.MetadataKey.RECORDED_PATHS)) {
            buildInfoRecorder.recordArtifact(Paths.get(str));
          }
        } catch (IOException e) {
          LOG.error(e, "Failed to read RECORDED_PATHS for %s", rule);
          throw e;
        }
      }

      // Try get the output size now that all outputs have been recorded.
      if (success == BuildRuleSuccessType.BUILT_LOCALLY) {
        outputSize.set(buildInfoRecorder.getOutputSize());
      }

      // If the success type means the rule has potentially changed it's outputs...
      if (success.outputsHaveChanged()) {

        // The build has succeeded, whether we've fetched from cache, or built locally.
        // So run the post-build steps.
        if (rule instanceof HasPostBuildSteps) {
          executePostBuildSteps(
              ((HasPostBuildSteps) rule).getPostBuildSteps(buildContext.getBuildContext()));
        }

        // Invalidate any cached hashes for the output paths, since we've updated them.
        for (Path path : buildInfoRecorder.getRecordedPaths()) {
          fileHashCache.invalidate(rule.getProjectFilesystem().resolve(path));
        }
      }

      if (SupportsInputBasedRuleKey.isSupported(rule)
          && success == BuildRuleSuccessType.BUILT_LOCALLY
          && !buildInfoRecorder.getBuildMetadataFor(BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY)
              .isPresent()) {
        // Doing this here is probably not strictly necessary, however in the case of
        // pipelined rules built locally we will never do an input-based cache check.
        // That check would have written the key to metadata, and there are some asserts
        // during cache upload that try to ensure they are present.
        Optional<RuleKey> inputRuleKey = calculateInputBasedRuleKey(buildContext.getEventBus());
        if (inputRuleKey.isPresent()) {
          buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY,
              inputRuleKey.get().toString());
        }
      }

      // If this rule uses dep files and we built locally, make sure we store the new dep file
      // list and re-calculate the dep file rule key.
      if (useDependencyFileRuleKey() && success == BuildRuleSuccessType.BUILT_LOCALLY) {

        // Query the rule for the actual inputs it used.
        ImmutableList<SourcePath> inputs = ((SupportsDependencyFileRuleKey) rule)
            .getInputsAfterBuildingLocally(buildContext.getBuildContext(),
                executionContext.getCellPathResolver());

        // Record the inputs into our metadata for next time.
        // TODO(#9117006): We don't support a way to serlialize `SourcePath`s to the cache,
        // so need to use DependencyFileEntry's instead and recover them on deserialization.
        ImmutableList<String> inputStrings = inputs.stream()
            .map(inputString -> DependencyFileEntry.fromSourcePath(inputString, pathResolver))
            .map(MoreFunctions.toJsonFunction())
            .collect(MoreCollectors.toImmutableList());
        buildInfoRecorder.addMetadata(BuildInfo.MetadataKey.DEP_FILE, inputStrings);

        // Re-calculate and store the depfile rule key for next time.
        Optional<RuleKeyAndInputs> depFileRuleKeyAndInputs = calculateDepFileRuleKey(
            Optional.of(inputStrings), /* allowMissingInputs */ false);
        if (depFileRuleKeyAndInputs.isPresent()) {
          RuleKey depFileRuleKey = depFileRuleKeyAndInputs.get().getRuleKey();
          buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.DEP_FILE_RULE_KEY,
              depFileRuleKey.toString());

          // Push an updated manifest to the cache.
          if (useManifestCaching()) {
            Optional<RuleKeyAndInputs> manifestKey = calculateManifestKey(buildContext.getEventBus());
            if (manifestKey.isPresent()) {
              buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.MANIFEST_KEY,
                  manifestKey.get().getRuleKey().toString());
              updateAndStoreManifest(depFileRuleKeyAndInputs.get().getRuleKey(),
                  depFileRuleKeyAndInputs.get().getInputs(), manifestKey.get(),
                  buildContext.getArtifactCache());
            }
          }
        }
      }

      // If this rule was built locally, grab and record the output hashes in the build
      // metadata so that cache hits avoid re-hashing file contents. Since we use output
      // hashes for input-based rule keys and for detecting non-determinism, we would spend
      // a lot of time re-hashing output paths -- potentially in serialized in a single step.
      // So, do the hashing here to distribute the workload across several threads and cache
      // the results.
      //
      // Also, since hashing outputs can potentially be expensive, we avoid doing this for
      // rules that are marked as uncacheable. The rationale here is that they are likely not
      // cached due to the sheer size which would be costly to hash or builtin non-determinism
      // in the rule which somewhat defeats the purpose of logging the hash.
      if (success == BuildRuleSuccessType.BUILT_LOCALLY
          && shouldUploadToCache(success, Preconditions.checkNotNull(outputSize.get()))) {
        ImmutableSortedMap.Builder<String, String> outputHashes = ImmutableSortedMap.naturalOrder();
        for (Path path : buildInfoRecorder.getOutputPaths()) {
          outputHashes.put(path.toString(),
              fileHashCache.get(rule.getProjectFilesystem().resolve(path)).toString());
        }
        buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.RECORDED_PATH_HASHES,
            outputHashes.build());
      }

      // If this rule was fetched from cache, seed the file hash cache with the recorded
      // output hashes from the build metadata. Since outputs which have been changed have
      // already been invalidated above, this is purely a best-effort optimization -- if the
      // the output hashes weren't recorded in the cache we do nothing.
      if (success != BuildRuleSuccessType.BUILT_LOCALLY && success.outputsHaveChanged()) {
        Optional<ImmutableMap<String, String>> hashes =
            onDiskBuildInfo.getBuildMap(BuildInfo.MetadataKey.RECORDED_PATH_HASHES);

        // We only seed after first verifying the recorded path hashes. This prevents the
        // optimization, but is useful to keep in place for a while to verify this optimization
        // is causing issues.
        if (hashes.isPresent() && verifyRecordedPathHashes(rule.getBuildTarget(),
            rule.getProjectFilesystem(), hashes.get())) {

          // Seed the cache with the hashes.
          for (Map.Entry<String, String> ent : hashes.get().entrySet()) {
            Path path = rule.getProjectFilesystem().getPath(ent.getKey());
            HashCode hashCode = HashCode.fromString(ent.getValue());
            fileHashCache.set(rule.getProjectFilesystem().resolve(path), hashCode);
          }
        }
      }

      // Make sure the origin field is filled in.
      BuildId buildId = buildContext.getBuildId();
      if (success == BuildRuleSuccessType.BUILT_LOCALLY) {
        buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.ORIGIN_BUILD_ID, buildId.toString());
      } else if (success.outputsHaveChanged()) {
        Preconditions.checkState(
            buildInfoRecorder.getBuildMetadataFor(BuildInfo.MetadataKey.ORIGIN_BUILD_ID).isPresent(),
            "Cache hits must populate the %s field (%s)",
            BuildInfo.MetadataKey.ORIGIN_BUILD_ID, success);
      }

      // Make sure that all of the local files have the same values they would as if the
      // rule had been built locally.
      buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.TARGET, rule.getBuildTarget().toString());
      buildInfoRecorder.addMetadata(BuildInfo.MetadataKey.RECORDED_PATHS,
          buildInfoRecorder.getRecordedPaths().stream().map(Object::toString)
              .collect(MoreCollectors.toImmutableList()));
      if (success.shouldWriteRecordedMetadataToDiskAfterBuilding()) {
        try {
          boolean clearExistingMetadata = success.shouldClearAndOverwriteMetadataOnDisk();
          buildInfoRecorder.writeMetadataToDisk(clearExistingMetadata);
        } catch (IOException e) {
          throw new IOException(String.format("Failed to write metadata to disk for %s.", rule), e);
        }
      }

      // Give the rule a chance to populate its internal data structures now that all of
      // the files should be in a valid state.
      try {
        if (rule instanceof InitializableFromDisk) {
          doInitializeFromDisk((InitializableFromDisk<?>) rule);
        }
      } catch (IOException e) {
        throw new IOException(
            String.format("Error initializing %s from disk: %s.", rule, e.getMessage()), e);
      }
    }

    return Futures.immediateFuture(input);
  };

  buildResult = Futures.transformAsync(buildResult,
      ruleAsyncFunction(buildContext.getEventBus(), callback),
      serviceByAdjustingDefaultWeightsTo(CachingBuildEngine.RULE_KEY_COMPUTATION_RESOURCE_AMOUNTS));

  buildResult = Futures.catchingAsync(buildResult, Throwable.class, thrown -> {
    LOG.debug(thrown, "Building rule [%s] failed.", rule.getBuildTarget());

    if (consoleLogBuildFailuresInline) {
      buildContext.getEventBus().post(ConsoleEvent.severe(getErrorMessageIncludingBuildRule(thrown)));
    }

    thrown = maybeAttachBuildRuleNameToException(thrown);
    recordFailureAndCleanUp(thrown);

    return Futures.immediateFuture(BuildResult.failure(rule, thrown));
  });

  // Do things that need to happen after either success or failure, but don't block the dependents
  // while doing so:
  buildRuleBuilderDelegate.addAsyncCallback(
      MoreFutures.addListenableCallback(buildResult, new FutureCallback<BuildResult>() {

        private void uploadToCache(BuildRuleSuccessType success) {

          // Collect up all the rule keys we have index the artifact in the cache with.
          Set<RuleKey> ruleKeys = new HashSet<>();

          // If the rule key has changed (and is not already in the cache), we need to push
          // the artifact to cache using the new key.
          ruleKeys.add(ruleKeyFactories.getDefaultRuleKeyFactory().build(rule));

          // If the input-based rule key has changed, we need to push the artifact to cache
          // using the new key.
          if (SupportsInputBasedRuleKey.isSupported(rule)) {
            Optional<RuleKey> calculatedRuleKey = calculateInputBasedRuleKey(buildContext.getEventBus());
            Optional<RuleKey> onDiskRuleKey =
                onDiskBuildInfo.getRuleKey(BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY);
            Optional<RuleKey> metaDataRuleKey = buildInfoRecorder
                .getBuildMetadataFor(BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY)
                .map(RuleKey::new);
            Preconditions.checkState(calculatedRuleKey.equals(onDiskRuleKey),
                "%s (%s): %s: invalid on-disk input-based rule key: %s != %s",
                rule.getBuildTarget(), rule.getType(), success, calculatedRuleKey, onDiskRuleKey);
            Preconditions.checkState(calculatedRuleKey.equals(metaDataRuleKey),
                "%s: %s: invalid meta-data input-based rule key: %s != %s",
                rule.getBuildTarget(), success, calculatedRuleKey, metaDataRuleKey);
            if (calculatedRuleKey.isPresent()) {
              ruleKeys.add(calculatedRuleKey.get());
            }
          }

          // If the manifest-based rule key has changed, we need to push the artifact to cache
          // using the new key.
          if (useManifestCaching()) {
            Optional<RuleKey> onDiskRuleKey =
                onDiskBuildInfo.getRuleKey(BuildInfo.MetadataKey.DEP_FILE_RULE_KEY);
            Optional<RuleKey> metaDataRuleKey = buildInfoRecorder
                .getBuildMetadataFor(BuildInfo.MetadataKey.DEP_FILE_RULE_KEY)
                .map(RuleKey::new);
            Preconditions.checkState(onDiskRuleKey.equals(metaDataRuleKey),
                "%s: %s: inconsistent meta-data and on-disk dep-file rule key: %s != %s",
                rule.getBuildTarget(), success, onDiskRuleKey, metaDataRuleKey);
            if (onDiskRuleKey.isPresent()) {
              ruleKeys.add(onDiskRuleKey.get());
            }
          }

          // Do the actual upload.
          try {

            // Verify that the recorded path hashes are accurate.
            Optional<String> recordedPathHashes =
                buildInfoRecorder.getBuildMetadataFor(BuildInfo.MetadataKey.RECORDED_PATH_HASHES);
            if (recordedPathHashes.isPresent()
                && !verifyRecordedPathHashes(rule.getBuildTarget(), rule.getProjectFilesystem(),
                    recordedPathHashes.get())) {
              return;
            }

            // Push to cache.
            buildInfoRecorder.performUploadToArtifactCache(ImmutableSet.copyOf(ruleKeys),
                buildContext.getArtifactCache(), buildContext.getEventBus());
          } catch (Throwable t) {
            buildContext.getEventBus().post(
                ThrowableConsoleEvent.create(t, "Error uploading to cache for %s.", rule));
          }
        }

        private void handleResult(BuildResult input) {
          Optional<Long> outputSize = Optional.empty();
          Optional<HashCode> outputHash = Optional.empty();
          Optional<BuildRuleSuccessType> successType = Optional.empty();
          boolean shouldUploadToCache = false;

          BuildRuleEvent.Resumed resumedEvent = BuildRuleEvent.resumed(rule, buildRuleDurationTracker,
              ruleKeyFactories.getDefaultRuleKeyFactory());
          LOG.verbose(resumedEvent.toString());
          buildContext.getEventBus().post(resumedEvent);

          if (input.getStatus() == BuildRuleStatus.SUCCESS) {
            BuildRuleSuccessType success = Preconditions.checkNotNull(input.getSuccess());
            successType = Optional.of(success);

            // Try get the output size.
            if (success == BuildRuleSuccessType.BUILT_LOCALLY
                || success.shouldUploadResultingArtifact()) {
              try {
                outputSize = Optional.of(buildInfoRecorder.getOutputSize());
              } catch (IOException e) {
                buildContext.getEventBus().post(
                    ThrowableConsoleEvent.create(e, "Error getting output size for %s.", rule));
              }
            }

            // Compute it's output hash for logging/tracing purposes, as this artifact will
            // be consumed by other builds.
            if (outputSize.isPresent() && shouldHashOutputs(success, outputSize.get())) {
              try {
                outputHash = Optional.of(buildInfoRecorder.getOutputHash(fileHashCache));
              } catch (IOException e) {
                buildContext.getEventBus().post(
                    ThrowableConsoleEvent.create(e, "Error getting output hash for %s.", rule));
              }
            }

            // Determine if this is rule is cacheable.
            shouldUploadToCache = outputSize.isPresent() && shouldUploadToCache(success, outputSize.get());

            // Upload it to the cache.
            if (shouldUploadToCache) {
              uploadToCache(success);
            }
          }

          boolean failureOrBuiltLocally = input.getStatus() == BuildRuleStatus.FAIL
              || input.getSuccess() == BuildRuleSuccessType.BUILT_LOCALLY;

          // Log the result to the event bus.
          BuildRuleEvent.Finished finished = BuildRuleEvent.finished(resumedEvent, getBuildRuleKeys(),
              input.getStatus(), input.getCacheResult(),
              onDiskBuildInfo.getBuildValue(BuildInfo.MetadataKey.ORIGIN_BUILD_ID).map(BuildId::new),
              successType, shouldUploadToCache, outputHash, outputSize,
              getBuildRuleDiagnosticData(failureOrBuiltLocally));
          LOG.verbose(finished.toString());
          buildContext.getEventBus().post(finished);
        }

        @Override
        public void onSuccess(BuildResult input) {
          handleResult(input);

          // Reset interrupted flag once failure has been recorded.
          if (input.getFailure() instanceof InterruptedException) {
            Threads.interruptCurrentThread();
          }
        }

        @Override
        public void onFailure(@Nonnull Throwable thrown) {
          throw new AssertionError("Dead code");
        }
      }, serviceByAdjustingDefaultWeightsTo(CachingBuildEngine.RULE_KEY_COMPUTATION_RESOURCE_AMOUNTS)));

  return buildResult;
}
From source file: com.orangerhymelabs.helenus.cassandra.document.DocumentService.java
public ListenableFuture<Document> upsert(String database, String table, Document document) {
    ListenableFuture<AbstractDocumentRepository> docs = acquireRepositoryFor(database, table);
    return Futures.transformAsync(docs, new AsyncFunction<AbstractDocumentRepository, Document>() {
        @Override
        public ListenableFuture<Document> apply(AbstractDocumentRepository input) throws Exception {
            try {
                ValidationEngine.validateAndThrow(document);
                return input.upsert(document);
            } catch (ValidationException e) {
                return Futures.immediateFailedFuture(e);
            }
        }
    }, MoreExecutors.directExecutor());
}
From source file: com.facebook.buck.core.build.engine.cache.manager.ManifestRuleKeyManager.java
public ListenableFuture<ManifestFetchResult> performManifestBasedCacheFetch(
    RuleKeyAndInputs originalRuleKeyAndInputs) {
  Preconditions.checkArgument(useManifestCaching());

  // Explicitly drop the input list from the caller, as holding this in the closure below until
  // the future eventually runs can potentially consume a lot of memory.
  RuleKey manifestRuleKey = originalRuleKeyAndInputs.getRuleKey();
  originalRuleKeyAndInputs = null;

  // Fetch the manifest from the cache.
  return Futures.transformAsync(fetchManifest(manifestRuleKey),
      (@Nonnull CacheResult manifestCacheResult) -> {
        ManifestFetchResult.Builder manifestFetchResult = ManifestFetchResult.builder();
        manifestFetchResult.setManifestCacheResult(manifestCacheResult);
        if (!manifestCacheResult.getType().isSuccess()) {
          return Futures.immediateFuture(manifestFetchResult.build());
        }

        // Re-calculate the rule key and the input list. While we do already have the input list
        // above in `originalRuleKeyAndInputs`, we intentionally don't pass it in and use it here
        // to avoid holding on to significant memory until this future runs.
        RuleKeyAndInputs keyAndInputs =
            manifestBasedKeySupplier.get().orElseThrow(IllegalStateException::new);

        // Load the manifest from disk.
        ManifestLoadResult loadResult = loadManifest(keyAndInputs.getRuleKey());
        if (!loadResult.getManifest().isPresent()) {
          manifestFetchResult.setManifestLoadError(loadResult.getError().get());
          return Futures.immediateFuture(manifestFetchResult.build());
        }
        Manifest manifest = loadResult.getManifest().get();
        Preconditions.checkState(manifest.getKey().equals(keyAndInputs.getRuleKey()),
            "%s: found incorrectly keyed manifest: %s != %s",
            rule.getBuildTarget(), keyAndInputs.getRuleKey(), manifest.getKey());
        manifestFetchResult.setManifestStats(manifest.getStats());

        // Lookup the dep file rule key matching the current state of our inputs.
        Optional<RuleKey> depFileRuleKey =
            manifest.lookup(fileHashCache, pathResolver, keyAndInputs.getInputs());
        if (!depFileRuleKey.isPresent()) {
          return Futures.immediateFuture(manifestFetchResult.build());
        }
        manifestFetchResult.setDepFileRuleKey(depFileRuleKey.get());

        // Fetch the rule outputs from cache using the found dep file rule key.
        return Futures.transform(
            buildCacheArtifactFetcher.tryToFetchArtifactFromBuildCacheAndOverlayOnTopOfProjectFilesystem(
                depFileRuleKey.get(), artifactCache, rule.getProjectFilesystem()),
            (@Nonnull CacheResult ruleCacheResult) -> {
              manifestFetchResult.setRuleCacheResult(ruleCacheResult);
              return manifestFetchResult.build();
            }, MoreExecutors.directExecutor());
      }, MoreExecutors.directExecutor());
}
From source file: com.facebook.buck.rules.modern.builders.RemoteExecutionStrategy.java
private ListenableFuture<Optional<BuildResult>> handleActionInfo(BuildRule rule,
    BuildStrategyContext strategyContext, BuildTarget buildTarget,
    RemoteExecutionActionInfo actionInfo, Callable<Throwable> tryStart) throws IOException {
  Objects.requireNonNull(actionInfo);

  // The actionInfo may be very large, so explicitly capture just the parts that we need and clear
  // it (to hopefully catch future bad uses). actionInfo.getRequiredData() in particular may be
  // very, very large.
  Digest actionDigest = actionInfo.getActionDigest();
  Iterable<? extends Path> actionOutputs = actionInfo.getOutputs();
  ImmutableMap<Digest, UploadDataSupplier> requiredData = actionInfo.getRequiredData();

  Scope uploadingInputsScope = RemoteExecutionActionEvent.sendEvent(eventBus, State.UPLOADING_INPUTS,
      buildTarget, Optional.of(actionDigest));
  ListenableFuture<Void> inputsUploadedFuture =
      executionClients.getContentAddressedStorage().addMissing(requiredData);

  return Futures.transformAsync(inputsUploadedFuture, ignored -> {
    uploadingInputsScope.close();
    return executeNowThatInputsAreReady(rule.getProjectFilesystem(), strategyContext, buildTarget,
        tryStart, actionDigest, actionOutputs, rule.getFullyQualifiedName());
  }, service);
}
From source file: com.facebook.presto.transaction.InMemoryTransactionManager.java
@Override
public ListenableFuture<?> asyncCommit(TransactionId transactionId) {
    return nonCancellationPropagating(Futures.transformAsync(removeTransactionMetadataAsFuture(transactionId),
            TransactionMetadata::asyncCommit, directExecutor()));
}
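Several of the repository examples above return Futures.immediateFailedFuture from inside the AsyncFunction to signal an error. As a hedged sketch with illustrative names only: when the input future fails, transformAsync never invokes the function and simply propagates the failure, and a failed future returned from inside the function fails the output future the same way.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.util.concurrent.ExecutionException;

public class TransformAsyncFailureSketch {
    public static void main(String[] args) throws InterruptedException {
        // An input future that has already failed.
        ListenableFuture<Integer> failedInput =
                Futures.immediateFailedFuture(new IOException("read failed"));

        // The AsyncFunction is never invoked; the IOException propagates straight
        // to the output future.
        ListenableFuture<String> output = Futures.transformAsync(failedInput,
                value -> Futures.immediateFuture("value=" + value),
                MoreExecutors.directExecutor());

        try {
            output.get();
        } catch (ExecutionException e) {
            System.out.println("Failed as expected: " + e.getCause());  // java.io.IOException: read failed
        }
    }
}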