Example usage for com.google.common.util.concurrent Futures transform

Introduction

On this page you can find example usages of com.google.common.util.concurrent.Futures.transform.

Prototype

public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
        Function<? super I, ? extends O> function, Executor executor) 

Document

Returns a new ListenableFuture whose result is the product of applying the given Function to the result of the given Future.
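
For orientation, here is a minimal, self-contained sketch of the three-argument overload shown above. It is not taken from any of the projects below; the class name, the example values, and the single-thread executor are illustrative assumptions only.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public class FuturesTransformExample {

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        // Executor on which the transformation function will run.
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

        // Input future; in real code this would typically come from an asynchronous call.
        ListenableFuture<Integer> lengthFuture = Futures.immediateFuture("hello".length());

        // Apply a synchronous Function to the result of the input future on the given executor.
        ListenableFuture<String> messageFuture =
                Futures.transform(lengthFuture, length -> "length=" + length, pool);

        System.out.println(messageFuture.get()); // prints "length=5"
        pool.shutdown();
    }
}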

Usage

From source file:org.hawkular.metrics.core.impl.cassandra.MetricsServiceCassandra.java

@Override
public Observable<Void> createTenant(final Tenant tenant) {
    return dataAccess.insertTenant(tenant).flatMap(resultSet -> {
        if (!resultSet.wasApplied()) {
            throw new TenantAlreadyExistsException(tenant.getId());
        }
        Map<MetricType, Set<Retention>> retentionsMap = new HashMap<>();
        for (RetentionSettings.RetentionKey key : tenant.getRetentionSettings().keySet()) {
            Set<Retention> retentions = retentionsMap.get(key.metricType);
            if (retentions == null) {
                retentions = new HashSet<>();
            }
            Interval interval = key.interval == null ? Interval.NONE : key.interval;
            Hours hours = hours(tenant.getRetentionSettings().get(key));
            retentions.add(new Retention(new MetricId("[" + key.metricType.getText() + "]", interval),
                    hours.toStandardSeconds().getSeconds()));
            retentionsMap.put(key.metricType, retentions);
        }
        if (retentionsMap.isEmpty()) {
            return Observable.from(Collections.singleton(null));
        }
        List<ResultSetFuture> updateRetentionFutures = new ArrayList<>();

        for (Map.Entry<MetricType, Set<Retention>> metricTypeSetEntry : retentionsMap.entrySet()) {
            updateRetentionFutures.add(dataAccess.updateRetentionsIndex(tenant.getId(),
                    metricTypeSetEntry.getKey(), metricTypeSetEntry.getValue()));

            for (Retention r : metricTypeSetEntry.getValue()) {
                dataRetentions.put(new DataRetentionKey(tenant.getId(), metricTypeSetEntry.getKey()),
                        r.getValue());
            }
        }

        ListenableFuture<List<ResultSet>> updateRetentionsFuture = Futures.allAsList(updateRetentionFutures);
        ListenableFuture<Void> transform = Futures.transform(updateRetentionsFuture, Functions.TO_VOID,
                metricsTasks);
        return RxUtil.from(transform, metricsTasks);
    });
}

From source file:com.facebook.buck.rules.CachingBuildRuleBuilder.java

ListenableFuture<BuildResult> build() {
    final AtomicReference<Long> outputSize = Atomics.newReference();

    ListenableFuture<List<BuildResult>> depResults = Futures.immediateFuture(Collections.emptyList());

    // If we're performing a deep build, guarantee that all dependencies will *always* get
    // materialized locally
    if (buildMode == CachingBuildEngine.BuildMode.DEEP
            || buildMode == CachingBuildEngine.BuildMode.POPULATE_FROM_REMOTE_CACHE) {
        depResults = buildRuleBuilderDelegate.getDepResults(rule, buildContext, executionContext);
    }

    ListenableFuture<BuildResult> buildResult = Futures.transformAsync(depResults,
            input -> buildOrFetchFromCache(),
            serviceByAdjustingDefaultWeightsTo(CachingBuildEngine.SCHEDULING_MORE_WORK_RESOURCE_AMOUNTS));

    // Check immediately (without posting a new task) for a failure so that we can short-circuit
    // pending work. Use .catchingAsync() instead of .catching() so that we can propagate unchecked
    // exceptions.
    buildResult = Futures.catchingAsync(buildResult, Throwable.class, throwable -> {
        Preconditions.checkNotNull(throwable);
        buildRuleBuilderDelegate.setFirstFailure(throwable);
        Throwables.throwIfInstanceOf(throwable, Exception.class);
        throw new RuntimeException(throwable);
    });

    buildResult = Futures.transform(buildResult, (result) -> {
        buildRuleBuilderDelegate.markRuleAsUsed(rule, buildContext.getEventBus());
        return result;
    }, MoreExecutors.directExecutor());

    // Setup a callback to handle either the cached or built locally cases.
    AsyncFunction<BuildResult, BuildResult> callback = input -> {

        // If we weren't successful, exit now.
        if (input.getStatus() != BuildRuleStatus.SUCCESS) {
            return Futures.immediateFuture(input);
        }

        try (Scope scope = LeafEvents.scope(buildContext.getEventBus(), "finalizing_build_rule")) {
            // We shouldn't see any build fail result at this point.
            BuildRuleSuccessType success = Preconditions.checkNotNull(input.getSuccess());

            // If we didn't build the rule locally, reload the recorded paths from the build
            // metadata.
            if (success != BuildRuleSuccessType.BUILT_LOCALLY) {
                try {
                    for (String str : onDiskBuildInfo.getValuesOrThrow(BuildInfo.MetadataKey.RECORDED_PATHS)) {
                        buildInfoRecorder.recordArtifact(Paths.get(str));
                    }
                } catch (IOException e) {
                    LOG.error(e, "Failed to read RECORDED_PATHS for %s", rule);
                    throw e;
                }
            }

            // Try to get the output size now that all outputs have been recorded.
            if (success == BuildRuleSuccessType.BUILT_LOCALLY) {
                outputSize.set(buildInfoRecorder.getOutputSize());
            }

            // If the success type means the rule has potentially changed its outputs...
            if (success.outputsHaveChanged()) {

                // The build has succeeded, whether we've fetched from cache, or built locally.
                // So run the post-build steps.
                if (rule instanceof HasPostBuildSteps) {
                    executePostBuildSteps(
                            ((HasPostBuildSteps) rule).getPostBuildSteps(buildContext.getBuildContext()));
                }

                // Invalidate any cached hashes for the output paths, since we've updated them.
                for (Path path : buildInfoRecorder.getRecordedPaths()) {
                    fileHashCache.invalidate(rule.getProjectFilesystem().resolve(path));
                }
            }

            if (SupportsInputBasedRuleKey.isSupported(rule) && success == BuildRuleSuccessType.BUILT_LOCALLY
                    && !buildInfoRecorder.getBuildMetadataFor(BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY)
                            .isPresent()) {
                // Doing this here is probably not strictly necessary, however in the case of
                // pipelined rules built locally we will never do an input-based cache check.
                // That check would have written the key to metadata, and there are some asserts
                // during cache upload that try to ensure they are present.
                Optional<RuleKey> inputRuleKey = calculateInputBasedRuleKey(buildContext.getEventBus());
                if (inputRuleKey.isPresent()) {
                    buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY,
                            inputRuleKey.get().toString());
                }
            }

            // If this rule uses dep files and we built locally, make sure we store the new dep file
            // list and re-calculate the dep file rule key.
            if (useDependencyFileRuleKey() && success == BuildRuleSuccessType.BUILT_LOCALLY) {

                // Query the rule for the actual inputs it used.
                ImmutableList<SourcePath> inputs = ((SupportsDependencyFileRuleKey) rule)
                        .getInputsAfterBuildingLocally(buildContext.getBuildContext(),
                                executionContext.getCellPathResolver());

                // Record the inputs into our metadata for next time.
                // TODO(#9117006): We don't support a way to serialize `SourcePath`s to the cache,
                // so we need to use DependencyFileEntry's instead and recover them on deserialization.
                ImmutableList<String> inputStrings = inputs.stream()
                        .map(inputString -> DependencyFileEntry.fromSourcePath(inputString, pathResolver))
                        .map(MoreFunctions.toJsonFunction()).collect(MoreCollectors.toImmutableList());
                buildInfoRecorder.addMetadata(BuildInfo.MetadataKey.DEP_FILE, inputStrings);

                // Re-calculate and store the depfile rule key for next time.
                Optional<RuleKeyAndInputs> depFileRuleKeyAndInputs = calculateDepFileRuleKey(
                        Optional.of(inputStrings), /* allowMissingInputs */ false);
                if (depFileRuleKeyAndInputs.isPresent()) {
                    RuleKey depFileRuleKey = depFileRuleKeyAndInputs.get().getRuleKey();
                    buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.DEP_FILE_RULE_KEY,
                            depFileRuleKey.toString());

                    // Push an updated manifest to the cache.
                    if (useManifestCaching()) {
                        Optional<RuleKeyAndInputs> manifestKey = calculateManifestKey(
                                buildContext.getEventBus());
                        if (manifestKey.isPresent()) {
                            buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.MANIFEST_KEY,
                                    manifestKey.get().getRuleKey().toString());
                            updateAndStoreManifest(depFileRuleKeyAndInputs.get().getRuleKey(),
                                    depFileRuleKeyAndInputs.get().getInputs(), manifestKey.get(),
                                    buildContext.getArtifactCache());
                        }
                    }
                }
            }

            // If this rule was built locally, grab and record the output hashes in the build
            // metadata so that cache hits avoid re-hashing file contents.  Since we use output
            // hashes for input-based rule keys and for detecting non-determinism, we would spend
            // a lot of time re-hashing output paths -- potentially serialized in a single step.
            // So, do the hashing here to distribute the workload across several threads and cache
            // the results.
            //
            // Also, since hashing outputs can potentially be expensive, we avoid doing this for
            // rules that are marked as uncacheable.  The rationale here is that they are likely not
            // cached due to their sheer size (which would be costly to hash) or built-in non-determinism
            // in the rule, which somewhat defeats the purpose of logging the hash.
            if (success == BuildRuleSuccessType.BUILT_LOCALLY
                    && shouldUploadToCache(success, Preconditions.checkNotNull(outputSize.get()))) {
                ImmutableSortedMap.Builder<String, String> outputHashes = ImmutableSortedMap.naturalOrder();
                for (Path path : buildInfoRecorder.getOutputPaths()) {
                    outputHashes.put(path.toString(),
                            fileHashCache.get(rule.getProjectFilesystem().resolve(path)).toString());
                }
                buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.RECORDED_PATH_HASHES,
                        outputHashes.build());
            }

            // If this rule was fetched from cache, seed the file hash cache with the recorded
            // output hashes from the build metadata.  Since outputs which have been changed have
            // already been invalidated above, this is purely a best-effort optimization -- if the
            // output hashes weren't recorded in the cache, we do nothing.
            if (success != BuildRuleSuccessType.BUILT_LOCALLY && success.outputsHaveChanged()) {
                Optional<ImmutableMap<String, String>> hashes = onDiskBuildInfo
                        .getBuildMap(BuildInfo.MetadataKey.RECORDED_PATH_HASHES);

                // We only seed after first verifying the recorded path hashes.  This prevents the
                // optimization, but is useful to keep in place for a while to verify this optimization
                // isn't causing issues.
                if (hashes.isPresent() && verifyRecordedPathHashes(rule.getBuildTarget(),
                        rule.getProjectFilesystem(), hashes.get())) {

                    // Seed the cache with the hashes.
                    for (Map.Entry<String, String> ent : hashes.get().entrySet()) {
                        Path path = rule.getProjectFilesystem().getPath(ent.getKey());
                        HashCode hashCode = HashCode.fromString(ent.getValue());
                        fileHashCache.set(rule.getProjectFilesystem().resolve(path), hashCode);
                    }
                }
            }

            // Make sure the origin field is filled in.
            BuildId buildId = buildContext.getBuildId();
            if (success == BuildRuleSuccessType.BUILT_LOCALLY) {
                buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.ORIGIN_BUILD_ID, buildId.toString());
            } else if (success.outputsHaveChanged()) {
                Preconditions.checkState(
                        buildInfoRecorder.getBuildMetadataFor(BuildInfo.MetadataKey.ORIGIN_BUILD_ID)
                                .isPresent(),
                        "Cache hits must populate the %s field (%s)", BuildInfo.MetadataKey.ORIGIN_BUILD_ID,
                        success);
            }

            // Make sure that all of the local files have the same values they would as if the
            // rule had been built locally.
            buildInfoRecorder.addBuildMetadata(BuildInfo.MetadataKey.TARGET, rule.getBuildTarget().toString());
            buildInfoRecorder.addMetadata(BuildInfo.MetadataKey.RECORDED_PATHS,
                    buildInfoRecorder.getRecordedPaths().stream().map(Object::toString)
                            .collect(MoreCollectors.toImmutableList()));
            if (success.shouldWriteRecordedMetadataToDiskAfterBuilding()) {
                try {
                    boolean clearExistingMetadata = success.shouldClearAndOverwriteMetadataOnDisk();
                    buildInfoRecorder.writeMetadataToDisk(clearExistingMetadata);
                } catch (IOException e) {
                    throw new IOException(String.format("Failed to write metadata to disk for %s.", rule), e);
                }
            }

            // Give the rule a chance to populate its internal data structures now that all of
            // the files should be in a valid state.
            try {
                if (rule instanceof InitializableFromDisk) {
                    doInitializeFromDisk((InitializableFromDisk<?>) rule);
                }
            } catch (IOException e) {
                throw new IOException(
                        String.format("Error initializing %s from disk: %s.", rule, e.getMessage()), e);
            }
        }

        return Futures.immediateFuture(input);
    };
    buildResult = Futures.transformAsync(buildResult, ruleAsyncFunction(buildContext.getEventBus(), callback),
            serviceByAdjustingDefaultWeightsTo(CachingBuildEngine.RULE_KEY_COMPUTATION_RESOURCE_AMOUNTS));

    buildResult = Futures.catchingAsync(buildResult, Throwable.class, thrown -> {
        LOG.debug(thrown, "Building rule [%s] failed.", rule.getBuildTarget());

        if (consoleLogBuildFailuresInline) {
            buildContext.getEventBus().post(ConsoleEvent.severe(getErrorMessageIncludingBuildRule(thrown)));
        }

        thrown = maybeAttachBuildRuleNameToException(thrown);
        recordFailureAndCleanUp(thrown);

        return Futures.immediateFuture(BuildResult.failure(rule, thrown));
    });

    // Do things that need to happen after either success or failure, but don't block the dependents
    // while doing so:
    buildRuleBuilderDelegate
            .addAsyncCallback(MoreFutures.addListenableCallback(buildResult, new FutureCallback<BuildResult>() {

                private void uploadToCache(BuildRuleSuccessType success) {

                    // Collect up all the rule keys we have to index the artifact in the cache with.
                    Set<RuleKey> ruleKeys = new HashSet<>();

                    // If the rule key has changed (and is not already in the cache), we need to push
                    // the artifact to cache using the new key.
                    ruleKeys.add(ruleKeyFactories.getDefaultRuleKeyFactory().build(rule));

                    // If the input-based rule key has changed, we need to push the artifact to cache
                    // using the new key.
                    if (SupportsInputBasedRuleKey.isSupported(rule)) {
                        Optional<RuleKey> calculatedRuleKey = calculateInputBasedRuleKey(
                                buildContext.getEventBus());
                        Optional<RuleKey> onDiskRuleKey = onDiskBuildInfo
                                .getRuleKey(BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY);
                        Optional<RuleKey> metaDataRuleKey = buildInfoRecorder
                                .getBuildMetadataFor(BuildInfo.MetadataKey.INPUT_BASED_RULE_KEY)
                                .map(RuleKey::new);
                        Preconditions.checkState(calculatedRuleKey.equals(onDiskRuleKey),
                                "%s (%s): %s: invalid on-disk input-based rule key: %s != %s",
                                rule.getBuildTarget(), rule.getType(), success, calculatedRuleKey,
                                onDiskRuleKey);
                        Preconditions.checkState(calculatedRuleKey.equals(metaDataRuleKey),
                                "%s: %s: invalid meta-data input-based rule key: %s != %s",
                                rule.getBuildTarget(), success, calculatedRuleKey, metaDataRuleKey);
                        if (calculatedRuleKey.isPresent()) {
                            ruleKeys.add(calculatedRuleKey.get());
                        }
                    }

                    // If the manifest-based rule key has changed, we need to push the artifact to cache
                    // using the new key.
                    if (useManifestCaching()) {
                        Optional<RuleKey> onDiskRuleKey = onDiskBuildInfo
                                .getRuleKey(BuildInfo.MetadataKey.DEP_FILE_RULE_KEY);
                        Optional<RuleKey> metaDataRuleKey = buildInfoRecorder
                                .getBuildMetadataFor(BuildInfo.MetadataKey.DEP_FILE_RULE_KEY).map(RuleKey::new);
                        Preconditions.checkState(onDiskRuleKey.equals(metaDataRuleKey),
                                "%s: %s: inconsistent meta-data and on-disk dep-file rule key: %s != %s",
                                rule.getBuildTarget(), success, onDiskRuleKey, metaDataRuleKey);
                        if (onDiskRuleKey.isPresent()) {
                            ruleKeys.add(onDiskRuleKey.get());
                        }
                    }

                    // Do the actual upload.
                    try {

                        // Verify that the recorded path hashes are accurate.
                        Optional<String> recordedPathHashes = buildInfoRecorder
                                .getBuildMetadataFor(BuildInfo.MetadataKey.RECORDED_PATH_HASHES);
                        if (recordedPathHashes.isPresent() && !verifyRecordedPathHashes(rule.getBuildTarget(),
                                rule.getProjectFilesystem(), recordedPathHashes.get())) {
                            return;
                        }

                        // Push to cache.
                        buildInfoRecorder.performUploadToArtifactCache(ImmutableSet.copyOf(ruleKeys),
                                buildContext.getArtifactCache(), buildContext.getEventBus());

                    } catch (Throwable t) {
                        buildContext.getEventBus().post(
                                ThrowableConsoleEvent.create(t, "Error uploading to cache for %s.", rule));
                    }
                }

                private void handleResult(BuildResult input) {
                    Optional<Long> outputSize = Optional.empty();
                    Optional<HashCode> outputHash = Optional.empty();
                    Optional<BuildRuleSuccessType> successType = Optional.empty();
                    boolean shouldUploadToCache = false;

                    BuildRuleEvent.Resumed resumedEvent = BuildRuleEvent.resumed(rule, buildRuleDurationTracker,
                            ruleKeyFactories.getDefaultRuleKeyFactory());
                    LOG.verbose(resumedEvent.toString());
                    buildContext.getEventBus().post(resumedEvent);

                    if (input.getStatus() == BuildRuleStatus.SUCCESS) {
                        BuildRuleSuccessType success = Preconditions.checkNotNull(input.getSuccess());
                        successType = Optional.of(success);

                        // Try to get the output size.
                        if (success == BuildRuleSuccessType.BUILT_LOCALLY
                                || success.shouldUploadResultingArtifact()) {
                            try {
                                outputSize = Optional.of(buildInfoRecorder.getOutputSize());
                            } catch (IOException e) {
                                buildContext.getEventBus().post(ThrowableConsoleEvent.create(e,
                                        "Error getting output size for %s.", rule));
                            }
                        }

                        // Compute its output hash for logging/tracing purposes, as this artifact will
                        // be consumed by other builds.
                        if (outputSize.isPresent() && shouldHashOutputs(success, outputSize.get())) {
                            try {
                                outputHash = Optional.of(buildInfoRecorder.getOutputHash(fileHashCache));
                            } catch (IOException e) {
                                buildContext.getEventBus().post(ThrowableConsoleEvent.create(e,
                                        "Error getting output hash for %s.", rule));
                            }
                        }

                        // Determine if this rule is cacheable.
                        shouldUploadToCache = outputSize.isPresent()
                                && shouldUploadToCache(success, outputSize.get());

                        // Upload it to the cache.
                        if (shouldUploadToCache) {
                            uploadToCache(success);
                        }
                    }

                    boolean failureOrBuiltLocally = input.getStatus() == BuildRuleStatus.FAIL
                            || input.getSuccess() == BuildRuleSuccessType.BUILT_LOCALLY;
                    // Log the result to the event bus.
                    BuildRuleEvent.Finished finished = BuildRuleEvent.finished(resumedEvent, getBuildRuleKeys(),
                            input.getStatus(), input.getCacheResult(),
                            onDiskBuildInfo.getBuildValue(BuildInfo.MetadataKey.ORIGIN_BUILD_ID)
                                    .map(BuildId::new),
                            successType, shouldUploadToCache, outputHash, outputSize,
                            getBuildRuleDiagnosticData(failureOrBuiltLocally));
                    LOG.verbose(finished.toString());
                    buildContext.getEventBus().post(finished);
                }

                @Override
                public void onSuccess(BuildResult input) {
                    handleResult(input);

                    // Reset interrupted flag once failure has been recorded.
                    if (input.getFailure() instanceof InterruptedException) {
                        Threads.interruptCurrentThread();
                    }
                }

                @Override
                public void onFailure(@Nonnull Throwable thrown) {
                    throw new AssertionError("Dead code");
                }
            }, serviceByAdjustingDefaultWeightsTo(CachingBuildEngine.RULE_KEY_COMPUTATION_RESOURCE_AMOUNTS)));
    return buildResult;
}

From source file:org.apache.helix.provisioning.yarn.YarnProvisioner.java

@Override
public ListenableFuture<Boolean> stopContainer(final ContainerId containerId) {
    Container container = allocatedContainersMap.get(containerId);
    ListenableFuture<ContainerStopResponse> future = applicationMaster.stopContainer(container);
    return Futures.transform(future, new Function<ContainerStopResponse, Boolean>() {
        @Override
        public Boolean apply(ContainerStopResponse response) {
            return response != null;
        }
    }, service);
}

From source file:com.facebook.buck.distributed.build_slave.CacheOptimizedBuildTargetsQueueFactory.java

/**
 * Upload the smallest set of cacheable {@link BuildRule}s from the dir-cache, which can help the
 * remote servers finish the build faster.
 *
 * @param targetsToBuild Top-level targets which this build needs to optimize for.
 * @param clientStatsTracker For tracking some timing/perf metrics for the Stampede client.
 * @return Future to track the progress of the uploads.
 */
public ListenableFuture<?> uploadCriticalNodesFromLocalCache(Iterable<BuildTarget> targetsToBuild,
        ClientStatsTracker clientStatsTracker) {
    clientStatsTracker.startTimer(ClientStatsTracker.DistBuildClientStat.LOCAL_UPLOAD_FROM_DIR_CACHE);

    traverseGraphFromTopLevelUsingAvailableCaches(targetsToBuild);
    return Futures.transform(Futures.allAsList(artifactCache.getAllUploadRuleFutures()), results -> {
        clientStatsTracker.stopTimer(ClientStatsTracker.DistBuildClientStat.LOCAL_UPLOAD_FROM_DIR_CACHE);
        clientStatsTracker.setMissingRulesUploadedFromDirCacheCount(results.size());
        return null;
    }, MoreExecutors.directExecutor());
}

From source file:org.thingsboard.server.dao.timeseries.CassandraBaseTimeseriesDao.java

private ListenableFuture<Optional<TsKvEntry>> findAndAggregateAsync(TenantId tenantId, EntityId entityId,
        ReadTsKvQuery query, long minPartition, long maxPartition) {
    final Aggregation aggregation = query.getAggregation();
    final String key = query.getKey();
    final long startTs = query.getStartTs();
    final long endTs = query.getEndTs();
    final long ts = startTs + (endTs - startTs) / 2;
    ListenableFuture<List<Long>> partitionsListFuture = getPartitionsFuture(tenantId, query, entityId,
            minPartition, maxPartition);
    ListenableFuture<List<ResultSet>> aggregationChunks = Futures.transformAsync(partitionsListFuture,
            getFetchChunksAsyncFunction(tenantId, entityId, key, aggregation, startTs, endTs),
            readResultsProcessingExecutor);

    return Futures.transform(aggregationChunks, new AggregatePartitionsFunction(aggregation, key, ts),
            readResultsProcessingExecutor);
}

From source file:org.thingsboard.server.dao.sql.timeseries.JpaTimeseriesDao.java

@Override
public ListenableFuture<Void> removeLatest(TenantId tenantId, EntityId entityId, DeleteTsKvQuery query) {
    ListenableFuture<TsKvEntry> latestFuture = findLatest(tenantId, entityId, query.getKey());

    ListenableFuture<Boolean> booleanFuture = Futures.transform(latestFuture, tsKvEntry -> {
        long ts = tsKvEntry.getTs();
        return ts > query.getStartTs() && ts <= query.getEndTs();
    }, service);

    ListenableFuture<Void> removedLatestFuture = Futures.transformAsync(booleanFuture, isRemove -> {
        if (isRemove) {
            TsKvLatestEntity latestEntity = new TsKvLatestEntity();
            latestEntity.setEntityType(entityId.getEntityType());
            latestEntity.setEntityId(fromTimeUUID(entityId.getId()));
            latestEntity.setKey(query.getKey());
            return service.submit(() -> {
                tsKvLatestRepository.delete(latestEntity);
                return null;
            });
        }
        return Futures.immediateFuture(null);
    }, service);

    final SimpleListenableFuture<Void> resultFuture = new SimpleListenableFuture<>();
    Futures.addCallback(removedLatestFuture, new FutureCallback<Void>() {
        @Override
        public void onSuccess(@Nullable Void result) {
            if (query.getRewriteLatestIfDeleted()) {
                ListenableFuture<Void> savedLatestFuture = Futures.transformAsync(booleanFuture, isRemove -> {
                    if (isRemove) {
                        return getNewLatestEntryFuture(tenantId, entityId, query);
                    }
                    return Futures.immediateFuture(null);
                }, service);

                try {
                    resultFuture.set(savedLatestFuture.get());
                } catch (InterruptedException | ExecutionException e) {
                    log.warn("Could not get latest saved value for [{}], {}", entityId, query.getKey(), e);
                }
            } else {
                resultFuture.set(null);
            }
        }

        @Override
        public void onFailure(Throwable t) {
            log.warn("[{}] Failed to process remove of the latest value", entityId, t);
        }
    });
    return resultFuture;
}

From source file:io.prestosql.execution.SqlTask.java

public ListenableFuture<TaskStatus> getTaskStatus(TaskState callersCurrentState) {
    requireNonNull(callersCurrentState, "callersCurrentState is null");

    if (callersCurrentState.isDone()) {
        return immediateFuture(getTaskStatus());
    }

    ListenableFuture<TaskState> futureTaskState = taskStateMachine.getStateChange(callersCurrentState);
    return Futures.transform(futureTaskState, input -> getTaskStatus(), directExecutor());
}

From source file:org.opendaylight.faas.fabric.general.FabricServiceAPIProvider.java

@Override
public Future<RpcResult<Void>> rmGateway(RmGatewayInput input) {
    FabricId fabricId = input.getFabricId();
    final NodeId routerId = input.getLogicalRouter();
    IpAddress gwIp = input.getIpAddress();

    final FabricInstance fabricObj = FabricInstanceCache.INSTANCE.retrieveFabric(fabricId);
    if (fabricObj == null) {
        return Futures.immediateFailedFuture(
                new IllegalArgumentException(String.format("fabric %s does not exist", fabricId)));
    }

    ReadWriteTransaction trans = dataBroker.newReadWriteTransaction();

    TpId tpOnRouter = null;
    NodeId lswId = null;
    TpId tpOnSwitch = null;
    Link link = null;

    tpOnRouter = new TpId(String.valueOf(gwIp.getValue()));
    if (tpOnRouter != null) {
        link = findGWLink(trans, fabricId, tpOnRouter, routerId);
        trans.delete(LogicalDatastoreType.OPERATIONAL, MdSalUtils.createLinkIId(fabricId, link.getLinkId()));
    }
    if (link != null) {
        lswId = link.getDestination().getDestNode();
        tpOnSwitch = link.getDestination().getDestTp();
        trans.delete(LogicalDatastoreType.OPERATIONAL,
                MdSalUtils.createLogicPortIId(fabricId, lswId, tpOnSwitch));
    }

    final NodeId flswid = lswId == null ? null : lswId;

    trans.delete(LogicalDatastoreType.OPERATIONAL,
            MdSalUtils.createLogicPortIId(fabricId, routerId, tpOnRouter));

    return Futures.transform(trans.submit(), new AsyncFunction<Void, RpcResult<Void>>() {

        @Override
        public ListenableFuture<RpcResult<Void>> apply(Void submitResult) throws Exception {
            fabricObj.notifyGatewayRemoved(flswid, routerId);

            return Futures.immediateFuture(RpcResultBuilder.<Void>success().build());
        }
    }, executor);
}

From source file:io.prestosql.execution.SqlTask.java

public ListenableFuture<TaskInfo> getTaskInfo(TaskState callersCurrentState) {
    requireNonNull(callersCurrentState, "callersCurrentState is null");

    // If the caller's current state is already done, just return the current
    // state of this task as it will either be done or possibly still running
    // (due to a bug in the caller), since we can not transition from a done
    // state.
    if (callersCurrentState.isDone()) {
        return immediateFuture(getTaskInfo());
    }

    ListenableFuture<TaskState> futureTaskState = taskStateMachine.getStateChange(callersCurrentState);
    return Futures.transform(futureTaskState, input -> getTaskInfo(), directExecutor());
}

From source file:com.facebook.buck.rules.modern.builders.RemoteExecutionStrategy.java

private ListenableFuture<Optional<BuildResult>> executeNowThatInputsAreReady(ProjectFilesystem filesystem,
        BuildStrategyContext strategyContext, BuildTarget buildTarget, Callable<Throwable> tryStart,
        Digest actionDigest, Iterable<? extends Path> actionOutputs, String ruleName) {
    AtomicReference<Throwable> cancelled = new AtomicReference<>(null);
    ListenableFuture<ExecutionResult> executionResult = executionLimiter.schedule(service, () -> {
        cancelled.set(tryStart.call());
        boolean isCancelled = cancelled.get() != null;
        if (isCancelled) {
            RemoteExecutionActionEvent.sendTerminalEvent(eventBus, State.ACTION_CANCELLED, buildTarget,
                    Optional.of(actionDigest));
            return Futures.immediateFuture(null);
        }
        Scope executingScope = RemoteExecutionActionEvent.sendEvent(eventBus, State.EXECUTING, buildTarget,
                Optional.of(actionDigest));
        return Futures.transform(executionClients.getRemoteExecutionService().execute(actionDigest, ruleName),
                result -> {
                    executingScope.close();
                    return result;
                }, MoreExecutors.directExecutor());
    });

    return sendFailedEventOnException(Futures.transformAsync(executionResult, result -> {
        if (cancelled.get() != null) {
            return Futures.immediateFuture(Optional.of(strategyContext.createCancelledResult(cancelled.get())));
        }
        return handleResultLimiter.schedule(service, () -> handleExecutionResult(filesystem, strategyContext,
                buildTarget, result, actionDigest, actionOutputs));
    }, service), buildTarget, actionDigest);
}