Example usage for com.google.common.util.concurrent.Futures.transform

List of usage examples for com.google.common.util.concurrent.Futures.transform

Introduction

On this page you can find example usage for com.google.common.util.concurrent.Futures.transform.

Prototype

public static <I, O> ListenableFuture<O> transform(ListenableFuture<I> input,
        Function<? super I, ? extends O> function, Executor executor) 

Document

Returns a new ListenableFuture whose result is the product of applying the given Function to the result of the given Future.
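
A minimal, self-contained sketch of this overload is shown below. The class name TransformSketch and the parse-a-string scenario are illustrative assumptions; the Guava calls themselves (listeningDecorator, transform, directExecutor, getUnchecked) are the real API.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class TransformSketch {
    public static void main(String[] args) {
        // Wrap a plain thread pool so it produces ListenableFutures.
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        // Input future: stands in for any asynchronous computation.
        ListenableFuture<String> raw = pool.submit(() -> "42");

        // The function runs once 'raw' completes successfully; the executor
        // argument decides which thread runs it. directExecutor() runs it on
        // the thread that completed 'raw', which is fine for cheap work.
        ListenableFuture<Integer> parsed =
                Futures.transform(raw, Integer::parseInt, MoreExecutors.directExecutor());

        System.out.println(Futures.getUnchecked(parsed)); // prints 42
        pool.shutdown();
    }
}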

Usage

From source file: com.facebook.buck.parser.AbstractParser.java

@Override
public ListenableFuture<SortedMap<String, Object>> getTargetNodeRawAttributesJob(PerBuildState state, Cell cell,
        TargetNode<?> targetNode) throws BuildFileParseException {
    Cell owningCell = cell.getCell(targetNode.getBuildTarget());
    ListenableFuture<BuildFileManifest> buildFileManifestFuture = state.getBuildFileManifestJob(owningCell,
            cell.getAbsolutePathToBuildFile(targetNode.getBuildTarget()));
    return Futures.transform(buildFileManifestFuture,
            buildFileManifest -> getTargetFromManifest(targetNode, buildFileManifest),
            MoreExecutors.directExecutor());
}

From source file: org.opendaylight.faas.fabric.general.EndPointRegister.java

@Override
public Future<RpcResult<RegisterEndpointOutput>> registerEndpoint(RegisterEndpointInput input) {

    final RpcResultBuilder<RegisterEndpointOutput> resultBuilder = RpcResultBuilder
            .<RegisterEndpointOutput>success();
    final RegisterEndpointOutputBuilder outputBuilder = new RegisterEndpointOutputBuilder();

    final FabricId fabricid = input.getFabricId();
    final FabricInstance fabricObj = FabricInstanceCache.INSTANCE.retrieveFabric(fabricid);
    if (fabricObj == null) {
        return Futures.immediateFailedFuture(new IllegalArgumentException("fabric is not exist!"));
    }

    Uuid epId = input.getEndpointUuid();
    if (epId == null) {
        epId = new Uuid(UUID.randomUUID().toString());
    }
    final Uuid newepId = epId;

    final InstanceIdentifier<Endpoint> eppath = Constants.DOM_ENDPOINTS_PATH.child(Endpoint.class,
            new EndpointKey(newepId));

    EndpointBuilder epBuilder = new EndpointBuilder();
    epBuilder.setEndpointUuid(newepId);
    epBuilder.setGateway(input.getGateway());
    epBuilder.setIpAddress(input.getIpAddress());
    epBuilder.setLocation(input.getLocation());
    epBuilder.setLogicalLocation(input.getLogicalLocation());
    epBuilder.setMacAddress(input.getMacAddress());
    epBuilder.setPublicIp(input.getPublicIp());
    epBuilder.setOwnFabric(fabricid);

    ReadWriteTransaction trans = dataBroker.newReadWriteTransaction();
    trans.put(LogicalDatastoreType.OPERATIONAL, eppath, epBuilder.build(), true);

    CheckedFuture<Void, TransactionCommitFailedException> future = trans.submit();

    return Futures.transform(future, new AsyncFunction<Void, RpcResult<RegisterEndpointOutput>>() {

        @Override
        public ListenableFuture<RpcResult<RegisterEndpointOutput>> apply(Void input) throws Exception {
            outputBuilder.setEndpointId(newepId);
            return Futures.immediateFuture(resultBuilder.withResult(outputBuilder.build()).build());
        }
    }, executor);
}
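
Note that this example passes an AsyncFunction to Futures.transform, an overload that older Guava releases provided; in current Guava the asynchronous variant is Futures.transformAsync, while transform itself only accepts a plain Function. The sketch below shows the same shape against the current API; the commit future and the "registered" result are hypothetical stand-ins, not types from the OpenDaylight source above.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class TransformAsyncSketch {
    public static void main(String[] args) {
        // Stand-in for the transaction commit future in the example above.
        ListenableFuture<Void> commitFuture = Futures.immediateFuture(null);

        // transformAsync is where the AsyncFunction overload lives today:
        // the function itself returns a future rather than a plain value.
        ListenableFuture<String> result = Futures.transformAsync(commitFuture,
                ignored -> Futures.immediateFuture("registered"),
                MoreExecutors.directExecutor());

        System.out.println(Futures.getUnchecked(result)); // prints "registered"
    }
}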

From source file: io.prestosql.plugin.hive.util.AsyncQueue.java

/**
 * Invoke {@code function} with up to {@code maxSize} elements removed from the head of the queue,
 * and insert elements in the return value to the tail of the queue.
 * <p>
 * If no element is currently available, invocation of {@code function} will be deferred until some
 * element is available, or no more elements will be. Spurious invocation of {@code function} is
 * possible.
 * <p>
 * Insertion through return value of {@code function} will be effective even if {@link #finish()} has been invoked.
 * When borrow (of a non-empty list) is ongoing, {@link #isFinished()} will return false.
 * If an empty list is supplied to {@code function}, it must not return a result indicating intention
 * to insert elements into the queue.
 */
public <O> ListenableFuture<O> borrowBatchAsync(int maxSize, Function<List<T>, BorrowResult<T, O>> function) {
    checkArgument(maxSize >= 0, "maxSize must be at least 0");

    ListenableFuture<List<T>> borrowedListFuture;
    synchronized (this) {
        List<T> list = getBatch(maxSize);
        if (!list.isEmpty()) {
            borrowedListFuture = immediateFuture(list);
            borrowerCount++;
        } else if (finishing && borrowerCount == 0) {
            borrowedListFuture = immediateFuture(ImmutableList.of());
        } else {
            borrowedListFuture = Futures.transform(notEmptySignal, ignored -> {
                synchronized (this) {
                    List<T> batch = getBatch(maxSize);
                    if (!batch.isEmpty()) {
                        borrowerCount++;
                    }
                    return batch;
                }
            }, executor);
        }
    }

    return Futures.transform(borrowedListFuture, elements -> {
        // The borrowerCount field was only incremented for non-empty lists.
        // Decrements should only happen for non-empty lists.
        // When it should, it must always happen even if the caller-supplied function throws.
        try {
            BorrowResult<T, O> borrowResult = function.apply(elements);
            if (elements.isEmpty()) {
                checkArgument(borrowResult.getElementsToInsert().isEmpty(),
                        "Function must not insert anything when no element is borrowed");
                return borrowResult.getResult();
            }
            for (T element : borrowResult.getElementsToInsert()) {
                offer(element);
            }
            return borrowResult.getResult();
        } finally {
            if (!elements.isEmpty()) {
                synchronized (this) {
                    borrowerCount--;
                    signalIfFinishing();
                }
            }
        }
    }, directExecutor());
}

From source file: org.thingsboard.server.dao.timeseries.CassandraBaseTimeseriesDao.java

@Override
public ListenableFuture<List<TsKvEntry>> findAllAsync(TenantId tenantId, EntityId entityId,
        List<ReadTsKvQuery> queries) {
    List<ListenableFuture<List<TsKvEntry>>> futures = queries.stream()
            .map(query -> findAllAsync(tenantId, entityId, query)).collect(Collectors.toList());
    return Futures.transform(Futures.allAsList(futures),
            new Function<List<List<TsKvEntry>>, List<TsKvEntry>>() {
                @Nullable
                @Override
                public List<TsKvEntry> apply(@Nullable List<List<TsKvEntry>> results) {
                    if (results == null || results.isEmpty()) {
                        return null;
                    }
                    return results.stream().flatMap(List::stream).collect(Collectors.toList());
                }
            }, readResultsProcessingExecutor);
}
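
A recurring pattern in this and the later ThingsBoard example is combining Futures.allAsList with transform to merge many independent futures into a single result. The sketch below distills that pattern with plain strings; the names are illustrative assumptions, only the Guava calls are real.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.stream.Collectors;

public class AllAsListSketch {
    public static void main(String[] args) {
        // Stand-ins for the per-query futures collected in the example above.
        List<ListenableFuture<List<String>>> perQuery = List.of(
                Futures.immediateFuture(List.of("a", "b")),
                Futures.immediateFuture(List.of("c")));

        // allAsList completes when every input completes; transform then
        // flattens the list-of-lists into one result list.
        ListenableFuture<List<String>> merged = Futures.transform(
                Futures.allAsList(perQuery),
                lists -> lists.stream().flatMap(List::stream).collect(Collectors.toList()),
                MoreExecutors.directExecutor());

        System.out.println(Futures.getUnchecked(merged)); // prints [a, b, c]
    }
}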

From source file: org.jclouds.atmos.internal.StubAtmosAsyncClient.java

@Override
public ListenableFuture<URI> createFile(String parent, AtmosObject object, PutOptions... options) {
    final String uri = "http://stub/containers/" + parent + "/" + object.getContentMetadata().getName();
    String file = object.getContentMetadata().getName();
    String container = parent;
    if (parent.indexOf('/') != -1) {
        container = parent.substring(0, parent.indexOf('/'));
        String path = parent.substring(parent.indexOf('/') + 1);
        if (!path.equals(""))
            object.getContentMetadata().setName(path + "/" + file);
    }
    Blob blob = object2Blob.apply(object);
    return Futures.transform(blobStore.putBlob(container, blob), new Function<String, URI>() {

        public URI apply(String from) {
            return URI.create(uri);
        }

    }, userExecutor);
}

From source file: com.google.caliper.runner.server.ServerSocketService.java

/**
 * Returns a {@link ListenableFuture} for an {@link OpenedSocket} corresponding to the given id.
 *
 * <p>N.B. calling this method 'consumes' the connection and as such calling it or {@link
 * #getInputStream} twice with the same id will not work; the second future returned will never
 * complete. Similarly calling it with an id that does not correspond to a worker trying to
 * connect will also fail.
 */
public ListenableFuture<OpenedSocket> getConnection(UUID id) {
    return Futures.transform(getSocket(id), OPENED_SOCKET_FUNCTION, directExecutor());
}

From source file: org.apache.helix.provisioning.yarn.YarnProvisioner.java

@Override
public ListenableFuture<Boolean> startContainer(final ContainerId containerId, Participant participant) {
    Container container = allocatedContainersMap.get(containerId);
    ContainerLaunchContext launchContext;
    try {
        launchContext = createLaunchContext(containerId, container, participant);
    } catch (Exception e) {
        LOG.error("Exception while creating context to launch container:" + containerId, e);
        return null;
    }
    ListenableFuture<ContainerLaunchResponse> future = applicationMaster.launchContainer(container,
            launchContext);
    return Futures.transform(future, new Function<ContainerLaunchResponse, Boolean>() {
        @Override
        public Boolean apply(ContainerLaunchResponse response) {
            return response != null;
        }
    }, service);
}

From source file: org.thingsboard.server.dao.sql.timeseries.JpaTimeseriesDao.java

private ListenableFuture<List<TsKvEntry>> findAllAsync(TenantId tenantId, EntityId entityId,
        ReadTsKvQuery query) {
    if (query.getAggregation() == Aggregation.NONE) {
        return findAllAsyncWithLimit(entityId, query);
    } else {
        long stepTs = query.getStartTs();
        List<ListenableFuture<Optional<TsKvEntry>>> futures = new ArrayList<>();
        while (stepTs < query.getEndTs()) {
            long startTs = stepTs;
            long endTs = stepTs + query.getInterval();
            long ts = startTs + (endTs - startTs) / 2;
            futures.add(findAndAggregateAsync(entityId, query.getKey(), startTs, endTs, ts,
                    query.getAggregation()));
            stepTs = endTs;
        }
        ListenableFuture<List<Optional<TsKvEntry>>> future = Futures.allAsList(futures);
        return Futures.transform(future, new Function<List<Optional<TsKvEntry>>, List<TsKvEntry>>() {
            @Nullable
            @Override
            public List<TsKvEntry> apply(@Nullable List<Optional<TsKvEntry>> results) {
                if (results == null || results.isEmpty()) {
                    return null;
                }
                return results.stream().filter(Optional::isPresent).map(Optional::get)
                        .collect(Collectors.toList());
            }
        }, service);
    }
}

From source file: com.facebook.buck.distributed.ServerContentsProvider.java

private ListenableFuture<byte[]> scheduleFileToBeFetched(BuildJobStateFileHashEntry entry) {
    Preconditions.checkState(entry.isSetSha1(),
            String.format("File hash missing for file [%s].", entry.getPath()));

    ListenableFuture<byte[]> future;
    synchronized (multiFetchLock) {
        LOG.verbose("Scheduling file to be fetched from the CAS: [%s] (SHA1: %s).", entry.getPath(),
                entry.getSha1());
        hashCodesToFetch.add(entry.getSha1());
        future = Futures.transform(multiFetchFuture,
                resultMap -> Objects.requireNonNull(resultMap).get(entry.getSha1()),
                MoreExecutors.directExecutor());
    }

    return future;
}
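
The idea in this example is to derive many per-entry futures from one shared batch future: each call to transform simply projects its own key out of the combined result map. A minimal sketch of that projection with hypothetical names:

import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Map;

public class SharedFetchSketch {
    public static void main(String[] args) {
        // Stand-in for the shared multi-fetch future in the example above.
        ListenableFuture<Map<String, byte[]>> batchFuture =
                Futures.immediateFuture(ImmutableMap.of("sha1-abc", new byte[] {1, 2, 3}));

        // Each caller derives its own future by picking one key out of the
        // shared batch result; directExecutor() is enough for a map lookup.
        ListenableFuture<byte[]> oneFile = Futures.transform(batchFuture,
                resultMap -> resultMap.get("sha1-abc"),
                MoreExecutors.directExecutor());

        System.out.println(Futures.getUnchecked(oneFile).length); // prints 3
    }
}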

From source file: com.facebook.buck.distributed.build_client.PreBuildPhase.java

/** Run all steps required before the build. */
public Pair<StampedeId, ListenableFuture<Void>> runPreDistBuildLocalStepsAsync(
        ListeningExecutorService networkExecutorService, ProjectFilesystem projectFilesystem,
        FileHashCache fileHashCache, BuckEventBus eventBus, BuildId buildId, BuildMode buildMode,
        MinionRequirements minionRequirements, String repository, String tenantId,
        ListenableFuture<ParallelRuleKeyCalculator<RuleKey>> localRuleKeyCalculatorFuture)
        throws IOException, DistBuildRejectedException {
    ConsoleEventsDispatcher consoleEventsDispatcher = new ConsoleEventsDispatcher(eventBus);

    distBuildClientStats.startTimer(CREATE_DISTRIBUTED_BUILD);
    List<String> buildTargets = topLevelTargets.stream().map(BuildTarget::getFullyQualifiedName).sorted()
            .collect(Collectors.toList());
    BuildJob job = distBuildService.createBuild(buildId, buildMode, minionRequirements, repository, tenantId,
            buildTargets, buildLabel);
    distBuildClientStats.stopTimer(CREATE_DISTRIBUTED_BUILD);

    if (job.getBuildLabel() != null) {
        // Override the build label with the server-side inferred label.
        this.buildLabel = job.getBuildLabel();
        distBuildClientStats.setUserOrInferredBuildLabel(buildLabel);
    }

    StampedeId stampedeId = job.getStampedeId();
    eventBus.post(new DistBuildCreatedEvent(stampedeId));

    LOG.info("Created job. StampedeId = " + stampedeId.getId());

    consoleEventsDispatcher.postDistBuildStatusEvent(job, ImmutableList.of(), "SERIALIZING AND UPLOADING DATA");

    List<ListenableFuture<?>> asyncJobs = new LinkedList<>();

    asyncJobs.add(Futures.transformAsync(asyncJobState, jobState -> {
        LOG.info("Uploading local changes.");
        return distBuildService.uploadMissingFilesAsync(distBuildCellIndexer.getLocalFilesystemsByCellIndex(),
                jobState.fileHashes, distBuildClientStats, networkExecutorService);
    }, networkExecutorService));

    asyncJobs.add(Futures.transform(asyncJobState, jobState -> {
        LOG.info("Uploading target graph.");
        try {
            distBuildService.uploadTargetGraph(jobState, stampedeId, distBuildClientStats);
        } catch (IOException e) {
            throw new RuntimeException("Failed to upload target graph with exception.", e);
        }
        return null;
    }, networkExecutorService));

    LOG.info("Uploading buck dot-files.");
    asyncJobs.add(distBuildService.uploadBuckDotFilesAsync(stampedeId, projectFilesystem, fileHashCache,
            distBuildClientStats, networkExecutorService));

    asyncJobs.add(networkExecutorService.submit(() -> {
        LOG.info("Setting buck version.");
        try {
            distBuildService.setBuckVersion(stampedeId, buckVersion, distBuildClientStats);
        } catch (IOException e) {
            throw new RuntimeException("Failed to set buck-version with exception.", e);
        }
    }));

    DistBuildConfig distBuildConfig = new DistBuildConfig(buildExecutorArgs.getBuckConfig());

    if (distBuildConfig.isUploadFromLocalCacheEnabled()) {
        asyncJobs.add(Futures.transformAsync(localRuleKeyCalculatorFuture, localRuleKeyCalculator -> {
            try (ArtifactCacheByBuildRule artifactCache = new DistBuildArtifactCacheImpl(
                    actionAndTargetGraphs.getActionGraphAndBuilder().getActionGraphBuilder(),
                    networkExecutorService,
                    buildExecutorArgs.getArtifactCacheFactory().remoteOnlyInstance(true, false), eventBus,
                    localRuleKeyCalculator,
                    Optional.of(buildExecutorArgs.getArtifactCacheFactory().localOnlyInstance(true, false)))) {

                return new CacheOptimizedBuildTargetsQueueFactory(
                        actionAndTargetGraphs.getActionGraphAndBuilder().getActionGraphBuilder(), artifactCache,
                        /* isDeepRemoteBuild */ false, localRuleKeyCalculator.getRuleDepsCache(),
                        /* shouldBuildSelectedTargetsLocally */ false)
                                .uploadCriticalNodesFromLocalCache(topLevelTargets, distBuildClientStats);

            } catch (Exception e) {
                LOG.error(e, "Failed to create BuildTargetsQueue.");
                throw new RuntimeException(e);
            }
        }, networkExecutorService));
    }

    ListenableFuture<Void> asyncPrep = Futures.transform(Futures.allAsList(asyncJobs), results -> {
        LOG.info("Finished async preparation of stampede job.");
        consoleEventsDispatcher.postDistBuildStatusEvent(job, ImmutableList.of(), "STARTING REMOTE BUILD");

        // Everything is now setup remotely to run the distributed build. No more local prep.
        this.distBuildClientStats.stopTimer(LOCAL_PREPARATION);
        return null;
    }, MoreExecutors.directExecutor());

    return new Pair<StampedeId, ListenableFuture<Void>>(stampedeId, asyncPrep);
}