Example usage for java.util.concurrent CompletableFuture thenApplyAsync

Introduction

On this page you can find example usage of java.util.concurrent CompletableFuture.thenApplyAsync(Function, Executor).

Prototype

public <U> CompletableFuture<U> thenApplyAsync(Function<? super T, ? extends U> fn, Executor executor) 
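Before the examples, here is a minimal, self-contained sketch of this overload (the class and variable names ThenApplyAsyncExample and pool are illustrative only): the function is applied to the previous stage's result on the supplied Executor rather than on the common ForkJoinPool.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ThenApplyAsyncExample {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        try {
            CompletableFuture<Integer> length = CompletableFuture
                    .supplyAsync(() -> "hello", pool)
                    // The mapping function runs on the supplied executor.
                    .thenApplyAsync(String::length, pool);
            System.out.println(length.join()); // prints 5
        } finally {
            pool.shutdown();
        }
    }
}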

Usage

From source file:org.ng200.openolympus.services.TestingService.java

private void processVerdict(final Verdict verdict) {
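    // Builds a two-stage asynchronous pipeline (compile if needed, then check the verdict)
    // on the compilation/checking executor, cleaning up the judge after each stage.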
    try {
        final TaskContainer taskContainer = this.taskContainerCache
                .getTaskContainerForTask(verdict.getSolution().getTask());
        final Function<CompletableFuture<SolutionJudge>, CompletableFuture<SolutionJudge>> functionToApplyToJudge = (
                final CompletableFuture<SolutionJudge> futureJudge) -> {
            this.logInAsSystem();
            return futureJudge.thenApplyAsync((final SolutionJudge judge) -> {
                this.logInAsSystem();
                try {
                    if (!judge.isCompiled()) {
                        return this.compileSolution(verdict.getSolution(), judge,
                                taskContainer.getProperties());
                    }
                } catch (final Exception e) {
                    TestingService.logger.error(
                            "Solution compilation failed " + "because judge for task " + "\"{}\"({}) thew an "
                                    + "exception: {}",
                            verdict.getSolution().getTask().getName(), verdict.getSolution().getTask().getId(),
                            e);
                } finally {
                    Janitor.cleanUp(judge);
                }
                return judge;
            }, this.compilationAndCheckingExecutor).thenApplyAsync((final SolutionJudge judge) -> {
                this.logInAsSystem();
                try {
                    this.checkVerdict(verdict, judge, taskContainer.getTestFiles(verdict.getPathToTest()),
                            verdict.getMaximumScore(), taskContainer.getProperties());
                } catch (final Throwable e) {
                    TestingService.logger.error(
                            "Solution judgement failed " + "because judge for task " + "\"{}\"({}) thew an "
                                    + "exception: {}",
                            verdict.getSolution().getTask().getName(), verdict.getSolution().getTask().getId(),
                            e);
                } finally {
                    Janitor.cleanUp(judge);
                }
                return judge;
            }, this.compilationAndCheckingExecutor)
                    .handle((final SolutionJudge judge, final Throwable throwable) -> {
                        this.logInAsSystem();

                        if (throwable != null) {
                            throw new RuntimeException("Couldn't judge verdict: ", throwable);
                        }
                        return judge;
                    });
        };
        this.logInAsSystem();

        taskContainer.applyToJudge(verdict.getSolution(), functionToApplyToJudge);
    } catch (final Exception e) {
        TestingService.logger.error("Couldn't schedule judgement for verdict {}: ", verdict, e);
    }
}

From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java

/**
 * Creates txn on the specified stream.
 *
 * Post-condition:
 * 1. If txn creation succeeds, then
 *     (a) txn node is created in the store,
 *     (b) txn segments are successfully created on respective segment stores,
 *     (c) txn is present in the host-txn index of current host,
 *     (d) txn's timeout is being tracked in timeout service.
 *
 * 2. If the process fails after creating the txn node, but before responding to the client, then since the txn is
 * present in the host-txn index, some other controller process shall abort the txn after maxLeaseValue.
 *
 * 3. If timeout service tracks timeout of specified txn,
 * then txn is also present in the host-txn index of current process.
 *
 * Invariant:
 * The following invariants are maintained throughout the execution of createTxn, pingTxn and sealTxn methods.
 * 1. If timeout service tracks timeout of a txn, then txn is also present in the host-txn index of current process.
 * 2. If txn znode is updated, then txn is also present in the host-txn index of current process.
 *
 * @param scope               scope name.
 * @param stream              stream name.
 * @param lease               txn lease.
 * @param maxExecutionPeriod  maximum amount of time for which txn may remain open.
 * @param scaleGracePeriod    amount of time for which txn may remain open after scale operation is initiated.
 * @param ctx                 context.
 * @return                    identifier of the created txn.
 */
CompletableFuture<Pair<VersionedTransactionData, List<Segment>>> createTxnBody(final String scope,
        final String stream, final long lease, final long maxExecutionPeriod, final long scaleGracePeriod,
        final OperationContext ctx) {
    // Step 1. Validate parameters.
    CompletableFuture<Void> validate = validate(lease, maxExecutionPeriod, scaleGracePeriod);

    UUID txnId = UUID.randomUUID();
    TxnResource resource = new TxnResource(scope, stream, txnId);

    // Step 2. Add txn to host-transaction index.
    CompletableFuture<Void> addIndex = validate
            .thenComposeAsync(ignore -> streamMetadataStore.addTxnToIndex(hostId, resource, 0), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
                } else {
                    log.debug("Txn={}, added txn to host-txn index of host={}", txnId, hostId);
                }
            });

    // Step 3. Create txn node in the store.
    CompletableFuture<VersionedTransactionData> txnFuture = addIndex
            .thenComposeAsync(ignore -> streamMetadataStore.createTransaction(scope, stream, txnId, lease,
                    maxExecutionPeriod, scaleGracePeriod, ctx, executor), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed creating txn in store", txnId);
                } else {
                    log.debug("Txn={}, created in store", txnId);
                }
            });

    // Step 4. Notify segment stores about new txn.
    CompletableFuture<List<Segment>> segmentsFuture = txnFuture.thenComposeAsync(
            txnData -> streamMetadataStore.getActiveSegments(scope, stream, txnData.getEpoch(), ctx, executor),
            executor);

    CompletableFuture<Void> notify = segmentsFuture
            .thenComposeAsync(activeSegments -> notifyTxnCreation(scope, stream, activeSegments, txnId),
                    executor)
            .whenComplete((v, e) ->
                    // Method notifyTxnCreation ensures that notification completes
                    // even in the presence of n/w or segment store failures.
                    log.debug("Txn={}, notified segments stores", txnId));

    // Step 5. Start tracking txn in timeout service
    return notify.thenApplyAsync(y -> {
        int version = txnFuture.join().getVersion();
        long executionExpiryTime = txnFuture.join().getMaxExecutionExpiryTime();
        timeoutService.addTxn(scope, stream, txnId, version, lease, executionExpiryTime, scaleGracePeriod);
        log.debug("Txn={}, added to timeout service on host={}", txnId, hostId);
        return null;
    }, executor).thenApplyAsync(v -> new ImmutablePair<>(txnFuture.join(), segmentsFuture.join()), executor);
}

From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java

private CompletableFuture<PingTxnStatus> fenceTxnUpdateLease(final String scope, final String stream,
        final UUID txnId, final long lease, final OperationContext ctx) {
    // Step 1. Check whether lease value is within necessary bounds.
    // Step 2. Add txn to host-transaction index.
    // Step 3. Update txn node data in the store, thus updating its version
    //         and fencing other processes from tracking this txn's timeout.
    // Step 4. Add this txn to timeout service and start managing timeout for this txn.
    return streamMetadataStore.getTransactionData(scope, stream, txnId, ctx, executor)
            .thenComposeAsync(txnData -> {
                // Step 1. Sanity check for lease value.
                if (lease > txnData.getScaleGracePeriod() || lease > timeoutService.getMaxLeaseValue()) {
                    return CompletableFuture.completedFuture(createStatus(Status.LEASE_TOO_LARGE));
                } else if (lease + System.currentTimeMillis() > txnData.getMaxExecutionExpiryTime()) {
                    return CompletableFuture.completedFuture(createStatus(Status.MAX_EXECUTION_TIME_EXCEEDED));
                } else {
                    TxnResource resource = new TxnResource(scope, stream, txnId);
                    int expVersion = txnData.getVersion() + 1;

                    // Step 2. Add txn to host-transaction index
                    CompletableFuture<Void> addIndex = streamMetadataStore
                            .addTxnToIndex(hostId, resource, expVersion).whenComplete((v, e) -> {
                                if (e != null) {
                                    log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId,
                                            hostId);
                                } else {
                                    log.debug("Txn={}, added txn to host-txn index of host={}", txnId, hostId);
                                }
                            });

                    return addIndex.thenComposeAsync(x -> {
                        // Step 3. Update txn node data in the store.
                        CompletableFuture<VersionedTransactionData> pingTxn = streamMetadataStore
                                .pingTransaction(scope, stream, txnData, lease, ctx, executor)
                                .whenComplete((v, e) -> {
                                    if (e != null) {
                                        log.debug("Txn={}, failed updating txn node in store", txnId);
                                    } else {
                                        log.debug("Txn={}, updated txn node in store", txnId);
                                    }
                                });

                        // Step 4. Add it to timeout service and start managing timeout for this txn.
                        return pingTxn.thenApplyAsync(data -> {
                            int version = data.getVersion();
                            long expiryTime = data.getMaxExecutionExpiryTime();
                            long scaleGracePeriod = data.getScaleGracePeriod();
                            // Even if timeout service has an active/executing timeout task for this txn, it is bound
                            // to fail, since version of txn node has changed because of the above store.pingTxn call.
                            // Hence explicitly add a new timeout task.
                            log.debug("Txn={}, adding txn to host-txn index", txnId);
                            timeoutService.addTxn(scope, stream, txnId, version, lease, expiryTime,
                                    scaleGracePeriod);
                            return createStatus(Status.OK);
                        }, executor);
                    }, executor);
                }
            }, executor);
}