Example usage for java.util.concurrent CompletableFuture thenComposeAsync

Introduction

On this page you can find usage examples for the java.util.concurrent CompletableFuture method thenComposeAsync.

Prototype

public <U> CompletableFuture<U> thenComposeAsync(Function<? super T, ? extends CompletionStage<U>> fn,
            Executor executor) 

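Before the project examples below, here is a minimal, self-contained sketch of this two-argument overload. The composing function returns another CompletionStage, and the function itself is invoked on the supplied executor; the class name ThenComposeAsyncExample and the fetchName helper are hypothetical, used only for illustration.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ThenComposeAsyncExample {

    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(2);

        // The composing function returns another CompletableFuture, so two
        // dependent stages are flattened into a single CompletableFuture<String>
        // instead of a nested CompletableFuture<CompletableFuture<String>>.
        CompletableFuture<String> greeting = CompletableFuture
                .supplyAsync(() -> 42, executor)
                .thenComposeAsync(id -> fetchName(id, executor), executor);

        System.out.println(greeting.join()); // prints "user-42"
        executor.shutdown();
    }

    // Hypothetical helper simulating an asynchronous lookup.
    private static CompletableFuture<String> fetchName(int id, ExecutorService executor) {
        return CompletableFuture.supplyAsync(() -> "user-" + id, executor);
    }
}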

Usage

From source file:io.pravega.controller.server.eventProcessor.AutoScaleRequestHandler.java

public CompletableFuture<Void> process(final AutoScaleEvent request) {
    if (!(request.getTimestamp() + REQUEST_VALIDITY_PERIOD > System.currentTimeMillis())) {
        // Request no longer valid; ignore it.
        // Log, because a request was fetched from the stream after its validity expired.
        // This should be a rare occurrence: either the request was unable to acquire a lock for a
        // long time, or we are processing at a much slower rate than the message ingestion rate
        // into the stream, in which case we should scale up.
        // Either way, logging this helps us know how often it is happening.

        log.debug(String.format("Scale Request for stream %s/%s expired", request.getScope(),
                request.getStream()));
        return CompletableFuture.completedFuture(null);
    }

    final OperationContext context = streamMetadataStore.createContext(request.getScope(), request.getStream());

    return withRetries(() -> {
        final CompletableFuture<ScalingPolicy> policyFuture = streamMetadataStore
                .getConfiguration(request.getScope(), request.getStream(), context, executor)
                .thenApply(StreamConfiguration::getScalingPolicy);

        if (request.getDirection() == AutoScaleEvent.UP) {
            return policyFuture.thenComposeAsync(policy -> processScaleUp(request, policy, context), executor);
        } else {
            return policyFuture.thenComposeAsync(policy -> processScaleDown(request, policy, context),
                    executor);
        }
    }, executor);
}
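
This handler and the two near-identical ones that follow share one pattern: a prerequisite future (the scaling policy lookup) is composed with one of two asynchronous continuations depending on the event direction, and both the chosen function and its downstream stage are scheduled on the controller's executor. A stripped-down, hypothetical sketch of just that branching shape:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class BranchingComposeSketch {

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        boolean up = true; // stands in for request.getDirection() == AutoScaleEvent.UP

        CompletableFuture<String> policyFuture = CompletableFuture.supplyAsync(() -> "by-rate-policy",
                executor);

        // Whichever branch is taken, the caller gets back a single CompletableFuture<Void>.
        CompletableFuture<Void> result = up
                ? policyFuture.thenComposeAsync(BranchingComposeSketch::processScaleUp, executor)
                : policyFuture.thenComposeAsync(BranchingComposeSketch::processScaleDown, executor);

        result.join();
        executor.shutdown();
    }

    // Hypothetical stand-ins for the handler's processScaleUp/processScaleDown.
    private static CompletableFuture<Void> processScaleUp(String policy) {
        System.out.println("scaling up under " + policy);
        return CompletableFuture.completedFuture(null);
    }

    private static CompletableFuture<Void> processScaleDown(String policy) {
        System.out.println("scaling down under " + policy);
        return CompletableFuture.completedFuture(null);
    }
}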

From source file:io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask.java

public CompletableFuture<Void> execute(final AutoScaleEvent request) {
    if (!(request.getTimestamp() + REQUEST_VALIDITY_PERIOD > System.currentTimeMillis())) {
        // Request no longer valid; ignore it.
        // Log, because a request was fetched from the stream after its validity expired.
        // This should be a rare occurrence: either the request was unable to acquire a lock for a
        // long time, or we are processing at a much slower rate than the message ingestion rate
        // into the stream, in which case we should scale up.
        // Either way, logging this helps us know how often it is happening.

        log.info(String.format("Scale Request for stream %s/%s expired", request.getScope(),
                request.getStream()));
        return CompletableFuture.completedFuture(null);
    }

    final OperationContext context = streamMetadataStore.createContext(request.getScope(), request.getStream());

    return withRetries(() -> {
        final CompletableFuture<ScalingPolicy> policyFuture = streamMetadataStore
                .getConfiguration(request.getScope(), request.getStream(), context, executor)
                .thenApply(StreamConfiguration::getScalingPolicy);

        if (request.getDirection() == AutoScaleEvent.UP) {
            return policyFuture.thenComposeAsync(policy -> processScaleUp(request, policy, context), executor);
        } else {
            return policyFuture.thenComposeAsync(policy -> processScaleDown(request, policy, context),
                    executor);
        }
    }, executor);
}

From source file:io.pravega.controller.server.eventProcessor.ScaleRequestHandler.java

public CompletableFuture<Void> process(final ScaleEvent request) {
    if (!(request.getTimestamp() + REQUEST_VALIDITY_PERIOD > System.currentTimeMillis())) {
        // Request no longer valid; ignore it.
        // Log, because a request was fetched from the stream after its validity expired.
        // This should be a rare occurrence: either the request was unable to acquire a lock for a
        // long time, or we are processing at a much slower rate than the message ingestion rate
        // into the stream, in which case we should scale up.
        // Either way, logging this helps us know how often it is happening.

        log.debug(String.format("Scale Request for stream %s/%s expired", request.getScope(),
                request.getStream()));
        return CompletableFuture.completedFuture(null);
    }

    final OperationContext context = streamMetadataStore.createContext(request.getScope(), request.getStream());

    return RETRY.runAsync(() -> {
        final CompletableFuture<ScalingPolicy> policyFuture = streamMetadataStore
                .getConfiguration(request.getScope(), request.getStream(), context, executor)
                .thenApply(StreamConfiguration::getScalingPolicy);

        if (request.getDirection() == ScaleEvent.UP) {
            return policyFuture.thenComposeAsync(policy -> processScaleUp(request, policy, context), executor);
        } else {
            return policyFuture.thenComposeAsync(policy -> processScaleDown(request, policy, context),
                    executor);
        }
    }, executor);
}

From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java

/**
 * Creates a txn on the specified stream.
 *
 * Post-condition:
 * 1. If txn creation succeeds, then
 *     (a) txn node is created in the store,
 *     (b) txn segments are successfully created on respective segment stores,
 *     (c) txn is present in the host-txn index of current host,
 *     (d) txn's timeout is being tracked in timeout service.
 *
 * 2. If the process fails after creating the txn node but before responding to the client, then, since the
 * txn is present in the host-txn index, some other controller process shall abort the txn after maxLeaseValue.
 *
 * 3. If timeout service tracks timeout of specified txn,
 * then txn is also present in the host-txn index of current process.
 *
 * Invariant:
 * The following invariants are maintained throughout the execution of createTxn, pingTxn and sealTxn methods.
 * 1. If timeout service tracks timeout of a txn, then txn is also present in the host-txn index of current process.
 * 2. If txn znode is updated, then txn is also present in the host-txn index of current process.
 *
 * @param scope               scope name.
 * @param stream              stream name.
 * @param lease               txn lease.
 * @param maxExecutionPeriod  maximum amount of time for which txn may remain open.
 * @param scaleGracePeriod    amount of time for which txn may remain open after scale operation is initiated.
 * @param ctx                 context.
 * @return                    identifier of the created txn.
 */
CompletableFuture<Pair<VersionedTransactionData, List<Segment>>> createTxnBody(final String scope,
        final String stream, final long lease, final long maxExecutionPeriod, final long scaleGracePeriod,
        final OperationContext ctx) {
    // Step 1. Validate parameters.
    CompletableFuture<Void> validate = validate(lease, maxExecutionPeriod, scaleGracePeriod);

    UUID txnId = UUID.randomUUID();
    TxnResource resource = new TxnResource(scope, stream, txnId);

    // Step 2. Add txn to host-transaction index.
    CompletableFuture<Void> addIndex = validate
            .thenComposeAsync(ignore -> streamMetadataStore.addTxnToIndex(hostId, resource, 0), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
                } else {
                    log.debug("Txn={}, added txn to host-txn index of host={}", txnId, hostId);
                }
            });

    // Step 3. Create txn node in the store.
    CompletableFuture<VersionedTransactionData> txnFuture = addIndex
            .thenComposeAsync(ignore -> streamMetadataStore.createTransaction(scope, stream, txnId, lease,
                    maxExecutionPeriod, scaleGracePeriod, ctx, executor), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed creating txn in store", txnId);
                } else {
                    log.debug("Txn={}, created in store", txnId);
                }
            });

    // Step 4. Notify segment stores about new txn.
    CompletableFuture<List<Segment>> segmentsFuture = txnFuture.thenComposeAsync(
            txnData -> streamMetadataStore.getActiveSegments(scope, stream, txnData.getEpoch(), ctx, executor),
            executor);

    CompletableFuture<Void> notify = segmentsFuture
            .thenComposeAsync(activeSegments -> notifyTxnCreation(scope, stream, activeSegments, txnId),
                    executor)
            .whenComplete((v, e) ->
                    // Method notifyTxnCreation ensures that notification completes
                    // even in the presence of n/w or segment store failures.
                    log.debug("Txn={}, notified segment stores", txnId));

    // Step 5. Start tracking txn in timeout service
    return notify.thenApplyAsync(y -> {
        int version = txnFuture.join().getVersion();
        long executionExpiryTime = txnFuture.join().getMaxExecutionExpiryTime();
        timeoutService.addTxn(scope, stream, txnId, version, lease, executionExpiryTime, scaleGracePeriod);
        log.debug("Txn={}, added to timeout service on host={}", txnId, hostId);
        return null;
    }, executor).thenApplyAsync(v -> new ImmutablePair<>(txnFuture.join(), segmentsFuture.join()), executor);
}
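
createTxnBody shows the most common shape for the two-argument overload: a linear pipeline in which each thenComposeAsync step starts only after the previous future completes, with whenComplete attached purely for logging. A compact, hypothetical sketch of that shape; the writeIndexEntry and createRecord helpers are invented for illustration:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PipelineSketch {

    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(2);

        // Step 1: validation (trivially complete here).
        CompletableFuture<Void> validate = CompletableFuture.completedFuture(null);

        // Step 2: runs only after validate completes; whenComplete just logs.
        CompletableFuture<Void> addIndex = validate
                .thenComposeAsync(ignore -> writeIndexEntry(executor), executor)
                .whenComplete((v, e) -> System.out.println(
                        e != null ? "failed writing index entry" : "index entry written"));

        // Step 3: runs only after the index write completes.
        CompletableFuture<String> create = addIndex
                .thenComposeAsync(ignore -> createRecord(executor), executor);

        System.out.println("created " + create.join());
        executor.shutdown();
    }

    private static CompletableFuture<Void> writeIndexEntry(ExecutorService executor) {
        return CompletableFuture.runAsync(() -> { /* pretend I/O */ }, executor);
    }

    private static CompletableFuture<String> createRecord(ExecutorService executor) {
        return CompletableFuture.supplyAsync(() -> "txn-0001", executor);
    }
}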

From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java

private CompletableFuture<PingTxnStatus> fenceTxnUpdateLease(final String scope, final String stream,
        final UUID txnId, final long lease, final OperationContext ctx) {
    // Step 1. Check whether lease value is within necessary bounds.
    // Step 2. Add txn to host-transaction index.
    // Step 3. Update txn node data in the store, thus updating its version
    //         and fencing other processes from tracking this txn's timeout.
    // Step 4. Add this txn to timeout service and start managing timeout for this txn.
    return streamMetadataStore.getTransactionData(scope, stream, txnId, ctx, executor)
            .thenComposeAsync(txnData -> {
                // Step 1. Sanity check for lease value.
                if (lease > txnData.getScaleGracePeriod() || lease > timeoutService.getMaxLeaseValue()) {
                    return CompletableFuture.completedFuture(createStatus(Status.LEASE_TOO_LARGE));
                } else if (lease + System.currentTimeMillis() > txnData.getMaxExecutionExpiryTime()) {
                    return CompletableFuture.completedFuture(createStatus(Status.MAX_EXECUTION_TIME_EXCEEDED));
                } else {
                    TxnResource resource = new TxnResource(scope, stream, txnId);
                    int expVersion = txnData.getVersion() + 1;

                    // Step 2. Add txn to host-transaction index
                    CompletableFuture<Void> addIndex = streamMetadataStore
                            .addTxnToIndex(hostId, resource, expVersion).whenComplete((v, e) -> {
                                if (e != null) {
                                    log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId,
                                            hostId);
                                } else {
                                    log.debug("Txn={}, added txn to host-txn index of host={}", txnId, hostId);
                                }
                            });

                    return addIndex.thenComposeAsync(x -> {
                        // Step 3. Update txn node data in the store.
                        CompletableFuture<VersionedTransactionData> pingTxn = streamMetadataStore
                                .pingTransaction(scope, stream, txnData, lease, ctx, executor)
                                .whenComplete((v, e) -> {
                                    if (e != null) {
                                        log.debug("Txn={}, failed updating txn node in store", txnId);
                                    } else {
                                        log.debug("Txn={}, updated txn node in store", txnId);
                                    }
                                });

                        // Step 4. Add it to timeout service and start managing timeout for this txn.
                        return pingTxn.thenApplyAsync(data -> {
                            int version = data.getVersion();
                            long expiryTime = data.getMaxExecutionExpiryTime();
                            long scaleGracePeriod = data.getScaleGracePeriod();
                            // Even if the timeout service has an active/executing timeout task for this txn, it is
                            // bound to fail, since the version of the txn node has changed because of the
                            // pingTransaction call above. Hence explicitly add a new timeout task.
                            log.debug("Txn={}, adding txn to timeout service", txnId);
                            timeoutService.addTxn(scope, stream, txnId, version, lease, expiryTime,
                                    scaleGracePeriod);
                            return createStatus(Status.OK);
                        }, executor);
                    }, executor);
                }
            }, executor);
}
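
A detail worth noting in fenceTxnUpdateLease: inside the composing function, invalid inputs are rejected with an already-completed future, so no further asynchronous work is scheduled, while only the valid path composes additional stages. A self-contained, hypothetical sketch of that short-circuit:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ShortCircuitSketch {

    enum Status { OK, LEASE_TOO_LARGE }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        CompletableFuture<Long> leaseFuture = CompletableFuture.supplyAsync(() -> 120_000L, executor);

        CompletableFuture<Status> status = leaseFuture.thenComposeAsync(lease -> {
            if (lease > 60_000L) {
                // Invalid lease: answer immediately without scheduling more work.
                return CompletableFuture.completedFuture(Status.LEASE_TOO_LARGE);
            }
            // Valid lease: compose a further asynchronous stage.
            return CompletableFuture.supplyAsync(() -> Status.OK, executor);
        }, executor);

        System.out.println(status.join()); // prints "LEASE_TOO_LARGE"
        executor.shutdown();
    }
}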

From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java

/**
 * Seals a txn and transitions it to COMMITTING (resp. ABORTING) state if commit param is true (resp. false).
 *
 * Post-condition:
 * 1. If seal completes successfully, then
 *     (a) txn state is COMMITTING/ABORTING,
 *     (b) CommitEvent/AbortEvent is present in the commit stream/abort stream,
 *     (c) txn is removed from host-txn index,
 *     (d) txn is removed from the timeout service.
 *
 * 2. If process fails after transitioning txn to COMMITTING/ABORTING state, but before responding to client, then
 * since txn is present in the host-txn index, some other controller process shall put CommitEvent/AbortEvent to
 * commit stream/abort stream.
 *
 * @param host    host id. It is different from hostId iff invoked from TxnSweeper for aborting orphaned txn.
 * @param scope   scope name.
 * @param stream  stream name.
 * @param commit  boolean indicating whether to commit txn.
 * @param txnId   txn id.
 * @param version expected version of txn node in store.
 * @param ctx     context.
 * @return        Txn status after sealing it.
 */
CompletableFuture<TxnStatus> sealTxnBody(final String host, final String scope, final String stream,
        final boolean commit, final UUID txnId, final Integer version, final OperationContext ctx) {
    TxnResource resource = new TxnResource(scope, stream, txnId);
    Optional<Integer> versionOpt = Optional.ofNullable(version);

    // Step 1. Add txn to current host's index, if it is not already present
    // PS: txn version in index does not matter, because if update is successful,
    // then txn would no longer be open.
    CompletableFuture<Void> addIndex = host.equals(hostId) && !timeoutService.containsTxn(scope, stream, txnId)
            ? streamMetadataStore.addTxnToIndex(hostId, resource, Integer.MAX_VALUE)
            : CompletableFuture.completedFuture(null);

    addIndex.whenComplete((v, e) -> {
        if (e != null) {
            log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
        } else {
            log.debug("Txn={}, already present/newly added to host-txn index of host={}", txnId, hostId);
        }
    });

    // Step 2. Seal txn
    CompletableFuture<AbstractMap.SimpleEntry<TxnStatus, Integer>> sealFuture = addIndex.thenComposeAsync(
            x -> streamMetadataStore.sealTransaction(scope, stream, txnId, commit, versionOpt, ctx, executor),
            executor).whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed sealing txn", txnId);
                } else {
                    log.debug("Txn={}, sealed successfully, commit={}", txnId, commit);
                }
            });

    // Step 3. Write an event to the corresponding stream.
    return sealFuture.thenComposeAsync(pair -> {
        TxnStatus status = pair.getKey();
        switch (status) {
        case COMMITTING:
            return writeCommitEvent(scope, stream, pair.getValue(), txnId, status);
        case ABORTING:
            return writeAbortEvent(scope, stream, pair.getValue(), txnId, status);
        case ABORTED:
        case COMMITTED:
            return CompletableFuture.completedFuture(status);
        case OPEN:
        case UNKNOWN:
        default:
            // Not possible after a successful streamMetadataStore.sealTransaction call,
            // because otherwise an exception would have been thrown.
            return CompletableFuture.completedFuture(status);
        }
    }, executor).thenComposeAsync(status -> {
        // Step 4. Remove txn from timeoutService, and from the index.
        timeoutService.removeTxn(scope, stream, txnId);
        log.debug("Txn={}, removed from timeout service", txnId);
        return streamMetadataStore.removeTxnFromIndex(host, resource, true).whenComplete((v, e) -> {
            if (e != null) {
                log.debug("Txn={}, failed removing txn from host-txn index of host={}", txnId, hostId);
            } else {
                log.debug("Txn={}, removed txn from host-txn index of host={}", txnId, hostId);
            }
        }).thenApply(x -> status);
    }, executor);
}
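
sealTxnBody demonstrates one more recurring shape: the composing function branches on the upstream result (here, a switch over the txn status), and because every branch returns a CompletionStage of the same type, the branching collapses into a single future. A hypothetical sketch; the writeEvent helper stands in for writeCommitEvent/writeAbortEvent:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SwitchComposeSketch {

    enum TxnStatus { COMMITTING, ABORTING, COMMITTED }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        CompletableFuture<TxnStatus> sealFuture = CompletableFuture.supplyAsync(() -> TxnStatus.COMMITTING,
                executor);

        // Every switch arm returns a CompletionStage<TxnStatus>, so the caller
        // sees one CompletableFuture<TxnStatus> regardless of the path taken.
        CompletableFuture<TxnStatus> result = sealFuture.thenComposeAsync(status -> {
            switch (status) {
            case COMMITTING:
                return writeEvent("commit-stream", status, executor);
            case ABORTING:
                return writeEvent("abort-stream", status, executor);
            default:
                return CompletableFuture.completedFuture(status);
            }
        }, executor);

        System.out.println(result.join()); // prints "COMMITTING"
        executor.shutdown();
    }

    private static CompletableFuture<TxnStatus> writeEvent(String stream, TxnStatus status,
            ExecutorService executor) {
        return CompletableFuture.supplyAsync(() -> {
            System.out.println("wrote event to " + stream);
            return status;
        }, executor);
    }
}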