Example usage for java.util.concurrent CompletableFuture thenCombine

List of usage examples for java.util.concurrent CompletableFuture thenCombine

Introduction

On this page you can find example usages of java.util.concurrent CompletableFuture thenCombine.

Prototype

public <U, V> CompletableFuture<V> thenCombine(CompletionStage<? extends U> other,
            BiFunction<? super T, ? super U, ? extends V> fn) 

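A minimal, self-contained sketch of how this prototype is typically called is shown below. The two futures and the combining function are hypothetical and exist only to illustrate the call shape: thenCombine waits for both stages to complete and then applies the BiFunction to their two results.

import java.util.concurrent.CompletableFuture;

public class ThenCombineSketch {
    public static void main(String[] args) {
        // Two independent asynchronous computations (hypothetical values).
        CompletableFuture<Integer> price = CompletableFuture.supplyAsync(() -> 42);
        CompletableFuture<String> currency = CompletableFuture.supplyAsync(() -> "USD");

        // thenCombine waits for BOTH stages and applies the BiFunction to their results.
        CompletableFuture<String> label = price.thenCombine(currency, (p, c) -> p + " " + c);

        // join() blocks only here, at the very end, to print the combined result.
        System.out.println(label.join()); // prints "42 USD"
    }
}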

Usage

From source file:com.connio.sdk.example.ConnioAsyncClientExample.java

public static void main(String[] args) {
    try {

        // Initialises the context with api key credentials
        Connio.init("_key_671901158138828071", "31acec81b2414b03acf3d8c37ebdf305");

        // Create device profile
        final CompletableFuture<DeviceProfile> deviceProfile = DeviceProfile.create("device_profile_sdk")
                .executeAsync();

        // On device profile creation we compose property creation using it
        final CompletableFuture<Property> property = deviceProfile.thenCompose((dp) -> {
            return dp.addProperty("numericProperty1", Property.Type.Number).setAccess(Property.Access.Public)
                    .executeAsync();
        });

        // On device profile and property creation we compose method creation using them
        final CompletableFuture<Method> method = deviceProfile
                .thenCombine(property, (dp, p) -> new ImmutablePair<>(dp, p))
                .thenCompose((deviceProfileAndProperty) -> {
                    final DeviceProfile dp = deviceProfileAndProperty.getLeft();
                    final Property prop = deviceProfileAndProperty.getRight();

                    final MethodImpl implementation = new MethodImpl.Builder("return value;",
                            MethodImpl.ExecType.Javascript).build();
                    return dp.addMethod("getter", Method.Access.Public, implementation).setInputId(prop.getId())
                            .executeAsync();
                });

        // On device profile and method creation we will create the device
        final CompletableFuture<Device> device = deviceProfile.thenCombine(method, (dp, m) -> dp)
                .thenCompose((dp) -> dp.addDevice().setStatus(Device.Status.Debug).executeAsync());

        // Write three data points
        device.thenCombine(property,
                (d, p) -> d.writeData(p, new DataFeed(new DataPoint.Builder(16.0).build())).executeAsync());
        device.thenCombine(property,
                (d, p) -> d.writeData(p, new DataFeed(new DataPoint.Builder(17.0).build())).executeAsync());
        device.thenCombine(property,
                (d, p) -> d.writeData(p, new DataFeed(new DataPoint.Builder(18.0).build())).executeAsync());

        // Retrieve getter method value
        CompletableFuture<Object> methodValue = device.thenCombine(method, (d, m) -> new ImmutablePair<>(d, m))
                .thenCompose((deviceAndMethod) -> {
                    final Device d = deviceAndMethod.getLeft();
                    final Method m = deviceAndMethod.getRight();

                    return d.readMethod(m).executeAsync();
                });

        // Get device state
        CompletableFuture<Optional<DeviceState>> deviceState = device.thenCompose(d -> d.state().fetchAsync());

        // Until this point there is no blocking whatsoever. Now we block until we get the futures' values and print
        // some information
        System.out.println("Device profile id: " + deviceProfile.get().getId());
        System.out.println("Property id: " + property.get().getId());
        System.out.println("Method id: " + method.get().getId());
        System.out.println("Device id: " + device.get().getId());
        System.out.println("Getter method value: " + methodValue.get());
        System.out.println("Device state: " + deviceState.get().get());

        Connio.terminate();

    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:com.redhat.coolstore.api_gateway.ApiGatewayController.java

/**
 * This /api REST endpoint uses Java 8 concurrency to call two backend services to construct the result
 *
 * @return the list of products
 */
@CrossOrigin(maxAge = 3600)
@RequestMapping(method = RequestMethod.GET, value = "/products", produces = MediaType.APPLICATION_JSON_VALUE)
@ApiOperation("Get a list of products")
@ResponseBody
public List<Product> list() throws ExecutionException, InterruptedException {

    final CompletableFuture<List<Product>> productList = CompletableFuture
            .supplyAsync(() -> feignClientFactory.getPricingClient().getService().getProducts(), es);

    return productList.thenCompose((List<Product> products) -> {

        List<CompletableFuture<Product>> all = products.stream()
                .map(p -> productList.thenCombine(_getInventory(p.itemId), (pl, a) -> {
                    p.availability = a;
                    return p;
                })).collect(Collectors.toList());

        return CompletableFuture.allOf(all.toArray(new CompletableFuture[all.size()]))
                .thenApply(v -> all.stream().map(CompletableFuture::join).collect(Collectors.toList()));
    }).get();

}
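
The example above combines thenCombine with allOf to fan out one availability lookup per product and then gather everything back into a single list. Below is a simplified, framework-free sketch of the same fan-out/fan-in pattern; the item ids and the lookupAvailability helper are hypothetical stand-ins for the Feign-based service calls used above.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class FanOutFanInSketch {

    // Hypothetical per-item lookup, standing in for _getInventory(...) above.
    static CompletableFuture<Integer> lookupAvailability(String itemId) {
        return CompletableFuture.supplyAsync(() -> itemId.length()); // dummy value
    }

    public static void main(String[] args) {
        CompletableFuture<List<String>> items = CompletableFuture
                .supplyAsync(() -> Arrays.asList("item-1", "item-2", "item-3"));

        CompletableFuture<List<String>> described = items.thenCompose(list -> {
            // Fan out: one future per item, combining the item id with its availability.
            List<CompletableFuture<String>> all = list.stream()
                    .map(id -> CompletableFuture.completedFuture(id)
                            .thenCombine(lookupAvailability(id), (i, a) -> i + "=" + a))
                    .collect(Collectors.toList());

            // Fan in: wait for all lookups, then join each (now completed) future.
            return CompletableFuture.allOf(all.toArray(new CompletableFuture[0]))
                    .thenApply(v -> all.stream().map(CompletableFuture::join).collect(Collectors.toList()));
        });

        System.out.println(described.join());
    }
}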

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Notify interested harvesters in the creation of a new bucket
 * @param new_object - the bucket just created
 * @param is_suspended - whether it's initially suspended
 * @param actor_context - the actor context for broadcasting out requests
 * @return the management side channel from the harvesters
 */
public static CompletableFuture<Collection<BasicMessageBean>> requestNewBucket(final DataBucketBean new_object,
        final boolean is_suspended, final ICrudService<DataBucketStatusBean> status_store,
        final ManagementDbActorContext actor_context) {
    // OK if we're here then it's time to notify any interested harvesters

    final BucketActionMessage.NewBucketActionMessage new_message = new BucketActionMessage.NewBucketActionMessage(
            new_object, is_suspended);

    final CompletableFuture<BucketActionCollectedRepliesMessage> f = BucketActionSupervisor
            .askBucketActionActor(Optional.empty(), actor_context.getBucketActionSupervisor(),
                    actor_context.getActorSystem(), (BucketActionMessage) new_message, Optional.empty());

    final CompletableFuture<Collection<BasicMessageBean>> management_results = f
            .<Collection<BasicMessageBean>>thenApply(replies -> {
                return replies.replies();
            });

    // Apply the affinity to the bucket status (which must exist, by construction):
    // (note any node information coming back from streaming enrichment is filtered out by the getSuccessfulNodes call below)
    final CompletableFuture<Boolean> update_future = MgmtCrudUtils.applyNodeAffinityWrapper(new_object,
            status_store, management_results);

    // Convert BucketActionCollectedRepliesMessage into a management side-channel:
    // (combine the 2 futures but then only return the management results, just need for the update to have completed)
    return management_results.thenCombine(update_future, (mgmt, update) -> mgmt);
}
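
Note the idiom on the method's last line: thenCombine is used purely for synchronization, waiting for both futures to complete but returning only the management results. A minimal standalone sketch of that idiom, with hypothetical futures, is shown here.

import java.util.concurrent.CompletableFuture;

public class CombineForSynchronizationSketch {
    public static void main(String[] args) {
        // Hypothetical futures: one carries the result we care about,
        // the other is a side-effecting update we only need to have finished.
        CompletableFuture<String> results = CompletableFuture.supplyAsync(() -> "management results");
        CompletableFuture<Boolean> update = CompletableFuture.supplyAsync(() -> true);

        // Wait for both, but propagate only the first future's value.
        CompletableFuture<String> combined = results.thenCombine(update, (r, ignored) -> r);

        System.out.println(combined.join()); // prints "management results"
    }
}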

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Notify interested harvesters that a bucket has been updated
 * @param new_object - the updated bucket
 * @param old_version - the previous version of the bucket
 * @param status - the current status of the bucket
 * @param actor_context - the actor context for broadcasting out requests
 * @param status_store - the bucket status store
 * @param retry_store - the store used to retry failed notifications
 * @return the management side channel from the harvesters
 */
public static CompletableFuture<Collection<BasicMessageBean>> requestUpdatedBucket(
        final DataBucketBean new_object, final DataBucketBean old_version, final DataBucketStatusBean status,
        final ManagementDbActorContext actor_context, final ICrudService<DataBucketStatusBean> status_store,
        final ICrudService<BucketActionRetryMessage> retry_store) {
    // First off, a couple of special cases relating to node affinity
    final boolean multi_node_enabled = Optional.ofNullable(new_object.multi_node_enabled()).orElse(false);
    final Set<String> node_affinity = Optional.ofNullable(status.node_affinity()).map(na -> {
        if (multi_node_enabled && (1 == status.node_affinity().size())) {
            //(this might indicate that we've moved from single node -> multi node)
            return Collections.<String>emptyList();
        } else if (!multi_node_enabled && (status.node_affinity().size() > 1)) {
            //(this definitely indicates that we've moved from multi node -> single node)                  
            return Collections.<String>emptyList();
        } else
            return na;
    }).map(na -> na.stream().collect(Collectors.toSet())).orElse(Collections.emptySet());

    final BucketActionMessage.UpdateBucketActionMessage update_message = new BucketActionMessage.UpdateBucketActionMessage(
            new_object, !status.suspended(), old_version, node_affinity);

    final CompletableFuture<Collection<BasicMessageBean>> management_results = MgmtCrudUtils
            .applyRetriableManagementOperation(new_object, actor_context, retry_store, update_message,
                    source -> new BucketActionMessage.UpdateBucketActionMessage(new_object, !status.suspended(),
                            old_version, new HashSet<String>(Arrays.asList(source))));

    // Special case: if the bucket has no node affinity (something went wrong earlier) but now it does, then update:
    final boolean lock_to_nodes = Optional.ofNullable(new_object.lock_to_nodes()).orElse(true);
    if (node_affinity.isEmpty()) {
        final CompletableFuture<Boolean> update_future = MgmtCrudUtils.applyNodeAffinityWrapper(new_object,
                status_store, management_results);
        return management_results.thenCombine(update_future, (mgmt, update) -> mgmt);
    } else if (!lock_to_nodes && Optional.ofNullable(status.confirmed_suspended()).orElse(false)) { // previously had a node affinity, remove now that we're definitely suspended
        final CompletableFuture<Boolean> update_future = status_store.updateObjectById(new_object._id(),
                CrudUtils.update(DataBucketStatusBean.class).unset(DataBucketStatusBean::node_affinity));
        return management_results.thenCombine(update_future, (mgmt, update) -> mgmt);
    } else {
        return management_results;
    }
}

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Combine the analytic thread level results and the per-job results into a single reply
 * @param top_level - the analytic thread level result
 * @param per_job - the per-job results
 * @param source - the source to attach to the reply messages
 * @return the combined reply message
 */
protected final static CompletableFuture<BucketActionReplyMessage> combineResults(
        final CompletableFuture<BasicMessageBean> top_level,
        final List<CompletableFuture<BasicMessageBean>> per_job, final String source) {
    if (per_job.isEmpty()) {
        return top_level.thenApply(reply -> new BucketActionHandlerMessage(source, reply));
    } else { // slightly more complex:

        // First off wait for them all to complete:
        final CompletableFuture<?>[] futures = per_job.toArray(new CompletableFuture<?>[0]);

        return top_level.thenCombine(CompletableFuture.allOf(futures), (thread, __) -> {
            List<BasicMessageBean> replies = Stream.concat(Lambdas.get(() -> {
                if (thread.success() && ((null == thread.message()) || thread.message().isEmpty())) {
                    // Ignore top level, it's not very interesting
                    return Stream.empty();
                } else
                    return Stream.of(thread);
            }), per_job.stream().map(cf -> cf.join())

            ).collect(Collectors.toList());

            return (BucketActionReplyMessage) new BucketActionCollectedRepliesMessage(source, replies,
                    Collections.emptySet(), Collections.emptySet());
        }).exceptionally(t -> {
            return (BucketActionReplyMessage) new BucketActionHandlerMessage(source,
                    ErrorUtils.buildErrorMessage(DataBucketAnalyticsChangeActor.class.getSimpleName(), source,
                            ErrorUtils.getLongForm("{0}", t)));
        });
    }
}
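
The method above pairs thenCombine(allOf(...)) with an exceptionally step so that a failure in either the thread-level or any per-job future still produces a well-formed reply. A stripped-down sketch of that shape, with the domain message types replaced by plain strings, follows.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class CombineWithFallbackSketch {
    public static void main(String[] args) {
        CompletableFuture<String> topLevel = CompletableFuture.supplyAsync(() -> "thread: ok");
        List<CompletableFuture<String>> perJob = Arrays.asList(
                CompletableFuture.supplyAsync(() -> "job-1: ok"),
                CompletableFuture.supplyAsync(() -> "job-2: ok"));

        CompletableFuture<String> reply = topLevel
                // Wait for the top-level result AND for every per-job future to complete.
                .thenCombine(CompletableFuture.allOf(perJob.toArray(new CompletableFuture[0])),
                        (thread, ignored) -> perJob.stream().map(CompletableFuture::join)
                                .collect(Collectors.joining(", ", thread + " | ", "")))
                // If anything above failed, fall back to a single error reply instead of propagating.
                .exceptionally(t -> "error: " + t.getMessage());

        System.out.println(reply.join());
    }
}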

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

/**
 * If the timestamp is earlier than the stream's creation time, an empty list is returned.
 * 1. Perform a binary search on the index table to find the timestamp.
 * 2. Fetch the record from the history table for the pointer found in the index.
 *    Note: the index may be stale, so we may need to fall through.
 * 3. Parse the row and return the list of integers.
 *
 * @param timestamp point in time.
 * @return list of active segment numbers at the given timestamp
 */
@Override
public CompletableFuture<List<Integer>> getActiveSegments(final long timestamp) {
    final CompletableFuture<Data<T>> indexFuture = verifyLegalState().thenCompose(v -> getIndexTable());

    final CompletableFuture<Data<T>> historyFuture = getHistoryTable();

    return indexFuture.thenCombine(historyFuture, (indexTable, historyTable) -> TableHelper
            .getActiveSegments(timestamp, indexTable.getData(), historyTable.getData()));
}

From source file:com.ikanow.aleph2.management_db.services.DataBucketStatusCrudService.java

@Override
public ManagementFuture<Boolean> updateObjectBySpec(final QueryComponent<DataBucketStatusBean> unique_spec,
        final Optional<Boolean> upsert, final UpdateComponent<DataBucketStatusBean> update) {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    if (upsert.orElse(false)) {
        throw new RuntimeException("This method is not supported with upsert set and true");
    }

    final Collection<BasicMessageBean> errors = validateUpdateCommand(update);
    if (!errors.isEmpty()) {
        return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(false),
                CompletableFuture.completedFuture(errors));
    }

    // Now perform the update and based on the results we may need to send out instructions
    // to any listening buckets

    final CompletableFuture<Optional<DataBucketStatusBean>> update_reply = _underlying_data_bucket_status_db
            .get().updateAndReturnObjectBySpec(unique_spec, Optional.of(false), update, Optional.of(false),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until),
                            helper.field(DataBucketStatusBean::node_affinity)),
                    true);

    try {
        // What happens now depends on the contents of the message         

        // Maybe the user wanted to suspend/resume the bucket:

        final CompletableFuture<Collection<BasicMessageBean>> suspend_future = Lambdas
                .<CompletableFuture<Collection<BasicMessageBean>>>get(() -> {
                    if (update.getAll().containsKey(helper.field(DataBucketStatusBean::suspended))) {

                        // (note this handles suspending the bucket if no handlers are available)
                        return getOperationFuture(update_reply, sb -> sb.suspended(),
                                _underlying_data_bucket_db.get(), _underlying_data_bucket_status_db.get(),
                                _actor_context, _bucket_action_retry_store.get());
                    } else { // (this isn't an error, just nothing to do here)
                        return CompletableFuture.completedFuture(Collections.<BasicMessageBean>emptyList());
                    }
                });

        // Maybe the user wanted to set quarantine on/off:

        final CompletableFuture<Collection<BasicMessageBean>> quarantine_future = Lambdas
                .<CompletableFuture<Collection<BasicMessageBean>>>get(() -> {
                    if (update.getAll().containsKey(helper.field(DataBucketStatusBean::quarantined_until))) {

                        // (note this handles suspending the bucket if no handlers are available)
                        return getOperationFuture(update_reply, sb -> { // (this predicate is slightly more complex)
                            return (null != sb.quarantined_until())
                                    && (new Date().getTime() < sb.quarantined_until().getTime());
                        }, _underlying_data_bucket_db.get(), _underlying_data_bucket_status_db.get(),
                                _actor_context, _bucket_action_retry_store.get());
                    } else { // (this isn't an error, just nothing to do here)
                        return CompletableFuture.completedFuture(Collections.<BasicMessageBean>emptyList());
                    }
                });

        return FutureUtils.createManagementFuture(update_reply.thenApply(o -> o.isPresent()), // whether we updated
                suspend_future.thenCombine(quarantine_future,
                        (f1, f2) -> Stream.concat(f1.stream(), f2.stream()).collect(Collectors.toList())));
        //(+combine error messages from suspend/quarantine operations)
    } catch (Exception e) {
        // This is a serious enough exception that we'll just leave here
        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
}