Example usage for java.util.concurrent CompletableFuture completedFuture

Introduction

This page collects example usages of java.util.concurrent CompletableFuture.completedFuture, drawn from open-source projects.

Prototype

public static <U> CompletableFuture<U> completedFuture(U value) 

Document

Returns a new CompletableFuture that is already completed with the given value.
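
For orientation, here is a minimal, self-contained sketch of the call using only the JDK (the class and variable names are illustrative):

import java.util.concurrent.CompletableFuture;

public class CompletedFutureExample {
    public static void main(String[] args) {
        // A future that is complete the moment it is created - no executor involved.
        CompletableFuture<String> future = CompletableFuture.completedFuture("hello");

        System.out.println(future.isDone()); // true
        System.out.println(future.join());   // prints "hello" without blocking
    }
}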

Usage

From source file:com.ikanow.aleph2.graph.titan.services.TitanGraphService.java

@Override
public CompletableFuture<BasicMessageBean> handleAgeOutRequest(DataBucketBean bucket) {
    // TODO (ALEPH-15): implement various temporal handling features (don't return error though, just do nothing)
    return CompletableFuture.completedFuture(ErrorUtils.buildSuccessMessage(this.getClass().getSimpleName(),
            "handleAgeOutRequest", ErrorUtils.NOT_YET_IMPLEMENTED, "handleAgeOutRequest"));
}

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Data<Integer>> getHistoryTable() {
    synchronized (lock) {
        if (this.historyTable == null) {
            return FutureHelpers
                    .failedFuture(StoreException.create(StoreException.Type.DATA_NOT_FOUND, getName()));
        }

        return CompletableFuture.completedFuture(copy(historyTable));
    }
}
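
Note the contrast in this snippet: FutureHelpers.failedFuture is a Pravega utility, not part of the JDK. A minimal sketch of the same succeed-or-fail shape using plain java.util.concurrent (names are illustrative; on Java 9+, CompletableFuture.failedFuture does the same job):

import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.CompletableFuture;

class LookupSketch {
    // Return an already-completed future on a hit, an already-failed one on a miss.
    static CompletableFuture<String> lookup(Map<String, String> table, String key) {
        String value = table.get(key);
        if (value == null) {
            CompletableFuture<String> failed = new CompletableFuture<>();
            failed.completeExceptionally(new NoSuchElementException(key));
            return failed;
        }
        return CompletableFuture.completedFuture(value);
    }
}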

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Worker function for storeObject
 * @param new_object - the bucket to create
 * @param old_bucket - the version of the bucket being overwritten, if an update
 * @param validation_info - validation info to be presented to the user
 * @param replace_if_present - update mode
 * @return - the user return value
 * @throws Exception
 */
public ManagementFuture<Supplier<Object>> storeValidatedObject(final DataBucketBean new_object,
        final Optional<DataBucketBean> old_bucket, final Collection<BasicMessageBean> validation_info,
        boolean replace_if_present) throws Exception {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    // Error if a bucket status doesn't exist - must create a bucket status before creating the bucket
    // (note the above validation ensures the bucket has an _id)
    // (obviously need to block here until we're sure..)

    final CompletableFuture<Optional<DataBucketStatusBean>> corresponding_status = _underlying_data_bucket_status_db
            .get().getObjectById(new_object._id(),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::node_affinity),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until)),
                    true);

    if (!corresponding_status.get().isPresent()) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException(
                        ErrorUtils.get(ManagementDbErrorUtils.BUCKET_CANNOT_BE_CREATED_WITHOUT_BUCKET_STATUS,
                                new_object.full_name()))),
                CompletableFuture.completedFuture(Collections.emptyList()));
    }

    // Some fields like multi-node, you can only change if the bucket status is set to suspended, to make
    // the control logic easy
    old_bucket.ifPresent(ob -> {
        validation_info.addAll(checkForInactiveOnlyUpdates(new_object, ob, corresponding_status.join().get()));
        // (corresponding_status present and completed because of above check) 
    });
    if (!validation_info.isEmpty() && validation_info.stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                CompletableFuture.completedFuture(validation_info));
    }
    // Made it this far, try to set the next_poll_time in the status object
    if (null != new_object.poll_frequency()) {
        //get the next poll time
        final Date next_poll_time = TimeUtils
                .getForwardSchedule(new_object.poll_frequency(), Optional.of(new Date())).success();
        //update the status
        _underlying_data_bucket_status_db.get().updateObjectById(new_object._id(), CrudUtils
                .update(DataBucketStatusBean.class).set(DataBucketStatusBean::next_poll_date, next_poll_time));
    }

    // Create the directories

    try {
        createFilePaths(new_object, _storage_service.get());
        //if logging is enabled, create the logging filepath also
        if (Optionals.of(() -> new_object.management_schema().logging_schema().enabled()).orElse(false)) {
            createFilePaths(BucketUtils.convertDataBucketBeanToLogging(new_object), _storage_service.get());
        }
    } catch (Exception e) { // Error creating directory, haven't created object yet so just back out now

        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
    // OK if the bucket is validated we can store it (and create a status object)

    final CompletableFuture<Supplier<Object>> ret_val = _underlying_data_bucket_db.get().storeObject(new_object,
            replace_if_present);
    final boolean is_suspended = DataBucketStatusCrudService
            .bucketIsSuspended(corresponding_status.get().get());

    // Register the bucket update with any applicable data services      

    final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
            .selectDataServices(new_object.data_schema(), _service_context);
    final Optional<Multimap<IDataServiceProvider, String>> old_data_service_info = old_bucket
            .map(old -> DataServiceUtils.selectDataServices(old.data_schema(), _service_context));

    final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info.asMap()
            .entrySet().stream()
            .map(kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                    kv.getValue().stream().collect(Collectors.toSet()),
                    old_data_service_info.map(old_map -> old_map.get(kv.getKey()))
                            .map(old_servs -> old_servs.stream().collect(Collectors.toSet()))
                            .orElse(Collections.emptySet())))
            .collect(Collectors.toList());

    // Process old data services that are no longer in use
    final List<CompletableFuture<Collection<BasicMessageBean>>> old_ds_update_results = old_data_service_info
            .map(old_ds_info -> {
                return old_ds_info.asMap().entrySet().stream()
                        .filter(kv -> !data_service_info.containsKey(kv.getKey()))
                        .<CompletableFuture<Collection<BasicMessageBean>>>map(
                                kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                                        Collections.emptySet(),
                                        kv.getValue().stream().collect(Collectors.toSet())))
                        .collect(Collectors.toList());
            }).orElse(Collections.emptyList());

    //(combine)
    @SuppressWarnings("unchecked")
    CompletableFuture<Collection<BasicMessageBean>> all_service_registration_complete[] = Stream
            .concat(ds_update_results.stream(), old_ds_update_results.stream())
            .toArray(CompletableFuture[]::new);

    // Get the status and then decide whether to broadcast out the new/update message

    final CompletableFuture<Collection<BasicMessageBean>> mgmt_results = CompletableFuture
            .allOf(all_service_registration_complete)
            .thenCombine(
                    old_bucket.isPresent()
                            ? requestUpdatedBucket(new_object, old_bucket.get(),
                                    corresponding_status.get().get(), _actor_context,
                                    _underlying_data_bucket_status_db.get(), _bucket_action_retry_store.get())
                            : requestNewBucket(new_object, is_suspended,
                                    _underlying_data_bucket_status_db.get(), _actor_context),
                    (__, harvest_results) -> {
                        return (Collection<BasicMessageBean>) Stream
                                .concat(Arrays.stream(all_service_registration_complete)
                                        .flatMap(s -> s.join().stream()), harvest_results.stream())
                                .collect(Collectors.toList());
                    })
            .exceptionally(t -> Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "storeValidatedObject", ErrorUtils.get("{0}", t))));

    // Update the status depending on the results of the management channels

    return FutureUtils.createManagementFuture(ret_val,
            MgmtCrudUtils
                    .handleUpdatingStatus(new_object, corresponding_status.get().get(), is_suspended,
                            mgmt_results, _underlying_data_bucket_status_db.get())
                    .thenApply(msgs -> Stream.concat(msgs.stream(), validation_info.stream())
                            .collect(Collectors.toList())));
}

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> createEpochNodeIfAbsent(int epoch) {
    Preconditions.checkArgument(epochTxnMap.size() <= 2);
    activeEpoch.compareAndSet(epoch - 1, epoch);
    synchronized (txnsLock) {
        epochTxnMap.putIfAbsent(epoch, new HashSet<>());
    }
    return CompletableFuture.completedFuture(null);
}
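
completedFuture(null) is the standard way to satisfy a CompletableFuture<Void> return type when the work was done synchronously, as in the method above. A minimal sketch (the Registry class here is illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

class Registry {
    private final ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();

    // The mutation happens synchronously; the already-completed Void future
    // merely satisfies the asynchronous interface.
    CompletableFuture<Void> register(String key, String value) {
        map.putIfAbsent(key, value);
        return CompletableFuture.completedFuture(null);
    }
}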

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_TestBuckets.java

/**
 * Logic that runs when we come across an old test object.
 * Checks whether the source is done, i.e. it:
 * A. Has timed out, or
 * B. Has created enough results.
 * If either is true, copies over whatever test results exist (if any) and marks the job as done.
 * 
 * @param data_bucket
 * @param old_test_source
 * @param source_test_db
 * @return
 */
private CompletableFuture<Boolean> handleExistingTestSource(final DataBucketBean data_bucket,
        final TestQueueBean old_test_source, final ICrudService<TestQueueBean> source_test_db) {

    // if null==started_processing_on, then source is still being started in a different thread, so just ignore it:
    if (null == old_test_source.started_processing_on()) {
        return CompletableFuture.completedFuture(true);
    }

    //ENTRY: is old      
    final ProcessingTestSpecBean test_spec = old_test_source.test_params();
    //get v1 bucket
    return getTestOutputCrudService(data_bucket).map(v2_output_db -> {
        //got the output crud, check if time is up or we have enough test results
        //1: time is up by checking started_on+test_spec vs now
        final long max_run_time_secs = Optional.ofNullable(test_spec.max_run_time_secs()).orElse(60L);
        final long time_expires_on = old_test_source.started_processing_on().getTime()
                + (max_run_time_secs * 1000L);
        if (new Date().getTime() > time_expires_on) {
            _logger.debug("Test job: " + data_bucket.full_name() + " expired, need to retire");
            return retireTestJob(data_bucket, old_test_source, source_test_db, v2_output_db);
        }
        //2: test results, if we've hit the requested num results
        return checkTestHitRequestedNumResults(v2_output_db, data_bucket, test_spec, old_test_source,
                source_test_db);
    }).orElseGet(() -> {
        //we couldn't get the output crud, need to exit out
        //complete exceptionally so sync will throw an error
        _logger.error("Error getting test output crud");
        CompletableFuture<Boolean> db_error_future = new CompletableFuture<Boolean>();
        db_error_future.completeExceptionally(
                new Exception("Error retrieving output db for test job: " + data_bucket._id()));
        return db_error_future;
    });
}

From source file:com.ikanow.aleph2.harvest.logstash.services.LogstashHarvestService.java

@Override
public CompletableFuture<BasicMessageBean> onTestSource(DataBucketBean test_bucket,
        ProcessingTestSpecBean test_spec, IHarvestContext context) {
    context.getLogger(Optional.empty()).log(Level.TRACE,
            ErrorUtils.lazyBuildMessage(false, () -> this.getClass().getSimpleName(), () -> "onTestSource",
                    () -> null, () -> "Logstash about to run a test boss!", () -> Collections.emptyMap()));
    // Kill any previously running tests
    ProcessUtils.stopProcess(this.getClass().getSimpleName(), test_bucket,
            _global_propertes.get().local_root_dir() + LOCAL_RUN_DIR_SUFFIX, Optional.of(2));

    // Build the logstash config file      
    final LogstashBucketConfigBean config = Optionals.ofNullable(test_bucket.harvest_configs()).stream()
            .findFirst().map(cfg -> BeanTemplateUtils.from(cfg.config(), LogstashBucketConfigBean.class).get())
            .orElse(BeanTemplateUtils.build(LogstashBucketConfigBean.class).done().get());

    final Validation<BasicMessageBean, String> ls_config = getLogstashFormattedConfig(test_bucket, config,
            _globals.get(), context);
    if (ls_config.isFail()) {
        return CompletableFuture.completedFuture(ls_config.fail());
    }

    // Launch the binary in a separate process      
    final ProcessBuilder pb = LogstashUtils.buildLogstashTest(_globals.get(), config, ls_config.success(),
            Optional.ofNullable(test_spec.requested_num_objects()).orElse(10L),
            Optional.of(test_bucket.full_name()));

    final Tuple2<String, String> err_pid = ProcessUtils.launchProcess(pb, this.getClass().getSimpleName(),
            test_bucket, _global_propertes.get().local_root_dir() + LOCAL_RUN_DIR_SUFFIX,
            Optional.of(new Tuple2<Long, Integer>(test_spec.max_run_time_secs(), 2)));

    if (null != err_pid._1()) {
        return CompletableFuture.completedFuture(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                "onTestSource", "Bucket error: " + err_pid._1()));
    } else {
        return CompletableFuture.completedFuture(ErrorUtils.buildSuccessMessage(this.getClass().getSimpleName(),
                "onTestSource", "Bucket launched: " + err_pid._2()));
    }
}

From source file:io.pravega.controller.task.Stream.StreamMetadataTasks.java

/**
 * Truncate a stream.
 *
 * @param scope      scope.
 * @param stream     stream name.
 * @param streamCut  stream cut.
 * @param contextOpt optional context
 * @return update status.
 */
public CompletableFuture<UpdateStreamStatus.Status> truncateStream(final String scope, final String stream,
        final Map<Integer, Long> streamCut, final OperationContext contextOpt) {
    final OperationContext context = contextOpt == null ? streamMetadataStore.createContext(scope, stream)
            : contextOpt;

    // 1. get stream cut
    return startTruncation(scope, stream, streamCut, context)
            // 4. check for truncation to complete
            .thenCompose(truncationStarted -> {
                if (truncationStarted) {
                    return checkDone(() -> isTruncated(scope, stream, streamCut, context))
                            .thenApply(y -> UpdateStreamStatus.Status.SUCCESS);
                } else {
                    log.warn("Unable to start truncation for {}/{}", scope, stream);
                    return CompletableFuture.completedFuture(UpdateStreamStatus.Status.FAILURE);
                }
            }).exceptionally(ex -> {
                log.warn("Exception thrown in trying to update stream configuration {}", ex);
                return handleUpdateStreamError(ex);
            });
}
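
In the thenCompose branch above, completedFuture keeps the immediate failure result typed the same as the genuinely asynchronous success path. A cut-down sketch of that shape (all names illustrative, not from the Pravega code):

import java.util.concurrent.CompletableFuture;

class BranchSketch {
    static CompletableFuture<String> run(CompletableFuture<Boolean> started) {
        return started.thenCompose(ok -> ok
                ? doSlowCheck()                                  // asynchronous branch
                : CompletableFuture.completedFuture("FAILURE")); // immediate branch
    }

    static CompletableFuture<String> doSlowCheck() {
        return CompletableFuture.supplyAsync(() -> "SUCCESS");
    }
}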

From source file:com.linkedin.restli.server.TestRestLiServer.java

private void testValidRequest(RestLiServer restLiServer, final ProtocolVersion clientProtocolVersion,
        boolean filters, final String headerConstant, final RestOrStream restOrStream)
        throws URISyntaxException {
    RestRequest request = null;
    StreamRequest streamRequest = null;
    if (clientProtocolVersion != null) {
        if (restOrStream == RestOrStream.REST) {
            request = new RestRequestBuilder(new URI("/statuses/1"))
                    .setHeader(headerConstant, clientProtocolVersion.toString()).build();
        } else {
            streamRequest = new StreamRequestBuilder(new URI("/statuses/1"))
                    .setHeader(headerConstant, clientProtocolVersion.toString())
                    .build(EntityStreams.emptyStream());
        }
    } else {
        if (restOrStream == RestOrStream.REST) {
            request = new RestRequestBuilder(new URI("/statuses/1")).build();
        } else {
            streamRequest = new StreamRequestBuilder(new URI("/statuses/1")).build(EntityStreams.emptyStream());
        }
    }

    final StatusCollectionResource statusResource = getMockResource(StatusCollectionResource.class);
    EasyMock.expect(statusResource.get(eq(1L))).andReturn(buildStatusRecord()).once();
    if (filters) {
        _mockFilter.onRequest(EasyMock.anyObject(FilterRequestContext.class));
        EasyMock.expectLastCall().andAnswer(new IAnswer<Object>() {
            @Override
            public Object answer() throws Throwable {
                return CompletableFuture.completedFuture(null);
            }
        }).times(1);

        _mockFilter.onResponse(EasyMock.anyObject(FilterRequestContext.class),
                EasyMock.anyObject(FilterResponseContext.class));
        EasyMock.expectLastCall().andAnswer(new IAnswer<Object>() {
            @Override
            public Object answer() throws Throwable {
                return CompletableFuture.completedFuture(null);
            }
        }).times(1);
        EasyMock.replay(_mockFilter);
    }
    EasyMock.replay(statusResource);

    final Callback<RestResponse> restResponseCallback = new Callback<RestResponse>() {
        @Override
        public void onSuccess(RestResponse restResponse) {
            assertEquals(restResponse.getStatus(), 200);
            assertTrue(restResponse.getEntity().length() > 0);
            EasyMock.verify(statusResource);
            EasyMock.reset(statusResource);

            if (clientProtocolVersion != null) {
                assertEquals(RestConstants.HEADER_RESTLI_PROTOCOL_VERSION, headerConstant,
                        "Rest.li protocol header name is unexpected.");
            }
        }

        @Override
        public void onError(Throwable e) {
            fail();
        }
    };

    if (restOrStream == RestOrStream.REST) {
        restLiServer.handleRequest(request, new RequestContext(), restResponseCallback);
    } else {
        Callback<StreamResponse> streamResponseCallback = new Callback<StreamResponse>() {
            @Override
            public void onSuccess(StreamResponse streamResponse) {
                Messages.toRestResponse(streamResponse, new Callback<RestResponse>() {
                    @Override
                    public void onError(Throwable e) {
                        Assert.fail();
                    }

                    @Override
                    public void onSuccess(RestResponse result) {
                        restResponseCallback.onSuccess(result);
                    }
                });
            }

            @Override
            public void onError(Throwable e) {
                fail();
            }
        };

        restLiServer.handleRequest(streamRequest, new RequestContext(), streamResponseCallback);
    }
    if (filters) {
        EasyMock.verify(_mockFilter);
    }
}

From source file:com.ikanow.aleph2.graph.titan.services.TitanGraphService.java

/** Deletes a bucket
 * @param bucket
 * @param secondary_buffer
 * @param bucket_or_buffer_getting_deleted
 * @return
 */
private CompletableFuture<BasicMessageBean> handleBucketDeletionRequest_internal(DataBucketBean bucket,
        Optional<String> secondary_buffer, boolean bucket_or_buffer_getting_deleted) {

    //TODO (ALEPH-15): check if the indexes exist - just return if so

    if (secondary_buffer.isPresent()) {
        return CompletableFuture.completedFuture(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                "handleBucketDeletionRequest", ErrorUtils.BUFFERS_NOT_SUPPORTED, bucket.full_name()));
    }

    //TODO (ALEPH-15): At some point need to be able for services to (optionally) request batch enrichment jobs - eg would be much nicer to fire this off as a distributed job

    return CompletableFuture.runAsync(() -> {

        try {
            Thread.sleep(1000L);
        } catch (Exception e) {
        } // just check the indexes have refreshed...

        final TitanTransaction tx = _titan.buildTransaction().start();

        //DEBUG
        //final com.fasterxml.jackson.databind.ObjectMapper titan_mapper = _titan.io(org.apache.tinkerpop.gremlin.structure.io.IoCore.graphson()).mapper().create().createMapper();

        @SuppressWarnings("unchecked")
        final Stream<TitanVertex> vertices_to_check = Optionals.<TitanVertex>streamOf(
                tx.query().has(GraphAnnotationBean.a2_p, bucket.full_name()).vertices(), false);
        vertices_to_check.forEach(v -> {
            {
                final Iterator<VertexProperty<String>> props = v.<String>properties(GraphAnnotationBean.a2_p);
                while (props.hasNext()) {
                    final VertexProperty<String> prop = props.next();
                    if (bucket.full_name().equals(prop.value())) {
                        prop.remove();
                    }
                }
            }
            {
                final Iterator<VertexProperty<String>> props = v.<String>properties(GraphAnnotationBean.a2_p);
                if (!props.hasNext()) { // can delete this bucket
                    v.remove();
                }
            }
        });
        @SuppressWarnings("unchecked")
        final Stream<TitanEdge> edges_to_check = Optionals.<TitanEdge>streamOf(
                tx.query().has(GraphAnnotationBean.a2_p, bucket.full_name()).edges(), false);
        edges_to_check.forEach(e -> {
            e.remove(); // (can only have one edge so delete it)
        });

        tx.commit();
    }).thenApply(__ -> ErrorUtils.buildSuccessMessage(this.getClass().getSimpleName(),
            "handleBucketDeletionRequest", "Completed", "handleBucketDeletionRequest"))
            .exceptionally(t -> ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "handleBucketDeletionRequest", ErrorUtils.getLongForm("{0}", t),
                    "handleBucketDeletionRequest"));

}

From source file:com.ikanow.aleph2.aleph2_rest_utils.DataStoreCrudService.java

@Override
public CompletableFuture<Boolean> deleteObjectById(Object id) {
    try {
        final Path path = new Path(output_directory + id.toString());
        _logger.debug("Trying to delete: " + path.toString());
        final boolean delete_success = fileContext.delete(path, false); //this is always returning false
        _logger.debug("success deleteing: " + delete_success);
        return CompletableFuture.completedFuture(!doesPathExist(path, fileContext)); //if file does not exist, delete was a success
    } catch (IllegalArgumentException | IOException e) {
        final CompletableFuture<Boolean> fut = new CompletableFuture<Boolean>();
        fut.completeExceptionally(e);
        return fut;
    }
}
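
The pattern above - completedFuture for the synchronous success path, completeExceptionally for the synchronous failure path - generalizes to any blocking call wrapped behind an async interface. A compact sketch (the parse helper is illustrative):

import java.util.concurrent.CompletableFuture;

class ParseSketch {
    // Wrap a synchronous, possibly-throwing call in an already-settled future.
    static CompletableFuture<Integer> parse(String s) {
        try {
            return CompletableFuture.completedFuture(Integer.parseInt(s));
        } catch (NumberFormatException e) {
            CompletableFuture<Integer> fut = new CompletableFuture<>();
            fut.completeExceptionally(e);
            return fut;
        }
    }
}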