Example usage for java.util.concurrent CompletableFuture completedFuture

List of usage examples for java.util.concurrent CompletableFuture completedFuture

Introduction

This page lists example usages of java.util.concurrent CompletableFuture completedFuture.

Prototype

public static <U> CompletableFuture<U> completedFuture(U value) 

Document

Returns a new CompletableFuture that is already completed with the given value.
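
As a quick, self-contained sketch (the class, cache, and method names below are illustrative, not taken from the examples that follow), completedFuture is typically used to satisfy an asynchronous signature when the result is already in hand:

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class CompletedFutureSketch {
    private final Map<String, String> cache = new ConcurrentHashMap<>();

    // If the value is already cached, return a future that is completed
    // up front instead of scheduling any asynchronous work.
    public CompletableFuture<String> lookup(String key) {
        String cached = cache.get(key);
        if (cached != null) {
            return CompletableFuture.completedFuture(cached);
        }
        return CompletableFuture.supplyAsync(() -> loadSlowly(key));
    }

    private String loadSlowly(String key) {
        return "value-for-" + key; // stand-in for a real lookup
    }
}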

Usage

From source file:io.pravega.service.server.host.ZKSegmentContainerMonitorTest.java

@Test
public void testShutdownNotYetStartedContainer() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);

    SegmentContainerRegistry containerRegistry = createMockContainerRegistry();
    @Cleanup
    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));

    // Simulate a container that takes a long time to start. Should be greater than a few monitor loops.
    ContainerHandle containerHandle = mock(ContainerHandle.class);
    when(containerHandle.getContainerId()).thenReturn(2);
    CompletableFuture<ContainerHandle> startupFuture = FutureHelpers
            .delayedFuture(() -> CompletableFuture.completedFuture(containerHandle), 3000, executorService());
    when(containerRegistry.startContainer(eq(2), any())).thenReturn(startupFuture);

    // Use ZK to send that information to the Container Manager.
    HashMap<Host, Set<Integer>> currentData = deserialize(zkClient, PATH);
    currentData.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(2));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));

    // Verify it's not yet started.
    verify(containerRegistry, timeout(10000).atLeastOnce()).startContainer(eq(2), any());
    assertEquals(0, segMonitor.getRegisteredContainers().size());

    // Now simulate shutting it down.
    when(containerRegistry.stopContainer(any(), any())).thenReturn(CompletableFuture.completedFuture(null));

    currentData.clear();
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));

    verify(containerRegistry, timeout(10000).atLeastOnce()).stopContainer(any(), any());
    Thread.sleep(2000);
    assertEquals(0, segMonitor.getRegisteredContainers().size());
}

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> createStateIfAbsent(State state) {
    Preconditions.checkNotNull(state);

    synchronized (lock) {
        if (this.state == null) {
            this.state = new Data<>(SerializationUtils.serialize(state), 0);
        }
    }
    return CompletableFuture.completedFuture(null);
}

From source file:com.spotify.styx.api.BackfillResource.java

public CompletionStage<Response<ByteString>> haltBackfill(String id, RequestContext rc) {
    try {
        final Optional<Backfill> backfillOptional = storage.backfill(id);
        if (backfillOptional.isPresent()) {
            final Backfill backfill = backfillOptional.get();
            storage.storeBackfill(backfill.builder().halted(true).build());
            return haltActiveBackfillInstances(backfill, rc.requestScopedClient());
        } else {
            return CompletableFuture.completedFuture(
                    Response.forStatus(Status.NOT_FOUND.withReasonPhrase("backfill not found")));
        }
    } catch (IOException e) {
        return CompletableFuture.completedFuture(Response.forStatus(
                Status.INTERNAL_SERVER_ERROR.withReasonPhrase("could not halt backfill: " + e.getMessage())));
    }
}

From source file:com.ikanow.aleph2.data_model.interfaces.shared_services.MockManagementCrudService.java

@Override
public ManagementFuture<Optional<T>> getObjectById(Object id, List<String> field_list, boolean include) {
    return FutureUtils
            .createManagementFuture(CompletableFuture.completedFuture(_mutable_values.stream().findFirst()));
}

From source file:com.ikanow.aleph2.analytics.storm.utils.StormControllerUtil.java

/**
 * Starts up a storm job.
 * 
 * 1. gets the storm instance from the yarn config
 * 2. Makes a mega jar consisting of:
 *    A. Underlying artefacts (system libs)
 *    B. User supplied libraries
 * 3. Submit megajar to storm with jobname of the bucket id
 * 
 * @param bucket
 * @param underlying_artefacts
 * @param yarn_config_dir
 * @param user_lib_paths
 * @param topology
 * @return
 */
public static CompletableFuture<BasicMessageBean> startJob(final IStormController storm_controller,
        final DataBucketBean bucket, final Optional<String> sub_job,
        final Collection<Object> underlying_artefacts, final Collection<String> user_lib_paths,
        final StormTopology topology, final Map<String, String> config, final String cached_jar_dir) {
    if (null == topology) {
        return CompletableFuture.completedFuture(ErrorUtils.buildErrorMessage(StormControllerUtil.class,
                "startJob", ErrorUtils.TOPOLOGY_NULL_ERROR, bucket.full_name()));
    }

    _logger.info("Retrieved user Storm config topology: spouts=" + topology.get_spouts_size() + " bolts="
            + topology.get_bolts_size() + " configs=" + config.toString());

    final Set<String> jars_to_merge = new TreeSet<String>();

    final CompletableFuture<String> jar_future = Lambdas.get(() -> {
        if (RemoteStormController.class.isAssignableFrom(storm_controller.getClass())) {
            // (This is only necessary in the remote case)

            jars_to_merge.addAll(underlying_artefacts.stream()
                    .map(artefact -> LiveInjector.findPathJar(artefact.getClass(), ""))
                    .filter(f -> !f.equals("")).collect(Collectors.toSet()));

            if (jars_to_merge.isEmpty()) { // special case: no aleph2 libs found, this is almost certainly because this is being run from eclipse...
                final GlobalPropertiesBean globals = ModuleUtils.getGlobalProperties();
                _logger.warn(
                        "WARNING: no library files found, probably because this is running from an IDE - instead taking all JARs from: "
                                + (globals.local_root_dir() + "/lib/"));
                try {
                    //... and LiveInjector doesn't work on classes ... as a backup just copy everything from "<LOCAL_ALEPH2_HOME>/lib" into there
                    jars_to_merge
                            .addAll(FileUtils
                                    .listFiles(new File(globals.local_root_dir() + "/lib/"),
                                            new String[] { "jar" }, false)
                                    .stream().map(File::toString).collect(Collectors.toList()));
                } catch (Exception e) {
                    throw new RuntimeException("In eclipse/IDE mode, directory not found: "
                            + (globals.local_root_dir() + "/lib/"));
                }
            }
            //add in the user libs
            jars_to_merge.addAll(user_lib_paths);

            //create jar
            return buildOrReturnCachedStormTopologyJar(jars_to_merge, cached_jar_dir);
        } else {
            return CompletableFuture.completedFuture("/unused/dummy.jar");
        }
    });

    //submit to storm
    @SuppressWarnings("unchecked")
    final CompletableFuture<BasicMessageBean> submit_future = Lambdas.get(() -> {
        long retries = 0;
        while (retries < MAX_RETRIES) {
            try {
                _logger.debug("Trying to submit job, try: " + retries + " of " + MAX_RETRIES);
                final String jar_file_location = jar_future.get();
                return storm_controller.submitJob(bucketPathToTopologyName(bucket, sub_job), jar_file_location,
                        topology, (Map<String, Object>) (Map<String, ?>) config);
            } catch (Exception ex) {
                if (ex instanceof AlreadyAliveException) {
                    retries++;
                    //sleep 1s, was seeing about 2s of sleep required before job successfully submitted on restart
                    try {
                        Thread.sleep(1000);
                    } catch (Exception e) {
                        final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
                        error_future.completeExceptionally(e);
                        return error_future;
                    }
                } else {
                    retries = MAX_RETRIES; //we threw some other exception, bail out
                    final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
                    error_future.completeExceptionally(ex);
                    return error_future;
                }
            }
        }
        //we maxed out our retries, throw failure
        final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
        error_future.completeExceptionally(new Exception(
                "Error submitting job, ran out of retries (previous (same name) job is probably still alive)"));
        return error_future;
    });
    return submit_future;
}
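
The example above uses both idioms for producing a future without running any asynchronous work: CompletableFuture.completedFuture for a result that is already known (here an error bean), and a manually constructed future finished with completeExceptionally for a genuine failure. A minimal sketch of the distinction, with hypothetical names (on Java 9+, CompletableFuture.failedFuture collapses the second case into a single call):

import java.util.concurrent.CompletableFuture;

class ImmediateResults {
    static CompletableFuture<String> immediateValue() {
        // The value is already known, so no executor is involved.
        return CompletableFuture.completedFuture("already-known result");
    }

    static CompletableFuture<String> immediateFailure(Throwable cause) {
        // Callers observe the failure through exceptionally()/handle(),
        // exactly as if asynchronous work had thrown.
        CompletableFuture<String> failed = new CompletableFuture<>();
        failed.completeExceptionally(cause);
        return failed;
    }
}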

From source file:io.pravega.controller.server.eventProcessor.LocalController.java

@Override
public CancellableRequest<Boolean> scaleStream(final Stream stream, final List<Integer> sealedSegments,
        final Map<Double, Double> newKeyRanges, final ScheduledExecutorService executor) {
    CancellableRequest<Boolean> cancellableRequest = new CancellableRequest<>();

    startScaleInternal(stream, sealedSegments, newKeyRanges).whenComplete((startScaleResponse, e) -> {
        if (e != null) {
            cancellableRequest.start(() -> FutureHelpers.failedFuture(e), any -> true, executor);
        } else {
            final boolean started = startScaleResponse.getStatus()
                    .equals(ScaleResponse.ScaleStreamStatus.STARTED);

            cancellableRequest.start(() -> {
                if (started) {
                    return checkScaleStatus(stream, startScaleResponse.getEpoch());
                } else {
                    return CompletableFuture.completedFuture(false);
                }
            }, isDone -> !started || isDone, executor);
        }
    });

    return cancellableRequest;
}

From source file:com.ibasco.agql.protocols.valve.source.query.client.SourceRconClient.java

/**
 * <p>Sends a command to the Source server. Authentication is REQUIRED</p>
 *
 * @param address
 *         The {@link InetSocketAddress} of the source server
 * @param command
 *         The {@link String} containing the command to be issued on the server
 *
 * @return A {@link CompletableFuture} which contains a response {@link String} returned by the server
 *
 * @throws RconNotYetAuthException
 *         thrown if not yet authenticated to the server
 * @see #authenticate(InetSocketAddress, String)
 */
public CompletableFuture<String> execute(InetSocketAddress address, String command)
        throws RconNotYetAuthException {
    if (!isAuthenticated(address))
        throw new RconNotYetAuthException(
                "You are not yet authorized to access the server's rcon interface. Please authenticate first.");

    final Integer id = SourceRconUtil.createRequestId();

    CompletableFuture<String> response;

    SourceRconCmdRequest request = new SourceRconCmdRequest(address, id, command);

    if (reauthenticate && (_reauth != null && _reauth)) {
        log.debug("Re-authenticating from server");
        response = this.authenticate(address).thenCompose(status -> {
            if (status.isAuthenticated())
                return sendRequest(request);
            else
                return CompletableFuture.completedFuture("Unable to re-authenticate from server");
        });
    } else {
        log.debug("Executing command '{}' using request id: {}", command, id);
        response = sendRequest(request);
    }

    if (response != null)
        response.whenComplete(this::reauthOnError);

    return response;
}

From source file:de.ks.file.FileViewController.java

@Subscribe
public void onRefresh(ActivityLoadFinishedEvent event) {
    log.debug("Clearing files");
    files.clear();
    fileReferences.clear();
    event.<FileContainer<?>>getModel().getFiles().forEach(f -> {
        File file = fileStore.getFile(f);
        fileReferences.put(file, CompletableFuture.completedFuture(f));
        addPossibleImage(file);
        files.add(file);
    });
}

From source file:com.ikanow.aleph2.storage_service_hdfs.services.HfdsDataWriteService.java

@Override
public CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> storeObjects(List<T> new_objects) {
    setup();
    _writer.get().storeObjects(new_objects);
    return CompletableFuture
            .completedFuture(Tuples._2T(() -> Collections.emptyList(), () -> (long) new_objects.size()));
}