Example usage for java.util.concurrent CompletableFuture complete

List of usage examples for java.util.concurrent CompletableFuture complete

Introduction

This page collects real-world usage examples for java.util.concurrent CompletableFuture complete.

Prototype

public boolean complete(T value) 

Document

If not already completed, sets the value returned by get() and related methods to the given value. Returns true if this invocation caused this CompletableFuture to transition to a completed state, else false.
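
As a minimal, self-contained illustration of these semantics (a sketch, not taken from any of the projects below): the first complete() call wins and returns true; later calls return false and leave the result unchanged.

import java.util.concurrent.CompletableFuture;

public class CompleteExample {
    public static void main(String[] args) {
        CompletableFuture<String> future = new CompletableFuture<>();

        boolean first = future.complete("hello");  // true: future transitions to completed
        boolean second = future.complete("world"); // false: already completed, value ignored

        System.out.println(future.join());        // prints "hello"
        System.out.println(first + " " + second); // prints "true false"
    }
}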

Usage

From source file:org.apache.servicecomb.foundation.vertx.http.ReadStreamPart.java

protected void onFileOpened(File file, AsyncResult<AsyncFile> ar, CompletableFuture<File> future) {
    if (ar.failed()) {
        future.completeExceptionally(ar.cause());
        return;
    }

    AsyncFile asyncFile = ar.result();
    CompletableFuture<Void> saveFuture = saveToWriteStream(asyncFile);
    saveFuture.whenComplete((v, saveException) -> {
        asyncFile.close(closeAr -> {
            if (closeAr.failed()) {
                LOGGER.error("Failed to close file {}.", file);
            }

            // Whether the close succeeds or fails does not affect the result;
            // the outcome depends only on the write.
            if (saveException == null) {
                future.complete(file);
                return;
            }

            future.completeExceptionally(saveException);
        });
    });
}
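
This example adapts Vert.x's callback-style file API to a CompletableFuture: failures are propagated with completeExceptionally(), while complete(file) is deferred until the stream has been written and the file closed, so the future settles exactly once on either path.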

From source file:io.pravega.controller.store.stream.ZKStream.java

@Override
CompletableFuture<Data<Integer>> getMarkerData(int segmentNumber) {
    final CompletableFuture<Data<Integer>> result = new CompletableFuture<>();
    final String path = ZKPaths.makePath(markerPath, String.format("%d", segmentNumber));
    cache.getCachedData(path).whenComplete((res, ex) -> {
        if (ex != null) {
            Throwable cause = ExceptionHelpers.getRealException(ex);
            if (cause instanceof StoreException.DataNotFoundException) {
                result.complete(null);
            } else {
                result.completeExceptionally(cause);
            }
        } else {
            result.complete(res);
        }
    });

    return result;
}
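
Note the use of complete(null): a missing marker node is an expected condition, so the future is completed normally with a null value, while completeExceptionally() is reserved for unexpected store failures.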

From source file:org.auraframework.modules.impl.ModulesCompilerJ2V8.java

private ModulesCompilerData compile(String entry, String options) throws Exception {
    String script = "" + "const compiler = require('" + ModulesCompilerUtil.COMPILER_JS_PATH + "');"
            + "const promise = compiler.compile('" + entry + "', " + options + ");"
            + "promise.then(onResultCallback).catch(onErrorCallback);";

    CompletableFuture<ModulesCompilerData> future = new CompletableFuture<>();

    JavaVoidCallback onErrorCallback = new JavaVoidCallback() {
        @Override
        public void invoke(final V8Object receiver, final V8Array parameters) {
            String error = parameters.toString();
            future.completeExceptionally(new RuntimeException(error));
            logger.warning("ModulesCompilerJ2v8: error " + entry + ": " + error);
        }
    };
    JavaVoidCallback onResultCallback = new JavaVoidCallback() {
        @Override
        public void invoke(final V8Object receiver, final V8Array parameters) {
            ModulesCompilerData data = ModulesCompilerUtil.parseCompilerOutput(parameters.getObject(0));
            future.complete(data);
        }
    };

    NodeJS nodeJS = J2V8Util.createNodeJS();

    MemoryManager memoryManager = new MemoryManager(nodeJS.getRuntime());
    nodeJS.getRuntime().registerJavaMethod(onErrorCallback, "onErrorCallback");
    nodeJS.getRuntime().registerJavaMethod(onResultCallback, "onResultCallback");

    File tempScript = ModulesCompilerUtil.createTempScriptFile(script, "temp");
    try {
        nodeJS.exec(tempScript);
        while (nodeJS.isRunning()) {
            nodeJS.handleMessage();
        }
    } finally {
        memoryManager.release();
        nodeJS.release();
        tempScript.delete();
    }

    return future.get();
}
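
The two JavaVoidCallbacks bridge the JavaScript promise back into the CompletableFuture. Pumping handleMessage() until the Node.js event loop drains means that, assuming one of the two callbacks fires, the future is already settled by the time future.get() is reached, so the final get() does not block.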

From source file:io.pravega.service.server.host.ZKSegmentContainerMonitorTest.java

@Test
public void testStartAndStopContainer() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);

    SegmentContainerRegistry containerRegistry = createMockContainerRegistry();
    @Cleanup
    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));

    // Simulate a container that starts successfully.
    CompletableFuture<ContainerHandle> startupFuture = new CompletableFuture<>();
    ContainerHandle containerHandle = mock(ContainerHandle.class);
    when(containerHandle.getContainerId()).thenReturn(2);
    when(containerRegistry.startContainer(eq(2), any())).thenReturn(startupFuture);

    // Now modify the ZK entry.
    HashMap<Host, Set<Integer>> currentData = deserialize(zkClient, PATH);
    currentData.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(2));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));

    // Container finished starting.
    startupFuture.complete(containerHandle);
    verify(containerRegistry, timeout(10000).atLeastOnce()).startContainer(eq(2), any());

    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(2));

    // Now modify the ZK entry. Remove container 2 and add 1.
    HashMap<Host, Set<Integer>> newMapping = new HashMap<>();
    newMapping.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(1));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(newMapping));

    // Verify that stop is called and only the newly added container is in running state.
    when(containerRegistry.stopContainer(any(), any())).thenReturn(CompletableFuture.completedFuture(null));
    verify(containerRegistry, timeout(10000).atLeastOnce()).stopContainer(any(), any());

    // Using wait here to ensure the private data structure is updated.
    // TODO: Removing dependency on sleep here and other places in this class
    // - https://github.com/pravega/pravega/issues/1079
    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(1));
}
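
Because the mocked registry returns the hand-built startupFuture, the test controls exactly when the container "finishes" starting: calling startupFuture.complete(containerHandle) releases the monitor's startup path at a deterministic point.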

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> setConfigurationData(StreamConfiguration configuration) {
    Preconditions.checkNotNull(configuration);

    CompletableFuture<Void> result = new CompletableFuture<>();

    synchronized (lock) {
        if (this.configuration == null) {
            result.completeExceptionally(StoreException.create(StoreException.Type.DATA_NOT_FOUND, getName()));
        } else {
            this.configuration = configuration;
            result.complete(null);
        }
    }
    return result;
}
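
For a CompletableFuture<Void> there is no meaningful value to deliver, so complete(null) is the conventional way to signal normal completion; here it marks the configuration update as applied once the lock-guarded state change succeeds.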

From source file:com.ikanow.aleph2.storm.harvest_technology.StormHarvestTechnologyModule.java

@Override
public CompletableFuture<BasicMessageBean> onNewSource(DataBucketBean new_bucket, IHarvestContext context,
        boolean enabled) {
    logger.info("received new source request, enabled: " + enabled);
    CompletableFuture<BasicMessageBean> future = new CompletableFuture<BasicMessageBean>();
    if (enabled) {

        //build out a topology for these config options
        String job_name = getJobName(new_bucket);
        StormTopology topology = null;
        try {
            topology = StormHarvestTechnologyTopologyUtil.createTopology(new_bucket.harvest_configs(), job_name,
                    context, new_bucket);
        } catch (Exception e) {
            //set failure in completable future
            future.complete(new BasicMessageBean(new Date(), false, null, "onNewSource", null,
                    ErrorUtils.getLongForm("{0}", e), null));
            return future;
        }

        try {
            //step1 create a megajar from:
            //context.getHarvestLibraries(Optional.of(new_bucket));
            //and whatever jars i need to read raw data, parse that data, output to context.stream();               
            //step2 send this jar + topology to storm so it starts   
            logger.debug("creating jar to submit");
            final String input_jar_location = System.getProperty("java.io.tmpdir") + File.separator + job_name
                    + ".jar";
            List<String> jars_to_merge = new ArrayList<String>();
            jars_to_merge.addAll(context.getHarvestContextLibraries(Optional.empty()));
            if (isOnlyHadoopDep(jars_to_merge)) { // special case: no aleph2 libs found, this is almost certainly because this is being run from eclipse...
                final GlobalPropertiesBean globals = ModuleUtils.getGlobalProperties();
                logger.warn(
                        "WARNING: no library files found, probably because this is running from an IDE - instead taking all JARs from: "
                                + (globals.local_root_dir() + "/lib/"));
                try {
                    //... and LiveInjecter doesn't work on classes ... as a backup just copy everything from "<LOCAL_ALEPH2_HOME>/lib" into there 
                    jars_to_merge
                            .addAll(FileUtils
                                    .listFiles(new File(globals.local_root_dir() + "/lib/"),
                                            new String[] { "jar" }, false)
                                    .stream().map(File::toString).filter(file -> {
                                        return !(file.contains("aleph2_storm_dependencies")
                                                || file.contains("aleph2_analytical_services_storm"));
                                    }).collect(Collectors.toList()));
                } catch (Exception e) {
                    throw new RuntimeException("In eclipse/IDE mode, directory not found: "
                            + (globals.local_root_dir() + "/lib/"));
                }
            }

            //filter the harvester out of the harvest libraries
            Map<String, String> harvest_libraries = context.getHarvestLibraries(Optional.of(new_bucket)).get();
            //kick the harvest library out of our jar (it contains storm.jar which we can't send to storm)
            List<String> harvest_library_paths = harvest_libraries.keySet().stream()
                    .filter(name -> !name.contains(new_bucket.harvest_technology_name_or_id()))
                    .map(name -> harvest_libraries.get(name)).collect(Collectors.toList());
            jars_to_merge.addAll(harvest_library_paths);

            JarBuilderUtil.mergeJars(jars_to_merge, input_jar_location, dirs_to_ignore);
            StormControllerUtil.startJob(storm_controller, job_name, input_jar_location, topology);

            //verify job was assigned some executors
            TopologyInfo info = StormControllerUtil.getJobStats(storm_controller, job_name);
            if (info.get_executors_size() == 0) {
                //no executors were available for this job, stop the job, throw an error
                StormControllerUtil.stopJob(storm_controller, job_name);
                future.complete(new BasicMessageBean(new Date(), false, null, "onNewSource", null,
                        "No executors were assigned to this job, typically this is because too many jobs are currently running, kill some other jobs and resubmit.",
                        null));
                return future;
            }
        } catch (Exception e) {
            //set failure in completable future
            future.complete(new BasicMessageBean(new Date(), false, null, "onNewSource", null,
                    ErrorUtils.getLongForm("{0}", e), null));
            return future;
        }
    }

    //TODO return something useful
    future.complete(new BasicMessageBean(new Date(), true, null, "onNewSource", null, null, null));
    return future;
}
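
Note that failures here are reported by completing the future normally with a BasicMessageBean whose success flag is false, rather than via completeExceptionally(); callers are expected to inspect the bean to distinguish success from failure.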

From source file:io.pravega.segmentstore.server.host.ZKSegmentContainerMonitorTest.java

@Test
public void testStartAndStopContainer() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);

    SegmentContainerRegistry containerRegistry = createMockContainerRegistry();
    @Cleanup
    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));

    // Simulate a container that starts successfully.
    CompletableFuture<ContainerHandle> startupFuture = new CompletableFuture<>();
    ContainerHandle containerHandle = mock(ContainerHandle.class);
    when(containerHandle.getContainerId()).thenReturn(2);
    when(containerRegistry.startContainer(eq(2), any())).thenReturn(startupFuture);

    // Now modify the ZK entry.
    HashMap<Host, Set<Integer>> currentData = deserialize(zkClient, PATH);
    currentData.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(2));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));

    // Container finished starting.
    startupFuture.complete(containerHandle);
    verify(containerRegistry, timeout(1000).atLeastOnce()).startContainer(eq(2), any());

    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(2));

    // Now modify the ZK entry. Remove container 2 and add 1.
    HashMap<Host, Set<Integer>> newMapping = new HashMap<>();
    newMapping.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(1));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(newMapping));

    // Verify that stop is called and only the newly added container is in running state.
    when(containerRegistry.stopContainer(any(), any())).thenReturn(CompletableFuture.completedFuture(null));
    verify(containerRegistry, timeout(1000).atLeastOnce()).stopContainer(any(), any());

    // Using wait here to ensure the private data structure is updated.
    // TODO: Removing dependency on sleep here and other places in this class
    // - https://github.com/pravega/pravega/issues/1079
    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(1));
}

From source file:org.apache.pulsar.client.impl.ClientCnx.java

@Override
protected void handleSuccess(CommandSuccess success) {
    checkArgument(state == State.Ready);

    if (log.isDebugEnabled()) {
        log.debug("{} Received success response from server: {}", ctx.channel(), success.getRequestId());
    }
    long requestId = success.getRequestId();
    CompletableFuture<ProducerResponse> requestFuture = pendingRequests.remove(requestId);
    if (requestFuture != null) {
        requestFuture.complete(null);
    } else {
        log.warn("{} Received unknown request id from server: {}", ctx.channel(), success.getRequestId());
    }
}

From source file:org.apache.pulsar.client.impl.ClientCnx.java

@Override
protected void handleProducerSuccess(CommandProducerSuccess success) {
    checkArgument(state == State.Ready);

    if (log.isDebugEnabled()) {
        log.debug("{} Received producer success response from server: {} - producer-name: {}", ctx.channel(),
                success.getRequestId(), success.getProducerName());
    }
    long requestId = success.getRequestId();
    CompletableFuture<ProducerResponse> requestFuture = pendingRequests.remove(requestId);
    if (requestFuture != null) {
        requestFuture.complete(new ProducerResponse(success.getProducerName(), success.getLastSequenceId(),
                success.getSchemaVersion().toByteArray()));
    } else {
        log.warn("{} Received unknown request id from server: {}", ctx.channel(), success.getRequestId());
    }
}
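
Both ClientCnx handlers follow the same request-correlation pattern: each outgoing request registers a pending future keyed by request id, and the matching response handler removes and completes it, so every future settles at most once. A minimal sketch of that pattern, with hypothetical names (RequestCorrelator, newRequest, and onResponse are illustrative, not Pulsar API):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

class RequestCorrelator<T> {
    private final Map<Long, CompletableFuture<T>> pendingRequests = new ConcurrentHashMap<>();

    // Register a future before sending the request with this id.
    CompletableFuture<T> newRequest(long requestId) {
        CompletableFuture<T> future = new CompletableFuture<>();
        pendingRequests.put(requestId, future);
        return future;
    }

    // remove() ensures each future is completed at most once,
    // even if duplicate responses arrive for the same id.
    void onResponse(long requestId, T response) {
        CompletableFuture<T> future = pendingRequests.remove(requestId);
        if (future != null) {
            future.complete(response);
        }
    }
}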

From source file:org.apache.pulsar.functions.instance.JavaInstanceRunnableProcessTest.java

@Test
public void testAtMostOnceProcessing() throws Exception {
    FunctionDetails newFunctionDetails = FunctionDetails.newBuilder(functionDetails)
            .setProcessingGuarantees(ProcessingGuarantees.ATMOST_ONCE).build();
    config.setFunctionDetails(newFunctionDetails);

    @Cleanup("shutdown")
    ExecutorService executorService = Executors.newSingleThreadExecutor();

    try (JavaInstanceRunnable runnable = new JavaInstanceRunnable(config, fnCache, "test-jar-file", mockClient,
            null)) {

        executorService.submit(runnable);

        Pair<String, String> consumerId = Pair.of(newFunctionDetails.getInputs(0),
                FunctionDetailsUtils.getFullyQualifiedName(newFunctionDetails));
        ConsumerInstance consumerInstance = mockConsumers.get(consumerId);
        while (null == consumerInstance) {
            TimeUnit.MILLISECONDS.sleep(20);
            consumerInstance = mockConsumers.get(consumerId);
        }

        ProducerInstance producerInstance = mockProducers.values().iterator().next();

        // once we get consumer id, simulate receiving 10 messages from consumer
        for (int i = 0; i < 10; i++) {
            Message msg = mock(Message.class);
            when(msg.getData()).thenReturn(("message-" + i).getBytes(UTF_8));
            when(msg.getMessageId()).thenReturn(new MessageIdImpl(1L, i, 0));
            consumerInstance.addMessage(msg);
            consumerInstance.getConf().getMessageListener().received(consumerInstance.getConsumer(), msg);
        }

        // wait until all the messages are published
        for (int i = 0; i < 10; i++) {
            Message msg = producerInstance.msgQueue.take();

            assertEquals("message-" + i + "!", new String(msg.getData(), UTF_8));
            // sequence id is not set for AT_MOST_ONCE processing
            assertEquals(0L, msg.getSequenceId());
        }

        // verify acknowledge before send completes
        verify(consumerInstance.getConsumer(), times(10)).acknowledgeAsync(any(Message.class));
        assertEquals(0, consumerInstance.getNumMessages());

        // complete all the publishes
        synchronized (producerInstance) {
            for (CompletableFuture<MessageId> future : producerInstance.sendFutures) {
                future.complete(mock(MessageId.class));
            }
        }

        // acknowledges count should remain same
        verify(consumerInstance.getConsumer(), times(10)).acknowledgeAsync(any(Message.class));
    }
}
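
The test deliberately withholds publish acknowledgements: the producer's pending send futures stay incomplete until the loop calls complete(mock(MessageId.class)) on each. This lets the test verify that, under ATMOST_ONCE guarantees, messages are acknowledged before their sends complete.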