Example usage for java.util.concurrent CompletableFuture complete

Introduction

On this page you can find example usages of java.util.concurrent CompletableFuture.complete, taken from open source projects.

Prototype

public boolean complete(T value) 

Document

If not already completed, sets the value returned by get() and related methods to the given value. Returns true if this invocation caused this CompletableFuture to transition to a completed state, else false.
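
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the callback-to-future bridging pattern they all share: a producer thread hands its result to waiters by calling complete, and complete returns true only for the call that actually transitions the future to the completed state.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompleteExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CompletableFuture<String> future = new CompletableFuture<>();

        // The producer completes the future once its work is done.
        pool.submit(() -> {
            boolean first = future.complete("hello");
            // A second complete() is a no-op and returns false.
            boolean second = future.complete("too late");
            System.out.println("first=" + first + ", second=" + second);
        });

        // get() blocks until some thread calls complete (or completeExceptionally).
        System.out.println(future.get());
        pool.shutdown();
    }
}

Several of the examples below complete a CompletableFuture<Void> with null to signal plain success and call completeExceptionally to propagate failures; both patterns recur throughout this page.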

Usage

From source file:org.apache.samza.table.remote.couchbase.CouchbaseTableReadFunction.java

@Override
public CompletableFuture<V> getAsync(String key) {
    Preconditions.checkArgument(StringUtils.isNotBlank(key), "key must not be null, empty or blank");
    CompletableFuture<V> future = new CompletableFuture<>();
    Single<? extends Document<?>> singleObservable = bucket.async()
            .get(key, documentType, timeout.toMillis(), TimeUnit.MILLISECONDS).toSingle();
    singleObservable.subscribe(new SingleSubscriber<Document<?>>() {
        @Override
        public void onSuccess(Document<?> document) {
            if (document != null) {
                if (document instanceof BinaryDocument) {
                    handleGetAsyncBinaryDocument((BinaryDocument) document, future, key);
                } else {
                    // V is of type JsonObject
                    future.complete((V) document.content());
                }
            } else {
                // The Couchbase async client should not return null
                future.completeExceptionally(
                        new SamzaException(String.format("Got unexpected null value from key %s", key)));
            }
        }

        @Override
        public void onError(Throwable throwable) {
            if (throwable instanceof NoSuchElementException) {
                // There is no element returned by the observable, meaning the key doesn't exist.
                future.complete(null);
            } else {
                future.completeExceptionally(
                        new SamzaException(String.format("Failed to get key %s", key), throwable));
            }
        }
    });
    return future;
}

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> deleteEpochNode(int epoch) {
    CompletableFuture<Void> result = new CompletableFuture<>();
    synchronized (txnsLock) {
        if (epochTxnMap.getOrDefault(epoch, Collections.emptySet()).isEmpty()) {
            epochTxnMap.remove(epoch);
            result.complete(null);
        } else {
            result.completeExceptionally(StoreException.create(StoreException.Type.DATA_CONTAINS_ELEMENTS,
                    "Stream: " + getName() + " Epoch: " + epoch));
        }
    }
    return result;
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreManagedLedgerOffloader.java

@Override
public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid,
        Map<String, String> offloadDriverMetadata) {
    String readBucket = getReadBucket(offloadDriverMetadata);
    BlobStore readBlobstore = getReadBlobStore(offloadDriverMetadata);

    CompletableFuture<Void> promise = new CompletableFuture<>();
    scheduler.chooseThread(ledgerId).submit(() -> {
        try {
            readBlobstore.removeBlobs(readBucket,
                    ImmutableList.of(dataBlockOffloadKey(ledgerId, uid), indexBlockOffloadKey(ledgerId, uid)));
            promise.complete(null);
        } catch (Throwable t) {
            log.error("Failed delete Blob", t);
            promise.completeExceptionally(t);
        }
    });

    return promise;
}

From source file:org.pentaho.di.ui.repo.controller.RepositoryConnectController.java

public String browse() {
    Spoon spoon = spoonSupplier.get();
    CompletableFuture<String> name = new CompletableFuture<>();
    Runnable execute = () -> {
        DirectoryDialog directoryDialog = new DirectoryDialog(spoonSupplier.get().getShell());
        name.complete(directoryDialog.open());
    };
    if (spoon.getShell() != null) {
        spoon.getShell().getDisplay().asyncExec(execute);
    } else {
        execute.run();
    }
    try {
        return name.get();
    } catch (Exception e) {
        return "/";
    }
}

From source file:org.apache.pulsar.tests.integration.utils.DockerUtils.java

public static ContainerExecResult runCommand(DockerClient docker, String containerId, String... cmd)
        throws ContainerExecException {
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    String execid = docker.execCreateCmd(containerId).withCmd(cmd).withAttachStderr(true).withAttachStdout(true)
            .exec().getId();
    String cmdString = Arrays.stream(cmd).collect(Collectors.joining(" "));
    StringBuilder stdout = new StringBuilder();
    StringBuilder stderr = new StringBuilder();
    docker.execStartCmd(execid).withDetach(false).exec(new ResultCallback<Frame>() {
        @Override
        public void close() {
        }

        @Override
        public void onStart(Closeable closeable) {
            LOG.info("DOCKER.exec({}:{}): Executing...", containerId, cmdString);
        }

        @Override
        public void onNext(Frame object) {
            LOG.info("DOCKER.exec({}:{}): {}", containerId, cmdString, object);
            if (StreamType.STDOUT == object.getStreamType()) {
                stdout.append(new String(object.getPayload(), UTF_8));
            } else if (StreamType.STDERR == object.getStreamType()) {
                stderr.append(new String(object.getPayload(), UTF_8));
            }
        }

        @Override
        public void onError(Throwable throwable) {
            future.completeExceptionally(throwable);
        }

        @Override
        public void onComplete() {
            LOG.info("DOCKER.exec({}:{}): Done", containerId, cmdString);
            future.complete(true);
        }
    });
    future.join();

    InspectExecResponse resp = docker.inspectExecCmd(execid).exec();
    while (resp.isRunning()) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(ie);
        }
        resp = docker.inspectExecCmd(execid).exec();
    }
    int retCode = resp.getExitCode();
    ContainerExecResult result = ContainerExecResult.of(retCode, stdout.toString(), stderr.toString());
    LOG.info("DOCKER.exec({}:{}): completed with {}", containerId, cmdString, retCode);

    if (retCode != 0) {
        throw new ContainerExecException(cmdString, containerId, result);
    }
    return result;
}

From source file:com.ikanow.aleph2.distributed_services.services.CoreDistributedServices.java

/** Joins the Akka cluster
 */
protected void joinAkkaCluster() {
    if (!_akka_system.isSet()) {
        this.getAkkaSystem(); // (this will also join the cluster)
        return;
    }
    if (!_has_joined_akka_cluster) {
        _has_joined_akka_cluster = true;

        // WORKAROUND FOR BUG IN akka-cluster/akka-zookeeper-seed: if it grabs the old ephemeral connection info of master then bad things can happen
        // so wait until a ZK node that I create for this purpose is removed (so the others also should have been)
        final String application_name = _config_bean.application_name();
        final String hostname_application = DistributedServicesPropertyBean.ZOOKEEPER_APPLICATION_LOCK + "/"
                + ZookeeperUtils.getHostname() + ":" + application_name;
        if (null == application_name) {
            logger.info("(This is a transient application, cannot be the master)");
        } else {
            logger.info("Checking for old ZK artefacts from old instance of this application path="
                    + hostname_application);
            final int MAX_ZK_ATTEMPTS = 6;
            int i = 0;
            for (i = 0; i <= MAX_ZK_ATTEMPTS; ++i) {
                try {
                    this.getCuratorFramework().create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL)
                            .forPath(hostname_application);

                    Thread.sleep(2000L); // (Wait a little longer)
                    break;
                } catch (Exception e) {
                    logger.warn(
                            ErrorUtils.get("Waiting for old instance to be cleared out (err={0}), retrying={1}",
                                    e.getMessage(), i < MAX_ZK_ATTEMPTS));
                    try {
                        Thread.sleep(10000L);
                    } catch (Exception __) {
                    }
                }
            }
            if (i > MAX_ZK_ATTEMPTS) {
                // (hostname_application already includes the ZK application lock prefix)
                throw new RuntimeException("Failed to clear out lock, not clear why - try removing by hand: "
                        + hostname_application);
            }
        }

        ZookeeperClusterSeed.get(_akka_system.get()).join();

        _shutdown_hook.set(Lambdas.wrap_runnable_u(() -> {
            try {
                final CompletableFuture<Unit> wait_for_member_to_leave = new CompletableFuture<>();
                Cluster.get(_akka_system.get())
                        .registerOnMemberRemoved(() -> wait_for_member_to_leave.complete(Unit.unit()));

                _joined_akka_cluster = new CompletableFuture<>(); //(mainly just for testing)
                Cluster.get(_akka_system.get()).leave(ZookeeperClusterSeed.get(_akka_system.get()).address());

                // If it's an application, not transient, then handle synchronization
                try {
                    System.out
                            .println(new java.util.Date() + ": Akka cluster management: Shutting down in ~10s");
                    logger.error("(Not really an error) Shutting down in ~10s");
                } catch (Throwable e) {
                } // logging might not still work at this point

                // (don't delete the ZK node - appear to still be able to run into race problems if you do, left here to remind me):
                //if (null != application_name) {
                //   this.getCuratorFramework().delete().deletingChildrenIfNeeded().forPath(hostname_application);
                //}
                try {
                    wait_for_member_to_leave.get(10L, TimeUnit.SECONDS);
                } catch (Throwable e) {
                    try {
                        System.out.println(new java.util.Date()
                                + ": Akka cluster management: Akka Cluster departure was not able to complete in time: "
                                + e.getMessage());
                        logger.error("Akka Cluster departure was not able to complete in time");
                    } catch (Throwable ee) {
                    } // logging might not still work at this point               
                }
                try {
                    Await.result(_akka_system.get().terminate(), Duration.create(10L, TimeUnit.SECONDS));
                } catch (Throwable e) {
                    try {
                        System.out.println(new java.util.Date()
                                + ": Akka cluster management: Akka System termination was not able to complete in time: "
                                + e.getMessage());
                        logger.error("Akka System termination was not able to complete in time");
                    } catch (Throwable ee) {
                    } // logging might not still work at this point                              
                }

                // All done

                try {
                    System.out.println(new java.util.Date()
                            + ": Akka cluster management:  Akka shut down complete, now exiting");
                    logger.error("(Not really an error) Akka shut down complete, now exiting");
                } catch (Throwable e) {
                } // logging might not still work at this point
            } catch (Throwable t) { // (unknown error, we'll print and log this)
                try {
                    t.printStackTrace();
                    logger.error(ErrorUtils.getLongForm("{0}", t));
                } catch (Throwable e) {
                } // logging might not still work at this point
            }
        }));
        Cluster.get(_akka_system.get()).registerOnMemberUp(() -> {
            logger.info("Joined cluster address=" + ZookeeperClusterSeed.get(_akka_system.get()).address()
                    + ", adding shutdown hook");
            synchronized (_joined_akka_cluster) { // (prevents a race condition vs runOnAkkaJoin)
                _joined_akka_cluster.complete(true);
            }
            // Now register a shutdown hook
            Runtime.getRuntime().addShutdownHook(new Thread(_shutdown_hook.get()));

            _post_join_task_list.stream().parallel().forEach(retval_task -> {
                try {
                    retval_task._2().run();
                    retval_task._1().complete(null);
                } catch (Throwable t) {
                    retval_task._1().completeExceptionally(t);
                }
            });
        });
    }
}

From source file:org.apache.pulsar.client.impl.HttpLookupService.java

@Override
public CompletableFuture<List<String>> getTopicsUnderNamespace(NamespaceName namespace, Mode mode) {
    CompletableFuture<List<String>> future = new CompletableFuture<>();

    String format = namespace.isV2() ? "admin/v2/namespaces/%s/topics?mode=%s"
            : "admin/namespaces/%s/destinations?mode=%s";
    httpClient.get(String.format(format, namespace, mode.toString()), String[].class).thenAccept(topics -> {
        List<String> result = Lists.newArrayList();
        // do not keep partition part of topic name
        Arrays.asList(topics).forEach(topic -> {
            String filtered = TopicName.get(topic).getPartitionedTopicName();
            if (!result.contains(filtered)) {
                result.add(filtered);
            }
        });
        future.complete(result);
    }).exceptionally(ex -> {
        log.warn("Failed to getTopicsUnderNamespace namespace: {}: {}", namespace, ex.getMessage());
        future.completeExceptionally(ex);
        return null;
    });
    return future;
}

From source file:org.apache.distributedlog.lock.ZKSessionLock.java

/**
 * Get client id and its ephemeral owner.
 *
 * @param zkClient
 *          zookeeper client
 * @param lockPath
 *          lock path
 * @param nodeName
 *          node name
 * @return client id and its ephemeral owner.
 */
static CompletableFuture<Pair<String, Long>> asyncParseClientID(ZooKeeper zkClient, String lockPath,
        String nodeName) {
    String[] parts = nodeName.split("_");
    // member_<clientid>_s<owner_session>_
    if (4 == parts.length && parts[2].startsWith("s")) {
        long sessionOwner = Long.parseLong(parts[2].substring(1));
        String clientId;
        try {
            clientId = URLDecoder.decode(parts[1], UTF_8.name());
            return FutureUtils.value(Pair.of(clientId, sessionOwner));
        } catch (UnsupportedEncodingException e) {
            // if failed to parse client id, we have to get client id by zookeeper#getData.
        }
    }
    final CompletableFuture<Pair<String, Long>> promise = new CompletableFuture<Pair<String, Long>>();
    zkClient.getData(lockPath + "/" + nodeName, false, new AsyncCallback.DataCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
            if (KeeperException.Code.OK.intValue() != rc) {
                promise.completeExceptionally(KeeperException.create(KeeperException.Code.get(rc)));
            } else {
                promise.complete(Pair.of(deserializeClientId(data), stat.getEphemeralOwner()));
            }
        }
    }, null);
    return promise;
}

From source file:org.apache.pulsar.compaction.TwoPhaseCompactor.java

private CompletableFuture<LedgerHandle> createLedger(BookKeeper bk, Map<String, byte[]> metadata) {
    CompletableFuture<LedgerHandle> bkf = new CompletableFuture<>();
    bk.asyncCreateLedger(conf.getManagedLedgerDefaultEnsembleSize(), conf.getManagedLedgerDefaultWriteQuorum(),
            conf.getManagedLedgerDefaultAckQuorum(), Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE,
            Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD, (rc, ledger, ctx) -> {
                if (rc != BKException.Code.OK) {
                    bkf.completeExceptionally(BKException.create(rc));
                } else {
                    bkf.complete(ledger);
                }
            }, null, metadata);
    return bkf;
}

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> setStateData(Data<Integer> newState) {
    Preconditions.checkNotNull(newState);

    CompletableFuture<Void> result = new CompletableFuture<>();
    synchronized (lock) {
        if (Objects.equals(this.state.getVersion(), newState.getVersion())) {
            this.state = new Data<>(newState.getData(), newState.getVersion() + 1);
            result.complete(null);
        } else {
            result.completeExceptionally(StoreException.create(StoreException.Type.WRITE_CONFLICT, getName()));
        }
    }

    return result;
}