Example usage for java.util.concurrent CompletableFuture complete

List of usage examples for java.util.concurrent CompletableFuture complete

Introduction

On this page you can find example usage of java.util.concurrent CompletableFuture complete.

Prototype

public boolean complete(T value) 

Document

If not already completed, sets the value returned by #get() and related methods to the given value.
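
Before the project examples, here is a minimal self-contained sketch of the basic pattern (class and variable names are illustrative only, not taken from the examples below): one thread blocks on get() while another supplies the result with complete().

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class CompleteExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        CompletableFuture<String> future = new CompletableFuture<>();

        // Another thread (e.g. a callback or worker) supplies the value later.
        new Thread(() -> {
            // complete() returns true only for the call that actually completes the future;
            // any later complete()/completeExceptionally() calls have no effect.
            boolean firstCompletion = future.complete("hello");
            System.out.println("completed by this call: " + firstCompletion);
        }).start();

        // get() blocks until the future is completed, then returns the value passed to complete().
        System.out.println(future.get());
    }
}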

Usage

From source file:com.helion3.prism.api.query.QueryBuilder.java

/**
 * Builds a {@link Query} by parsing an array of arguments.
 *
 * @param session QuerySession The query session
 * @param arguments String[] Parameter:value list
 * @return {@link Query} Database query object
 */
public static CompletableFuture<Query> fromArguments(QuerySession session, @Nullable String[] arguments)
        throws ParameterException {
    checkNotNull(session);

    Query query = new Query();
    CompletableFuture<Query> future = new CompletableFuture<Query>();

    // Track all parameter pairs
    Map<String, String> definedParameters = new HashMap<String, String>();

    if (arguments.length > 0) {
        List<ListenableFuture<?>> futures = new ArrayList<ListenableFuture<?>>();
        for (String arg : arguments) {
            Optional<ListenableFuture<?>> listenable;

            if (flagPattern.matcher(arg).matches()) {
                listenable = parseFlagFromArgument(session, query, arg);
            } else {
                // Get alias/value pair
                Pair<String, String> pair = getParameterKeyValue(arg);

                // Parse for handler
                listenable = parseParameterFromArgument(session, query, pair);

                // Add to list of defined
                definedParameters.put(pair.getKey(), pair.getValue());
            }

            if (listenable.isPresent()) {
                futures.add(listenable.get());
            }
        }

        if (!futures.isEmpty()) {
            ListenableFuture<List<Object>> combinedFuture = Futures.allAsList(futures);
            combinedFuture.addListener(new Runnable() {
                @Override
                public void run() {
                    future.complete(query);
                }
            }, MoreExecutors.sameThreadExecutor());
        } else {
            future.complete(query);
        }
    } else {
        future.complete(query);
    }

    if (Prism.getConfig().getNode("defaults", "enabled").getBoolean()) {
        // Require any parameter defaults
        String defaultsUsed = "";
        for (ParameterHandler handler : Prism.getParameterHandlers()) {
            boolean aliasFound = false;

            for (String alias : handler.getAliases()) {
                if (definedParameters.containsKey(alias)) {
                    aliasFound = true;
                    break;
                }
            }

            if (!aliasFound) {
                Optional<Pair<String, String>> pair = handler.processDefault(session, query);
                if (pair.isPresent()) {
                    defaultsUsed += pair.get().getKey() + ":" + pair.get().getValue() + " ";
                }
            }
        }

        // @todo should move this
        if (!defaultsUsed.isEmpty()) {
            session.getCommandSource().get().sendMessage(
                    Format.subduedHeading(Text.of(String.format("Defaults used: %s", defaultsUsed))));
        }
    }

    return future;
}

From source file:org.apache.pulsar.compaction.CompactedTopicTest.java

/**
 * Build a compacted ledger, and return the id of the ledger, the positions of the
 * entries in the ledger, and a list of gaps together with the entry that should be returned after each gap.
 */
private Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> buildCompactedLedger(
        BookKeeper bk, int count) throws Exception {
    LedgerHandle lh = bk.createLedger(1, 1, Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE,
            Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
    List<Pair<MessageIdData, Long>> positions = new ArrayList<>();
    List<Pair<MessageIdData, Long>> idsInGaps = new ArrayList<>();

    AtomicLong ledgerIds = new AtomicLong(10L);
    AtomicLong entryIds = new AtomicLong(0L);
    CompletableFuture.allOf(IntStream.range(0, count).mapToObj((i) -> {
        List<MessageIdData> idsInGap = new ArrayList<MessageIdData>();
        if (r.nextInt(10) == 1) {
            long delta = r.nextInt(10) + 1;
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.get() + 1)
                    .build());
            ledgerIds.addAndGet(delta);
            entryIds.set(0);
        }
        long delta = r.nextInt(5);
        if (delta != 0) {
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.get() + 1)
                    .build());
        }
        MessageIdData id = MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                .setEntryId(entryIds.addAndGet(delta + 1)).build();

        @Cleanup
        RawMessage m = new RawMessageImpl(id, Unpooled.EMPTY_BUFFER);

        CompletableFuture<Void> f = new CompletableFuture<>();
        ByteBuf buffer = m.serialize();

        lh.asyncAddEntry(buffer, (rc, ledger, eid, ctx) -> {
            if (rc != BKException.Code.OK) {
                f.completeExceptionally(BKException.create(rc));
            } else {
                positions.add(Pair.of(id, eid));
                idsInGap.forEach((gid) -> idsInGaps.add(Pair.of(gid, eid)));
                f.complete(null);
            }
        }, null);
        return f;
    }).toArray(CompletableFuture[]::new)).get();
    lh.close();

    return Triple.of(lh.getId(), positions, idsInGaps);
}

From source file:org.onosproject.store.consistent.impl.DistributedLeadershipManager.java

private void electLeaders() {
    try {
        candidateMap.entrySet().forEach(entry -> {
            String path = entry.getKey();
            Versioned<List<NodeId>> candidates = entry.getValue();
            // for active topics, check if this node can become a leader (if it isn't already)
            if (activeTopics.contains(path)) {
                lockExecutor.submit(() -> {
                    Leadership leadership = electLeader(path, candidates.value());
                    if (leadership != null) {
                        CompletableFuture<Leadership> future = pendingFutures.remove(path);
                        if (future != null) {
                            future.complete(leadership);
                        }
                    }
                });
            }
            // Raise a CANDIDATES_CHANGED event to force refresh local candidate board
            // and also to update local listeners.
            // Don't worry about duplicate events as they will be suppressed.
            onLeadershipEvent(new LeadershipEvent(LeadershipEvent.Type.CANDIDATES_CHANGED,
                    new Leadership(path, candidates.value(), candidates.version(), candidates.creationTime())));
        });
    } catch (Exception e) {
        log.debug("Failure electing leaders", e);
    }
}

From source file:org.apache.distributedlog.BookKeeperClient.java

public CompletableFuture<LedgerHandle> createLedger(int ensembleSize, int writeQuorumSize, int ackQuorumSize) {
    BookKeeper bk;
    try {
        bk = get();
    } catch (IOException ioe) {
        return FutureUtils.exception(ioe);
    }
    final CompletableFuture<LedgerHandle> promise = new CompletableFuture<LedgerHandle>();
    bk.asyncCreateLedger(ensembleSize, writeQuorumSize, ackQuorumSize, BookKeeper.DigestType.CRC32, passwd,
            new AsyncCallback.CreateCallback() {
                @Override
                public void createComplete(int rc, LedgerHandle lh, Object ctx) {
                    if (BKException.Code.OK == rc) {
                        promise.complete(lh);
                    } else {
                        promise.completeExceptionally(BKException.create(rc));
                    }
                }
            }, null, Collections.emptyMap());
    return promise;
}
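
The BookKeeper example above follows a pattern that recurs in most of the snippets on this page: create a CompletableFuture up front, hand a callback to an asynchronous API, complete the future (or complete it exceptionally) from inside the callback, and return the future to the caller. Below is a minimal generic sketch of that adapter, assuming a hypothetical callback-style interface named CallbackApi (not part of any of the libraries shown here):

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class CallbackToFuture {

    // Hypothetical callback-style API, defined here only to illustrate the adapter.
    interface CallbackApi<T> {
        void start(BiConsumer<T, Throwable> callback);
    }

    static <T> CompletableFuture<T> toFuture(CallbackApi<T> api) {
        CompletableFuture<T> promise = new CompletableFuture<>();
        api.start((result, error) -> {
            if (error != null) {
                promise.completeExceptionally(error);
            } else {
                promise.complete(result);
            }
        });
        return promise;
    }

    public static void main(String[] args) {
        // Trivial usage: the "API" invokes the callback immediately with a value.
        CallbackApi<String> api = callback -> callback.accept("done", null);
        System.out.println(toFuture(api).join());
    }
}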

From source file:io.pravega.client.segment.impl.SegmentOutputStreamFactoryImpl.java

@Override
public SegmentOutputStream createOutputStreamForTransaction(Segment segment, UUID txId,
        Consumer<Segment> segmentSealedCallback, EventWriterConfig config) {
    CompletableFuture<String> name = new CompletableFuture<>();
    FailingReplyProcessor replyProcessor = new FailingReplyProcessor() {

        @Override
        public void connectionDropped() {
            name.completeExceptionally(new ConnectionClosedException());
        }

        @Override
        public void wrongHost(WireCommands.WrongHost wrongHost) {
            name.completeExceptionally(new NotImplementedException());
        }

        @Override
        public void transactionInfo(WireCommands.TransactionInfo info) {
            name.complete(info.getTransactionName());
        }

        @Override
        public void processingFailure(Exception error) {
            name.completeExceptionally(error);
        }
    };
    val connectionFuture = controller.getEndpointForSegment(segment.getScopedName())
            .thenCompose((PravegaNodeUri endpointForSegment) -> {
                return cf.establishConnection(endpointForSegment, replyProcessor);
            });
    connectionFuture.thenAccept((ClientConnection connection) -> {
        try {
            connection.send(new WireCommands.GetTransactionInfo(1, segment.getScopedName(), txId));
        } catch (ConnectionFailedException e) {
            throw new RuntimeException(e);
        }
    }).exceptionally(t -> {
        name.completeExceptionally(t);
        return null;
    });
    name.whenComplete((s, e) -> {
        getAndHandleExceptions(connectionFuture, RuntimeException::new).close();
    });
    return new SegmentOutputStreamImpl(getAndHandleExceptions(name, RuntimeException::new), controller, cf,
            UUID.randomUUID(), segmentSealedCallback, getRetryFromConfig(config));
}

From source file:com.ikanow.aleph2.storm.harvest_technology.StormHarvestTechnologyModule.java

@Override
public CompletableFuture<BasicMessageBean> onUpdatedSource(DataBucketBean old_bucket, DataBucketBean new_bucket,
        boolean is_enabled, Optional<BucketDiffBean> diff, IHarvestContext context) {
    logger.info("received update source request");
    CompletableFuture<BasicMessageBean> stop_future = onDelete(old_bucket, context);
    try {
        logger.info("waiting for stop to complete");
        stop_future.get(10L, TimeUnit.SECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        //set failure in completable future
        logger.info("stop failed, returning that ", e);
        stop_future.complete(new BasicMessageBean(new Date(), false, null, "updateSource", null,
                ErrorUtils.getLongForm("{0}", e), null));
        return stop_future;
    }
    return onNewSource(new_bucket, context, is_enabled);
}

From source file:org.apache.pulsar.tests.integration.utils.DockerUtils.java

public static ContainerExecResultBytes runCommandWithRawOutput(DockerClient docker, String containerId,
        String... cmd) throws ContainerExecException {
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    String execid = docker.execCreateCmd(containerId).withCmd(cmd).withAttachStderr(true).withAttachStdout(true)
            .exec().getId();
    String cmdString = Arrays.stream(cmd).collect(Collectors.joining(" "));
    ByteBuf stdout = Unpooled.buffer();
    ByteBuf stderr = Unpooled.buffer();
    docker.execStartCmd(execid).withDetach(false).exec(new ResultCallback<Frame>() {
        @Override
        public void close() {
        }

        @Override
        public void onStart(Closeable closeable) {
            LOG.info("DOCKER.exec({}:{}): Executing...", containerId, cmdString);
        }

        @Override
        public void onNext(Frame object) {
            if (StreamType.STDOUT == object.getStreamType()) {
                stdout.writeBytes(object.getPayload());
            } else if (StreamType.STDERR == object.getStreamType()) {
                stderr.writeBytes(object.getPayload());
            }
        }

        @Override
        public void onError(Throwable throwable) {
            future.completeExceptionally(throwable);
        }

        @Override
        public void onComplete() {
            LOG.info("DOCKER.exec({}:{}): Done", containerId, cmdString);
            future.complete(true);
        }
    });
    future.join();

    InspectExecResponse resp = docker.inspectExecCmd(execid).exec();
    while (resp.isRunning()) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(ie);
        }
        resp = docker.inspectExecCmd(execid).exec();
    }
    int retCode = resp.getExitCode();

    byte[] stdoutBytes = new byte[stdout.readableBytes()];
    stdout.readBytes(stdoutBytes);
    byte[] stderrBytes = new byte[stderr.readableBytes()];
    stderr.readBytes(stderrBytes);

    ContainerExecResultBytes result = ContainerExecResultBytes.of(retCode, stdoutBytes, stderrBytes);
    LOG.info("DOCKER.exec({}:{}): completed with {}", containerId, cmdString, retCode);

    if (retCode != 0) {
        throw new ContainerExecException(cmdString, containerId, null);
    }
    return result;
}

From source file:org.apache.bookkeeper.meta.MockLedgerManager.java

@Override
public CompletableFuture<Versioned<LedgerMetadata>> createLedgerMetadata(long ledgerId,
        LedgerMetadata metadata) {
    CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
    executor.submit(() -> {
        if (metadataMap.containsKey(ledgerId)) {
            executeCallback(() -> promise.completeExceptionally(new BKException.BKLedgerExistException()));
        } else {
            try {
                metadataMap.put(ledgerId, Pair.of(new LongVersion(0L), serDe.serialize(metadata)));
                Versioned<LedgerMetadata> readBack = readMetadata(ledgerId);
                executeCallback(() -> promise.complete(readBack));
            } catch (Exception e) {
                LOG.error("Error reading back written metadata", e);
                executeCallback(() -> promise.completeExceptionally(new BKException.MetaStoreException()));
            }
        }
    });
    return promise;
}

From source file:org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider.java

/**
 * Check whether the specified role can perform a lookup for the specified topic.
 *
 * For that the caller needs to have producer or consumer permission.
 *
 * @param topicName
 * @param role
 * @return
 * @throws Exception
 */
@Override
public CompletableFuture<Boolean> canLookupAsync(TopicName topicName, String role,
        AuthenticationDataSource authenticationData) {
    CompletableFuture<Boolean> finalResult = new CompletableFuture<Boolean>();
    canProduceAsync(topicName, role, authenticationData).whenComplete((produceAuthorized, ex) -> {
        if (ex == null) {
            if (produceAuthorized) {
                finalResult.complete(produceAuthorized);
                return;
            }
        } else {
            if (log.isDebugEnabled()) {
                log.debug(
                        "Topic [{}] Role [{}] exception occured while trying to check Produce permissions. {}",
                        topicName.toString(), role, ex.getMessage());
            }
        }
        canConsumeAsync(topicName, role, authenticationData, null).whenComplete((consumeAuthorized, e) -> {
            if (e == null) {
                if (consumeAuthorized) {
                    finalResult.complete(consumeAuthorized);
                    return;
                }
            } else {
                if (log.isDebugEnabled()) {
                    log.debug(
                            "Topic [{}] Role [{}] exception occured while trying to check Consume permissions. {}",
                            topicName.toString(), role, e.getMessage());

                }
                finalResult.completeExceptionally(e);
                return;
            }
            finalResult.complete(false);
        });
    });
    return finalResult;
}

From source file:org.apache.servicecomb.foundation.vertx.http.ReadStreamPart.java

public <T> CompletableFuture<T> saveAs(Function<Buffer, T> converter) {
    CompletableFuture<T> future = new CompletableFuture<>();
    Buffer buffer = Buffer.buffer();

    // if readStream.resume() not run on correct eventloop, will:
    //  1.create a context task to save last chunk data to buffer
    //  2.activate connection to read new data
    //  but maybe 2 will run before 1, that will cause lost data or get incorrect data
    context.runOnContext(V -> {
        readStream.exceptionHandler(future::completeExceptionally);
        readStream.handler(buffer::appendBuffer);
        readStream.endHandler(v -> future.complete(converter.apply(buffer)));
        readStream.resume();
    });

    return future;
}