Example usage for java.util.concurrent.CompletableFuture CompletableFuture()

List of usage examples for java.util.concurrent.CompletableFuture CompletableFuture()

Introduction

This page collects example usages of the java.util.concurrent.CompletableFuture no-argument constructor, CompletableFuture().

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
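
Before the sourced examples below, here is a minimal, self-contained sketch of the pattern most of them share: the no-argument constructor produces an incomplete future that is handed out immediately and completed later, typically from an asynchronous callback. The AsyncStore and FetchCallback types are hypothetical and exist only to illustrate the bridge.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class ManualCompletionSketch {

    // Hypothetical callback-based API, used only to illustrate the bridging pattern.
    interface FetchCallback {
        void onSuccess(String value);
        void onError(Throwable error);
    }

    interface AsyncStore {
        void fetch(String key, FetchCallback callback);
    }

    // Bridge the callback API to a CompletableFuture: create an incomplete future,
    // complete it (normally or exceptionally) when the callback fires, and return it.
    static CompletableFuture<String> fetchAsync(AsyncStore store, String key) {
        CompletableFuture<String> promise = new CompletableFuture<>();
        store.fetch(key, new FetchCallback() {
            @Override
            public void onSuccess(String value) {
                promise.complete(value);
            }

            @Override
            public void onError(Throwable error) {
                promise.completeExceptionally(error);
            }
        });
        return promise;
    }

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // Trivial in-memory "store" that invokes the callback immediately.
        AsyncStore store = (key, callback) -> callback.onSuccess("value-for-" + key);
        System.out.println(fetchAsync(store, "k1").get()); // prints "value-for-k1"
    }
}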

Usage

From source file:org.apache.distributedlog.lock.ZKSessionLock.java

/**
 * Try lock. If wait is true, it waits and watches the sibling node to acquire the lock once
 * the sibling is gone. <i>acquireCompletableFuture</i> is notified when the lock is acquired
 * successfully or the attempt fails. The promise is only satisfied with the current lock owner.
 *
 * <p>NOTE: the <i>promise</i> is only satisfied on <i>lockStateExecutor</i>, so any
 * transformations attached on promise will be executed in order.</p>
 *
 * @param wait
 *          whether to wait for ownership.
 * @param promise
 *          promise to satisfy with current lock owner.
 */
private void asyncTryLockWithoutCleanup(final boolean wait, final CompletableFuture<String> promise) {
    executeLockAction(getEpoch(), new LockAction() {
        @Override
        public void execute() {
            if (!lockState.inState(State.INIT)) {
                promise.completeExceptionally(
                        new LockStateChangedException(lockPath, lockId, State.INIT, lockState.getState()));
                return;
            }
            lockState.transition(State.PREPARING);

            final int curEpoch = epochUpdater.incrementAndGet(ZKSessionLock.this);
            watcher = new LockWatcher(curEpoch);
            // register watcher for session expires
            zkClient.register(watcher);
            // Encode both client id and session in the lock node
            String myPath;
            try {
                // member_<clientid>_s<owner_session>_
                myPath = getLockPathPrefixV3(lockPath, lockId.getLeft(), lockId.getRight());
            } catch (UnsupportedEncodingException uee) {
                myPath = getLockPathPrefixV1(lockPath);
            }
            zk.create(myPath, serializeClientId(lockId.getLeft()), zkClient.getDefaultACL(),
                    CreateMode.EPHEMERAL_SEQUENTIAL, new AsyncCallback.StringCallback() {
                        @Override
                        public void processResult(final int rc, String path, Object ctx, final String name) {
                            executeLockAction(curEpoch, new LockAction() {
                                @Override
                                public void execute() {
                                    if (KeeperException.Code.OK.intValue() != rc) {
                                        KeeperException ke = KeeperException
                                                .create(KeeperException.Code.get(rc));
                                        promise.completeExceptionally(ke);
                                        return;
                                    }

                                    if (FailpointUtils.checkFailPointNoThrow(
                                            FailpointUtils.FailPointName.FP_LockTryCloseRaceCondition)) {
                                        lockState.transition(State.CLOSING);
                                        lockState.transition(State.CLOSED);
                                    }

                                    if (null != currentNode) {
                                        LOG.error("Current node for {} overwritten current = {} new = {}",
                                                new Object[] { lockPath, lockId,
                                                        getLockIdFromPath(currentNode) });
                                    }

                                    currentNode = name;
                                    currentId = getLockIdFromPath(currentNode);
                                    LOG.trace("{} received member id for lock {}", lockId, currentId);

                                    if (lockState.isExpiredOrClosing()) {
                                        // Delete node attempt may have come after PREPARING but before create node,
                                        // in which case we'd be left with a dangling node unless we clean up.
                                        CompletableFuture<Void> deletePromise = new CompletableFuture<Void>();
                                        deleteLockNode(deletePromise);
                                        FutureUtils
                                                .ensure(deletePromise,
                                                        () -> promise.completeExceptionally(
                                                                new LockClosedException(lockPath, lockId,
                                                                        lockState.getState())));
                                        return;
                                    }

                                    lockState.transition(State.PREPARED);
                                    checkLockOwnerAndWaitIfPossible(watcher, wait, promise);
                                }

                                @Override
                                public String getActionName() {
                                    return "postPrepare(wait=" + wait + ")";
                                }
                            });
                        }
                    }, null);
        }

        @Override
        public String getActionName() {
            return "prepare(wait=" + wait + ")";
        }
    }, promise);
}

From source file:org.apache.distributedlog.BKLogSegmentWriter.java

public synchronized CompletableFuture<DLSN> writeInternal(LogRecord record) throws LogRecordTooLongException,
        LockingException, BKTransmitException, WriteException, InvalidEnvelopedEntryException {
    int logRecordSize = record.getPersistentSize();

    if (logRecordSize > MAX_LOGRECORD_SIZE) {
        throw new LogRecordTooLongException(String.format(
                "Log Record of size %d written when only %d is allowed", logRecordSize, MAX_LOGRECORD_SIZE));
    }

    // If we will exceed the max number of bytes allowed per entry
    // initiate a transmit before accepting the new log record
    if ((recordSetWriter.getNumBytes() + logRecordSize) > MAX_LOGRECORDSET_SIZE) {
        checkStateAndTransmit();
    }

    checkWriteLock();

    if (enableRecordCounts) {
        // Set the count here. The caller would appropriately increment it
        // if this log record is to be counted
        record.setPositionWithinLogSegment(positionWithinLogSegment);
    }

    CompletableFuture<DLSN> writePromise = new CompletableFuture<DLSN>();
    writePromise.whenComplete(new OpStatsListener<DLSN>(writeTime));
    recordSetWriter.writeRecord(record, writePromise);

    if (record.getTransactionId() < lastTxId) {
        LOG.info("Log Segment {} TxId decreased Last: {} Record: {}",
                new Object[] { fullyQualifiedLogSegment, lastTxId, record.getTransactionId() });
    }
    if (!record.isControl()) {
        // only update last tx id for user records
        lastTxId = record.getTransactionId();
        outstandingBytes += (20 + record.getPayload().length);
    }
    return writePromise;
}

From source file:org.eclipse.smarthome.io.transport.mqtt.MqttBrokerConnection.java

/**
 * Publish a message to the broker with the given QoS and retained flag.
 *
 * @param topic The topic
 * @param payload The message payload
 * @param qos The quality of service for this message
 * @param retain Set to true to retain the message on the broker
 * @return Returns a future that completes with a result of true if the publishing succeeded and completes
 *         exceptionally on an error or with a result of false if no broker connection is established.
 */
public CompletableFuture<Boolean> publish(String topic, byte[] payload, int qos, boolean retain) {
    MqttAsyncClient client = this.client;
    if (client == null) {
        return CompletableFuture.completedFuture(false);
    }
    // publish message asynchronously
    CompletableFuture<Boolean> f = new CompletableFuture<Boolean>();
    try {
        client.publish(topic, payload, qos, retain, f, actionCallback);
    } catch (org.eclipse.paho.client.mqttv3.MqttException e) {
        f.completeExceptionally(new MqttException(e));
    }
    return f;
}

From source file:org.apache.bookkeeper.client.BookKeeper.java

/**
 * Synchronous call to delete a ledger. Parameters match those of
 * {@link #asyncDeleteLedger(long, AsyncCallback.DeleteCallback, Object)}
 *
 * @param lId
 *            ledgerId
 * @throws InterruptedException
 * @throws BKException.BKNoSuchLedgerExistsException if the ledger doesn't exist
 * @throws BKException
 */
public void deleteLedger(long lId) throws InterruptedException, BKException {
    CompletableFuture<Void> counter = new CompletableFuture<>();
    // Call asynchronous version
    asyncDeleteLedger(lId, new SyncDeleteCallback(), counter);

    SynchCallbackUtils.waitForResult(counter);
}
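
The example above turns an asynchronous delete into a blocking call by handing the callback an incomplete CompletableFuture and waiting on it. A rough, JDK-only sketch of that sync-over-async idea follows; doDeleteAsync and its BiConsumer callback are hypothetical stand-ins for asyncDeleteLedger, SyncDeleteCallback and SynchCallbackUtils.waitForResult.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.BiConsumer;

public class SyncOverAsyncSketch {

    // Hypothetical asynchronous delete: reports (success flag, error) through a callback.
    static void doDeleteAsync(long id, BiConsumer<Boolean, Throwable> callback) {
        // Pretend the delete completed immediately and successfully.
        callback.accept(true, null);
    }

    // Synchronous facade: the incomplete future acts as a latch that the callback completes.
    static void deleteSync(long id) throws InterruptedException, ExecutionException {
        CompletableFuture<Void> latch = new CompletableFuture<>();
        doDeleteAsync(id, (ok, error) -> {
            if (error != null) {
                latch.completeExceptionally(error);
            } else {
                latch.complete(null);
            }
        });
        latch.get(); // block until the asynchronous callback has fired
    }

    public static void main(String[] args) throws Exception {
        deleteSync(42L);
        System.out.println("deleted");
    }
}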

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void autoTriggerWhileManualInProgress() throws Exception {
    CompletableFuture<Void> slowOffload = new CompletableFuture<>();
    CountDownLatch offloadRunning = new CountDownLatch(1);
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            offloadRunning.countDown();
            return slowOffload.thenCompose((res) -> super.offload(ledger, uuid, extraMetadata));
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setOffloadAutoTriggerSizeThresholdBytes(100);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);

    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    // Ledger rolls once, threshold not hit so auto shouldn't run
    for (int i = 0; i < 14; i++) {
        ledger.addEntry(buildEntry(10, "entry-" + i));
    }
    Position p = ledger.addEntry(buildEntry(10, "trigger-entry"));

    OffloadCallbackPromise cbPromise = new OffloadCallbackPromise();
    ledger.asyncOffloadPrefix(p, cbPromise, null);
    offloadRunning.await();

    // add enough entries to roll the ledger a couple of times and trigger some offloads
    for (int i = 0; i < 20; i++) {
        ledger.addEntry(buildEntry(10, "entry-" + i));
    }

    // allow the manual offload to complete
    slowOffload.complete(null);

    Assert.assertEquals(cbPromise.join(),
            PositionImpl.get(ledger.getLedgersInfoAsList().get(1).getLedgerId(), 0));

    // auto trigger should eventually offload everything else over threshold
    assertEventuallyTrue(() -> offloader.offloadedLedgers().size() == 2);
    Assert.assertEquals(offloader.offloadedLedgers(),
            ImmutableSet.of(ledger.getLedgersInfoAsList().get(0).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(1).getLedgerId()));
}

From source file:org.apache.hadoop.hbase.client.AsyncHBaseAdmin.java

private <T> CompletableFuture<T> failedFuture(Throwable error) {
    CompletableFuture<T> future = new CompletableFuture<>();
    future.completeExceptionally(error);
    return future;
}
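
This construct-then-fail idiom is the Java 8 way to build an already-failed future. On Java 9 and later, CompletableFuture.failedFuture(Throwable) does the same in one call; a small sketch for comparison:

import java.util.concurrent.CompletableFuture;

public class FailedFutureSketch {
    public static void main(String[] args) {
        // Java 8 style: construct an incomplete future, then fail it.
        CompletableFuture<String> f1 = new CompletableFuture<>();
        f1.completeExceptionally(new IllegalStateException("boom"));

        // Java 9+ style: a single factory call produces the already-failed future.
        CompletableFuture<String> f2 = CompletableFuture.failedFuture(new IllegalStateException("boom"));

        System.out.println(f1.isCompletedExceptionally()); // true
        System.out.println(f2.isCompletedExceptionally()); // true
    }
}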

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

/**
 * Delete the managed ledger associated with this topic
 *
 * @param failIfHasSubscriptions
 *            Flag indicating whether the delete should fail if the topic still has unconnected subscriptions.
 *            Set to false when called from the admin API (it will delete the subscriptions too), and set to true
 *            when called from the GC thread.
 * @param closeIfClientsConnected
 *            Flag indicating whether to explicitly close connected producers/consumers/replicators before trying
 *            to delete the topic. If any client is connected to the topic and this flag is disabled, the operation fails.
 *
 * @return Completable future indicating completion of the delete operation. Completed exceptionally with
 *         IllegalStateException if the topic is still active, or ManagedLedgerException if the ledger delete operation fails.
 */
private CompletableFuture<Void> delete(boolean failIfHasSubscriptions, boolean closeIfClientsConnected) {
    CompletableFuture<Void> deleteFuture = new CompletableFuture<>();

    lock.writeLock().lock();
    try {
        if (isFenced) {
            log.warn("[{}] Topic is already being closed or deleted", topic);
            deleteFuture.completeExceptionally(new TopicFencedException("Topic is already fenced"));
            return deleteFuture;
        }

        CompletableFuture<Void> closeClientFuture = new CompletableFuture<>();
        if (closeIfClientsConnected) {
            List<CompletableFuture<Void>> futures = Lists.newArrayList();
            replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect()));
            producers.forEach(producer -> futures.add(producer.disconnect()));
            subscriptions.forEach((s, sub) -> futures.add(sub.disconnect()));
            FutureUtil.waitForAll(futures).thenRun(() -> {
                closeClientFuture.complete(null);
            }).exceptionally(ex -> {
                log.error("[{}] Error closing clients", topic, ex);
                isFenced = false;
                closeClientFuture.completeExceptionally(ex);
                return null;
            });
        } else {
            closeClientFuture.complete(null);
        }

        closeClientFuture.thenAccept(delete -> {
            if (USAGE_COUNT_UPDATER.get(this) == 0) {
                isFenced = true;

                List<CompletableFuture<Void>> futures = Lists.newArrayList();

                if (failIfHasSubscriptions) {
                    if (!subscriptions.isEmpty()) {
                        isFenced = false;
                        deleteFuture.completeExceptionally(new TopicBusyException("Topic has subscriptions"));
                        return;
                    }
                } else {
                    subscriptions.forEach((s, sub) -> futures.add(sub.delete()));
                }

                FutureUtil.waitForAll(futures).whenComplete((v, ex) -> {
                    if (ex != null) {
                        log.error("[{}] Error deleting topic", topic, ex);
                        isFenced = false;
                        deleteFuture.completeExceptionally(ex);
                    } else {
                        ledger.asyncDelete(new AsyncCallbacks.DeleteLedgerCallback() {
                            @Override
                            public void deleteLedgerComplete(Object ctx) {
                                brokerService.removeTopicFromCache(topic);
                                log.info("[{}] Topic deleted", topic);
                                deleteFuture.complete(null);
                            }

                            @Override
                            public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) {
                                isFenced = false;
                                log.error("[{}] Error deleting topic", topic, exception);
                                deleteFuture.completeExceptionally(new PersistenceException(exception));
                            }
                        }, null);
                    }
                });
            } else {
                deleteFuture.completeExceptionally(new TopicBusyException(
                        "Topic has " + USAGE_COUNT_UPDATER.get(this) + " connected producers/consumers"));
            }
        }).exceptionally(ex -> {
            deleteFuture.completeExceptionally(
                    new TopicBusyException("Failed to close clients before deleting topic."));
            return null;
        });
    } finally {
        lock.writeLock().unlock();
    }

    return deleteFuture;
}

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void multipleAutoTriggers() throws Exception {
    CompletableFuture<Void> slowOffload = new CompletableFuture<>();
    CountDownLatch offloadRunning = new CountDownLatch(1);
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            offloadRunning.countDown();
            return slowOffload.thenCompose((res) -> super.offload(ledger, uuid, extraMetadata));
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setOffloadAutoTriggerSizeThresholdBytes(100);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);

    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    // Ledger will roll twice, offload will run on first ledger after second closed
    for (int i = 0; i < 25; i++) {
        ledger.addEntry(buildEntry(10, "entry-" + i));
    }
    offloadRunning.await();

    // trigger a bunch more rolls. Eventually there will be 5 ledgers.
    // first 3 should be offloaded, 4th is 100bytes, 5th is 0 bytes.
    // 4th and 5th sum to 100 bytes so they're just at edge of threshold
    for (int i = 0; i < 20; i++) {
        ledger.addEntry(buildEntry(10, "entry-" + i));
    }

    // allow the first offload to continue
    slowOffload.complete(null);

    assertEventuallyTrue(() -> offloader.offloadedLedgers().size() == 3);
    Assert.assertEquals(offloader.offloadedLedgers(),
            ImmutableSet.of(ledger.getLedgersInfoAsList().get(0).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(1).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(2).getLedgerId()));
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<TableDescriptor> getDescriptor(TableName tableName) {
    CompletableFuture<TableDescriptor> future = new CompletableFuture<>();
    addListener(this.<List<TableSchema>>newMasterCaller().priority(tableName)
            .action((controller, stub) -> this
                    .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>>call(
                            controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName),
                            (s, c, req, done) -> s.getTableDescriptors(c, req, done),
                            (resp) -> resp.getTableSchemaList()))
            .call(), (tableSchemas, error) -> {
                if (error != null) {
                    future.completeExceptionally(error);
                    return;
                }
                if (!tableSchemas.isEmpty()) {
                    future.complete(ProtobufUtil.toTableDescriptor(tableSchemas.get(0)));
                } else {
                    future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
                }
            });
    return future;
}

From source file:org.apache.distributedlog.lock.ZKSessionLock.java

CompletableFuture<Void> asyncUnlock(final Throwable cause) {
    final CompletableFuture<Void> promise = new CompletableFuture<Void>();

    // Use lock executor here rather than lock action, because we want this operation to be applied
    // whether the epoch has changed or not. The member node is EPHEMERAL_SEQUENTIAL so there's no
    // risk of an ABA problem where we delete and recreate a node and then delete it again here.
    lockStateExecutor.executeOrdered(lockPath, new SafeRunnable() {
        @Override
        public void safeRun() {
            acquireFuture.completeExceptionally(cause);
            unlockInternal(promise);
            promise.whenComplete(new OpStatsListener<Void>(unlockStats));
        }
    });

    return promise;
}