Example usage for java.util.concurrent CompletableFuture thenCompose

List of usage examples for java.util.concurrent CompletableFuture thenCompose

Introduction

On this page you can find example usages of java.util.concurrent CompletableFuture.thenCompose.

Prototype

public <U> CompletableFuture<U> thenCompose(Function<? super T, ? extends CompletionStage<U>> fn) 

Source Link

Usage

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void manualTriggerWhileAutoInProgress() throws Exception {
    // Gate that keeps the first (automatic) offload blocked until released below.
    CompletableFuture<Void> blockedOffload = new CompletableFuture<>();
    CountDownLatch firstOffloadStarted = new CountDownLatch(1);
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            firstOffloadStarted.countDown();
            return blockedOffload.thenCompose((res) -> super.offload(ledger, uuid, extraMetadata));
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setOffloadAutoTriggerSizeThresholdBytes(100);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);

    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    // Ledger will roll twice, offload will run on first ledger after second closed
    for (int entry = 0; entry < 25; entry++) {
        ledger.addEntry(buildEntry(10, "entry-" + entry));
    }
    firstOffloadStarted.await();

    for (int entry = 0; entry < 20; entry++) {
        ledger.addEntry(buildEntry(10, "entry-" + entry));
    }
    Position lastPosition = ledger.addEntry(buildEntry(10, "last-entry"));

    // A manual trigger while the automatic offload is still in flight must be rejected.
    try {
        ledger.offloadPrefix(lastPosition);
        Assert.fail("Shouldn't have succeeded");
    } catch (ManagedLedgerException.OffloadInProgressException e) {
        // expected
    }

    // Release the gate so the blocked offload can make progress.
    blockedOffload.complete(null);

    // eventually all over threshold will be offloaded
    assertEventuallyTrue(() -> offloader.offloadedLedgers().size() == 3);
    Assert.assertEquals(offloader.offloadedLedgers(),
            ImmutableSet.of(ledger.getLedgersInfoAsList().get(0).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(1).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(2).getLedgerId()));

    // then a manual offload can run and offload the one ledger under the threshold
    ledger.offloadPrefix(lastPosition);

    Assert.assertEquals(offloader.offloadedLedgers().size(), 4);
    Assert.assertEquals(offloader.offloadedLedgers(),
            ImmutableSet.of(ledger.getLedgersInfoAsList().get(0).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(1).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(2).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(3).getLedgerId()));
}

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void autoTriggerWhileManualInProgress() throws Exception {
    // Gate that keeps the manual offload blocked until explicitly released.
    CompletableFuture<Void> blockedOffload = new CompletableFuture<>();
    CountDownLatch offloadStarted = new CountDownLatch(1);
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            offloadStarted.countDown();
            return blockedOffload.thenCompose((res) -> super.offload(ledger, uuid, extraMetadata));
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setOffloadAutoTriggerSizeThresholdBytes(100);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);

    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    // Ledger rolls once, threshold not hit so auto shouldn't run
    for (int entry = 0; entry < 14; entry++) {
        ledger.addEntry(buildEntry(10, "entry-" + entry));
    }
    Position triggerPosition = ledger.addEntry(buildEntry(10, "trigger-entry"));

    // Kick off a manual offload; it blocks inside the mock offloader.
    OffloadCallbackPromise manualPromise = new OffloadCallbackPromise();
    ledger.asyncOffloadPrefix(triggerPosition, manualPromise, null);
    offloadStarted.await();

    // add enough entries to roll the ledger a couple of times and trigger some offloads
    for (int entry = 0; entry < 20; entry++) {
        ledger.addEntry(buildEntry(10, "entry-" + entry));
    }

    // allow the manual offload to complete
    blockedOffload.complete(null);

    Assert.assertEquals(manualPromise.join(),
            PositionImpl.get(ledger.getLedgersInfoAsList().get(1).getLedgerId(), 0));

    // auto trigger should eventually offload everything else over threshold
    assertEventuallyTrue(() -> offloader.offloadedLedgers().size() == 2);
    Assert.assertEquals(offloader.offloadedLedgers(),
            ImmutableSet.of(ledger.getLedgersInfoAsList().get(0).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(1).getLedgerId()));
}

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void multipleAutoTriggers() throws Exception {
    // Gate that holds the first offload open so later triggers queue up behind it.
    CompletableFuture<Void> blockedOffload = new CompletableFuture<>();
    CountDownLatch firstOffloadStarted = new CountDownLatch(1);
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            firstOffloadStarted.countDown();
            return blockedOffload.thenCompose((res) -> super.offload(ledger, uuid, extraMetadata));
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setOffloadAutoTriggerSizeThresholdBytes(100);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);

    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    // Ledger will roll twice, offload will run on first ledger after second closed
    for (int entry = 0; entry < 25; entry++) {
        ledger.addEntry(buildEntry(10, "entry-" + entry));
    }
    firstOffloadStarted.await();

    // trigger a bunch more rolls. Eventually there will be 5 ledgers.
    // first 3 should be offloaded, 4th is 100bytes, 5th is 0 bytes.
    // 4th and 5th sum to 100 bytes so they're just at edge of threshold
    for (int entry = 0; entry < 20; entry++) {
        ledger.addEntry(buildEntry(10, "entry-" + entry));
    }

    // allow the first offload to continue
    blockedOffload.complete(null);

    assertEventuallyTrue(() -> offloader.offloadedLedgers().size() == 3);
    Assert.assertEquals(offloader.offloadedLedgers(),
            ImmutableSet.of(ledger.getLedgersInfoAsList().get(0).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(1).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(2).getLedgerId()));
}

From source file:org.apache.distributedlog.BKLogSegmentWriter.java

@Override
public synchronized CompletableFuture<Long> flush() {
    // Fail fast if the writer is already in a state that forbids transmission.
    try {
        checkStateBeforeTransmit();
    } catch (WriteException e) {
        return FutureUtils.exception(e);
    }

    CompletableFuture<Integer> transmitFuture;
    try {
        transmitFuture = transmit();
    } catch (BKTransmitException | LockingException | WriteException | InvalidEnvelopedEntryException e) {
        // All transmit failures are surfaced through the returned future
        // rather than thrown to the caller.
        return FutureUtils.exception(e);
    }

    // Nothing was pending to transmit: fall back to the previous packet's
    // transmit future, or report the last acknowledged txid directly.
    if (null == transmitFuture) {
        if (null != packetPrevious) {
            transmitFuture = packetPrevious.getTransmitFuture();
        } else {
            return FutureUtils.value(getLastTxIdAcknowledged());
        }
    }

    return transmitFuture.thenCompose(GET_LAST_TXID_ACKNOWLEDGED_AFTER_TRANSMIT_FUNC);
}

From source file:org.apache.distributedlog.BKLogSegmentWriter.java

@Override
public synchronized CompletableFuture<Long> commit() {
    // we don't pack control records with user records together
    // so transmit current output buffer if possible
    CompletableFuture<Integer> transmitFuture;
    try {
        transmitFuture = transmit();
        if (null == transmitFuture) {
            // Nothing buffered: write a control record and flush it instead.
            writeControlLogRecord();
            return flush();
        }
    } catch (IOException ioe) {
        // Failures from both transmit() and writeControlLogRecord() are
        // reported through the returned future rather than thrown.
        return FutureUtils.exception(ioe);
    }
    return transmitFuture.thenCompose(GET_LAST_TXID_ACKNOWLEDGED_AFTER_TRANSMIT_FUNC);
}

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

@Override
public void checkGC(int gcIntervalInSeconds) {
    // Garbage-collect this topic once it has been inactive for a full GC
    // interval and retention no longer requires keeping it.
    if (isActive()) {
        lastActive = System.nanoTime();
    } else if (System.nanoTime() - lastActive < TimeUnit.SECONDS.toNanos(gcIntervalInSeconds)) {
        // Gc interval did not expire yet
        return;
    } else if (shouldTopicBeRetained()) {
        // Topic activity is still within the retention period
        return;
    } else {
        // Completed once replication producers are handled (immediately for
        // non-global topics); gates the actual topic deletion below.
        CompletableFuture<Void> replCloseFuture = new CompletableFuture<>();

        if (TopicName.get(topic).isGlobal()) {
            // For global namespace, close repl producers first.
            // Once all repl producers are closed, we can delete the topic,
            // provided no remote producers connected to the broker.
            if (log.isDebugEnabled()) {
                log.debug("[{}] Global topic inactive for {} seconds, closing repl producers.", topic,
                        gcIntervalInSeconds);
            }
            closeReplProducersIfNoBacklog().thenRun(() -> {
                if (hasRemoteProducers()) {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Global topic has connected remote producers. Not a candidate for GC",
                                topic);
                    }
                    replCloseFuture.completeExceptionally(
                            new TopicBusyException("Topic has connected remote producers"));
                } else {
                    log.info("[{}] Global topic inactive for {} seconds, closed repl producers", topic,
                            gcIntervalInSeconds);
                    replCloseFuture.complete(null);
                }
            }).exceptionally(e -> {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Global topic has replication backlog. Not a candidate for GC", topic);
                }
                replCloseFuture.completeExceptionally(e.getCause());
                return null;
            });
        } else {
            replCloseFuture.complete(null);
        }

        // Delete only after replication producers were closed successfully;
        // a TopicBusyException here means the topic became active again.
        replCloseFuture.thenCompose(v -> delete(true))
                .thenRun(() -> log.info("[{}] Topic deleted successfully due to inactivity", topic))
                .exceptionally(e -> {
                    if (e.getCause() instanceof TopicBusyException) {
                        // topic became active again
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] Did not delete busy topic: {}", topic, e.getCause().getMessage());
                        }
                    } else {
                        log.warn("[{}] Inactive topic deletion failed", topic, e);
                    }
                    return null;
                });

    }
}

From source file:org.openhab.binding.mqtt.homeassistant.internal.handler.HomeAssistantThingHandler.java

/**
 * Start a background discovery for the configured HA MQTT object-id.
 */
@Override
protected CompletableFuture<@Nullable Void> start(MqttBrokerConnection connection) {
    connection.setRetain(true);
    connection.setQos(1);

    updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.GONE, "No response from the device yet");

    // Start all known components and channels within the components and put the Thing offline
    // if any subscribing failed ( == broker connection lost).
    // The per-component futures are chained one after another via thenCompose,
    // so the reduced future fails if any single component start fails.
    CompletableFuture<@Nullable Void> future = haComponents.values().stream()
            .map(e -> e.start(connection, scheduler, attributeReceiveTimeout))
            .reduce(CompletableFuture.completedFuture(null), (a, v) -> a.thenCompose(b -> v)) // reduce to one
            .exceptionally(e -> {
                updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.CONFIGURATION_ERROR, e.getMessage());
                return null;
            });

    // NOTE(review): exceptionally() recovers the failure to null, so discovery
    // below still starts even when a component failed to subscribe — confirm intended.
    return future
            .thenCompose(b -> discoverComponents.startDiscovery(connection, 0, discoveryHomeAssistantID, this));
}

From source file:ru.histone.v2.evaluator.Evaluator.java

private CompletableFuture<EvalNode> processMethod(ExpAstNode expNode, Context context) {
    // Child layout: [0] = call target, [1] = method name, [2..] = call arguments.
    final int valueIdx = 0;
    final int methodIdx = 1;
    final int firstArgIdx = 2;

    return evalAllNodesOfCurrent(expNode, context).thenCompose(evaluated -> {
        final EvalNode target = evaluated.get(valueIdx);
        final StringEvalNode methodName = (StringEvalNode) evaluated.get(methodIdx);

        // The target itself is passed as the implicit first argument,
        // followed by the remaining evaluated nodes.
        final List<EvalNode> callArgs = new ArrayList<>();
        callArgs.add(target);
        callArgs.addAll(evaluated.subList(firstArgIdx, evaluated.size()));

        return context.call(target, methodName.getValue(), callArgs);
    });
}

From source file:ru.histone.v2.evaluator.Evaluator.java

private CompletableFuture<EvalNode> processMethod(ExpAstNode expNode, Context context, List<EvalNode> args) {
    // Child layout: [0] = call target, [1] = method name; arguments come pre-evaluated.
    final int valueIdx = 0;
    final int methodIdx = 1;

    final CompletableFuture<List<EvalNode>> evaluated = sequence(
            Arrays.asList(evaluateNode(expNode.getNode(valueIdx), context),
                    evaluateNode(expNode.getNode(methodIdx), context)));

    return evaluated.thenCompose(nodes -> {
        final EvalNode target = nodes.get(valueIdx);
        final StringEvalNode methodName = (StringEvalNode) nodes.get(methodIdx);

        // The target is passed as the implicit first argument, then the caller-supplied args.
        final List<EvalNode> callArgs = new ArrayList<>();
        callArgs.add(target);
        callArgs.addAll(args);

        return context.call(target, methodName.getValue(), callArgs);
    });
}

From source file:ru.histone.v2.evaluator.Evaluator.java

private CompletableFuture<EvalNode> processTernary(ExpAstNode expNode, Context context) {
    // Child layout: [0] = condition, [1] = "then" branch, [2] = optional "else" branch.
    return evaluateNode(expNode.getNode(0), context).thenCompose(cond -> {
        if (nodeAsBoolean(cond)) {
            return evaluateNode(expNode.getNode(1), context);
        }
        if (expNode.getNode(2) != null) {
            return evaluateNode(expNode.getNode(2), context);
        }
        // No else branch: a false condition evaluates to the empty node.
        return CompletableFuture.completedFuture(EmptyEvalNode.INSTANCE);
    });
}