Example usage for java.util.concurrent CompletableFuture completeExceptionally

List of usage examples for java.util.concurrent CompletableFuture completeExceptionally

Introduction

On this page you can find example usages of java.util.concurrent CompletableFuture completeExceptionally.

Prototype

public boolean completeExceptionally(Throwable ex) 

Document

If not already completed, causes invocations of get() and related methods to throw the given exception.
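
As a minimal, self-contained sketch (not taken from the examples below): once completeExceptionally has been called, get() rethrows the supplied exception wrapped in an ExecutionException.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class CompleteExceptionallyDemo {
    public static void main(String[] args) throws InterruptedException {
        CompletableFuture<String> future = new CompletableFuture<>();

        // Returns true because the future had not completed yet.
        boolean transitioned = future.completeExceptionally(new IllegalStateException("boom"));
        System.out.println("transitioned: " + transitioned);

        try {
            future.get();
        } catch (ExecutionException e) {
            // get() wraps the supplied exception in an ExecutionException.
            System.out.println("cause: " + e.getCause());
        }
    }
}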

Usage

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testTrimOccursDuringOffloadLedgerDeletedBeforeOffload() throws Exception {
    CountDownLatch offloadStarted = new CountDownLatch(1);
    CompletableFuture<Long> blocker = new CompletableFuture<>();
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            offloadStarted.countDown();
            return blocker.thenCompose((trimmedLedger) -> {
                if (trimmedLedger == ledger.getId()) {
                    CompletableFuture<Void> future = new CompletableFuture<>();
                    future.completeExceptionally(new BKException.BKNoSuchLedgerExistsException());
                    return future;
                } else {
                    return super.offload(ledger, uuid, extraMetadata);
                }
            });
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");

    for (int i = 0; i < 21; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);

    PositionImpl startOfSecondLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(1).getLedgerId(), 0);
    PositionImpl startOfThirdLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(2).getLedgerId(), 0);

    // trigger an offload which should offload the first two ledgers
    OffloadCallbackPromise cbPromise = new OffloadCallbackPromise();
    ledger.asyncOffloadPrefix(startOfThirdLedger, cbPromise, null);
    offloadStarted.await();

    // trim first ledger
    long trimmedLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    cursor.markDelete(startOfSecondLedger, new HashMap<>());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getLedgerId() == trimmedLedger).count(), 0);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 0);

    // complete offloading
    blocker.complete(trimmedLedger);
    cbPromise.get();

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
    Assert.assertEquals(offloader.offloadedLedgers().size(), 1);
    Assert.assertTrue(
            offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(0).getLedgerId()));
}
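
A note on the idiom above: the mocked offloader fails a request by building a future, completing it exceptionally, and returning it from thenCompose. A minimal sketch of that idiom, with the Java 9+ shorthand that behaves the same (the exception here is only a placeholder):

import java.util.concurrent.CompletableFuture;

class FailedFutureSketch {
    // Hand-rolled failed future, as in the test above.
    static CompletableFuture<Void> handRolled(Throwable cause) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        future.completeExceptionally(cause);
        return future;
    }

    // Equivalent shorthand available since Java 9.
    static CompletableFuture<Void> shorthand(Throwable cause) {
        return CompletableFuture.failedFuture(cause);
    }
}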

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadConflict() throws Exception {
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            return errorLedgers.thenCompose((errors) -> {
                if (errors.remove(ledger.getId())) {
                    failedOffloads.add(Pair.of(ledger.getId(), uuid));
                    CompletableFuture<Void> future = new CompletableFuture<>();
                    future.completeExceptionally(new Exception("Some kind of error"));
                    return future;
                } else {
                    return super.offload(ledger, uuid, extraMetadata);
                }
            });
        }

        @Override
        public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uuid,
                Map<String, String> offloadDriverMetadata) {
            deleted.add(Pair.of(ledgerId, uuid));
            return super.deleteOffloaded(ledgerId, uuid, offloadDriverMetadata);
        }
    };
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    Set<Long> errorSet = ConcurrentHashMap.newKeySet();
    errorSet.add(ledger.getLedgersInfoAsList().get(0).getLedgerId());
    errorLedgers.complete(errorSet);

    try {
        ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    } catch (ManagedLedgerException e) {
        // expected
    }
    Assert.assertTrue(errorSet.isEmpty());
    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 0);

    long expectedFailedLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    UUID expectedFailedUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertEquals(failedOffloads.stream().findFirst().get(),
            Pair.of(expectedFailedLedger, expectedFailedUUID));
    Assert.assertFalse(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());

    // try offload again
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());

    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 1);
    Assert.assertEquals(deleted.stream().findFirst().get(), Pair.of(expectedFailedLedger, expectedFailedUUID));
    UUID successUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertFalse(successUUID.equals(expectedFailedUUID));
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
}

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadDeleteIncomplete() throws Exception {
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            return super.offload(ledger, uuid, extraMetadata).thenCompose((res) -> {
                CompletableFuture<Void> f = new CompletableFuture<>();
                f.completeExceptionally(new Exception("Fail after offload occurred"));
                return f;
            });
        }
    };
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");
    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    try {
        ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    } catch (ManagedLedgerException mle) {
        // expected
    }

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);

    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 0);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().hasUidMsb()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().hasUidMsb());

    long firstLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    long secondLedger = ledger.getLedgersInfoAsList().get(1).getLedgerId();

    cursor.markDelete(ledger.getLastConfirmedEntry());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 1);
    Assert.assertEquals(ledger.getLedgersInfoAsList().get(0).getLedgerId(), secondLedger);

    assertEventuallyTrue(() -> offloader.deletedOffloads().contains(firstLedger));
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreManagedLedgerOffloader.java

@Override
public CompletableFuture<Void> offload(ReadHandle readHandle, UUID uuid, Map<String, String> extraMetadata) {
    CompletableFuture<Void> promise = new CompletableFuture<>();
    scheduler.chooseThread(readHandle.getId()).submit(() -> {
        if (readHandle.getLength() == 0 || !readHandle.isClosed() || readHandle.getLastAddConfirmed() < 0) {
            promise.completeExceptionally(
                    new IllegalArgumentException("An empty or open ledger should never be offloaded"));
            return;
        }
        OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create()
                .withLedgerMetadata(readHandle.getLedgerMetadata())
                .withDataBlockHeaderLength(BlockAwareSegmentInputStreamImpl.getHeaderSize());
        String dataBlockKey = dataBlockOffloadKey(readHandle.getId(), uuid);
        String indexBlockKey = indexBlockOffloadKey(readHandle.getId(), uuid);

        MultipartUpload mpu = null;
        List<MultipartPart> parts = Lists.newArrayList();

        // init multi part upload for data block.
        try {
            BlobBuilder blobBuilder = writeBlobStore.blobBuilder(dataBlockKey);
            addVersionInfo(blobBuilder, userMetadata);
            Blob blob = blobBuilder.build();
            mpu = writeBlobStore.initiateMultipartUpload(writeBucket, blob.getMetadata(), new PutOptions());
        } catch (Throwable t) {
            promise.completeExceptionally(t);
            return;
        }

        long dataObjectLength = 0;
        // start multi part upload for data block.
        try {
            long startEntry = 0;
            int partId = 1;
            long entryBytesWritten = 0;
            while (startEntry <= readHandle.getLastAddConfirmed()) {
                int blockSize = BlockAwareSegmentInputStreamImpl.calculateBlockSize(maxBlockSize, readHandle,
                        startEntry, entryBytesWritten);

                try (BlockAwareSegmentInputStream blockStream = new BlockAwareSegmentInputStreamImpl(readHandle,
                        startEntry, blockSize)) {

                    Payload partPayload = Payloads.newInputStreamPayload(blockStream);
                    partPayload.getContentMetadata().setContentLength((long) blockSize);
                    partPayload.getContentMetadata().setContentType("application/octet-stream");
                    parts.add(writeBlobStore.uploadMultipartPart(mpu, partId, partPayload));
                    log.debug("UploadMultipartPart. container: {}, blobName: {}, partId: {}, mpu: {}",
                            writeBucket, dataBlockKey, partId, mpu.id());

                    indexBuilder.addBlock(startEntry, partId, blockSize);

                    if (blockStream.getEndEntryId() != -1) {
                        startEntry = blockStream.getEndEntryId() + 1;
                    } else {
                        // could not read entry from ledger.
                        break;
                    }
                    entryBytesWritten += blockStream.getBlockEntryBytesCount();
                    partId++;
                }

                dataObjectLength += blockSize;
            }

            writeBlobStore.completeMultipartUpload(mpu, parts);
            mpu = null;
        } catch (Throwable t) {
            try {
                if (mpu != null) {
                    writeBlobStore.abortMultipartUpload(mpu);
                }
            } catch (Throwable throwable) {
                log.error("Failed abortMultipartUpload in bucket - {} with key - {}, uploadId - {}.",
                        writeBucket, dataBlockKey, mpu.id(), throwable);
            }
            promise.completeExceptionally(t);
            return;
        }

        // upload index block
        try (OffloadIndexBlock index = indexBuilder.withDataObjectLength(dataObjectLength).build();
                OffloadIndexBlock.IndexInputStream indexStream = index.toStream()) {
            // write the index block
            BlobBuilder blobBuilder = writeBlobStore.blobBuilder(indexBlockKey);
            addVersionInfo(blobBuilder, userMetadata);
            Payload indexPayload = Payloads.newInputStreamPayload(indexStream);
            indexPayload.getContentMetadata().setContentLength((long) indexStream.getStreamSize());
            indexPayload.getContentMetadata().setContentType("application/octet-stream");

            Blob blob = blobBuilder.payload(indexPayload).contentLength((long) indexStream.getStreamSize())
                    .build();

            writeBlobStore.putBlob(writeBucket, blob);
            promise.complete(null);
        } catch (Throwable t) {
            try {
                writeBlobStore.removeBlob(writeBucket, dataBlockKey);
            } catch (Throwable throwable) {
                log.error("Failed deleteObject in bucket - {} with key - {}.", writeBucket, dataBlockKey,
                        throwable);
            }
            promise.completeExceptionally(t);
            return;
        }
    });
    return promise;
}
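
The offload implementation above follows a promise-completion pattern: return a CompletableFuture immediately, do the work on a scheduler thread, and route every failure path into completeExceptionally so callers see exactly one outcome. A stripped-down, hypothetical sketch of that structure (the real method adds the multipart upload and index handling):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;

class PromiseCompletionSketch {
    // Hypothetical helper: the caller gets the promise right away; the background
    // task decides whether it completes normally or exceptionally.
    static CompletableFuture<Void> runOnExecutor(ExecutorService executor, Runnable work) {
        CompletableFuture<Void> promise = new CompletableFuture<>();
        executor.submit(() -> {
            try {
                work.run();
                promise.complete(null);
            } catch (Throwable t) {
                // Any failure on the worker thread is surfaced to the promise's callers.
                promise.completeExceptionally(t);
            }
        });
        return promise;
    }
}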

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreManagedLedgerOffloader.java

@Override
public CompletableFuture<ReadHandle> readOffloaded(long ledgerId, UUID uid,
        Map<String, String> offloadDriverMetadata) {
    String readBucket = getReadBucket(offloadDriverMetadata);
    BlobStore readBlobstore = getReadBlobStore(offloadDriverMetadata);

    CompletableFuture<ReadHandle> promise = new CompletableFuture<>();
    String key = dataBlockOffloadKey(ledgerId, uid);
    String indexKey = indexBlockOffloadKey(ledgerId, uid);
    scheduler.chooseThread(ledgerId).submit(() -> {
        try {
            promise.complete(BlobStoreBackedReadHandleImpl.open(scheduler.chooseThread(ledgerId), readBlobstore,
                    readBucket, key, indexKey, VERSION_CHECK, ledgerId, readBufferSize));
        } catch (Throwable t) {
            log.error("Failed readOffloaded: ", t);
            promise.completeExceptionally(t);
        }
    });
    return promise;
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreManagedLedgerOffloader.java

@Override
public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid,
        Map<String, String> offloadDriverMetadata) {
    String readBucket = getReadBucket(offloadDriverMetadata);
    BlobStore readBlobstore = getReadBlobStore(offloadDriverMetadata);

    CompletableFuture<Void> promise = new CompletableFuture<>();
    scheduler.chooseThread(ledgerId).submit(() -> {
        try {
            readBlobstore.removeBlobs(readBucket,
                    ImmutableList.of(dataBlockOffloadKey(ledgerId, uid), indexBlockOffloadKey(ledgerId, uid)));
            promise.complete(null);
        } catch (Throwable t) {
            log.error("Failed delete Blob", t);
            promise.completeExceptionally(t);
        }
    });

    return promise;
}

From source file:org.apache.bookkeeper.mledger.offload.OffloaderUtils.java

/**
 * Extract the Pulsar offloader factory from an offloader archive.
 *
 * @param narPath nar package path
 * @return the nar class loader and the offloader factory loaded from the archive
 * @throws IOException when the pulsar offloader factory cannot be retrieved
 */
static Pair<NarClassLoader, LedgerOffloaderFactory> getOffloaderFactory(String narPath) throws IOException {
    NarClassLoader ncl = NarClassLoader.getFromArchive(new File(narPath), Collections.emptySet());
    String configStr = ncl.getServiceDefinition(PULSAR_OFFLOADER_SERVICE_NAME);

    OffloaderDefinition conf = ObjectMapperFactory.getThreadLocalYaml().readValue(configStr,
            OffloaderDefinition.class);
    if (StringUtils.isEmpty(conf.getOffloaderFactoryClass())) {
        throw new IOException(String.format(
                "The '%s' offloader does not provide an offloader factory implementation", conf.getName()));
    }

    try {
        // Try to load offloader factory class and check it implements Offloader interface
        Class factoryClass = ncl.loadClass(conf.getOffloaderFactoryClass());
        CompletableFuture<LedgerOffloaderFactory> loadFuture = new CompletableFuture<>();
        Thread loadingThread = new Thread(() -> {
            Thread.currentThread().setContextClassLoader(ncl);

            log.info("Loading offloader factory {} using class loader {}", factoryClass, ncl);
            try {
                Object offloader = factoryClass.newInstance();
                if (!(offloader instanceof LedgerOffloaderFactory)) {
                    throw new IOException("Class " + conf.getOffloaderFactoryClass()
                            + " does not implement interface " + LedgerOffloaderFactory.class.getName());
                }
                loadFuture.complete((LedgerOffloaderFactory) offloader);
            } catch (Throwable t) {
                loadFuture.completeExceptionally(t);
            }
        }, "load-factory-" + factoryClass);
        try {
            loadingThread.start();
            return Pair.of(ncl, loadFuture.get());
        } finally {
            loadingThread.join();
        }
    } catch (Throwable t) {
        rethrowIOException(t);
    }
    return null;
}

From source file:org.apache.bookkeeper.tests.integration.utils.DockerUtils.java

public static void dumpContainerLogToTarget(DockerClient docker, String containerId) {
    File output = new File(getTargetDirectory(containerId), "docker.log");
    try (FileOutputStream os = new FileOutputStream(output)) {
        CompletableFuture<Boolean> future = new CompletableFuture<>();
        docker.logContainerCmd(containerId).withStdOut(true).withStdErr(true).withTimestamps(true)
                .exec(new ResultCallback<Frame>() {
                    @Override
                    public void close() {
                    }

                    @Override
                    public void onStart(Closeable closeable) {
                    }

                    @Override
                    public void onNext(Frame object) {
                        try {
                            os.write(object.getPayload());
                        } catch (IOException e) {
                            onError(e);
                        }
                    }

                    @Override
                    public void onError(Throwable throwable) {
                        future.completeExceptionally(throwable);
                    }

                    @Override
                    public void onComplete() {
                        future.complete(true);
                    }
                });
        future.get();
    } catch (RuntimeException | ExecutionException | IOException e) {
        LOG.error("Error dumping log for {}", containerId, e);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        LOG.info("Interrupted dumping log from container {}", containerId, ie);
    }
}

From source file:org.apache.bookkeeper.tests.integration.utils.DockerUtils.java

public static String runCommand(DockerClient docker, String containerId, boolean ignoreError, String... cmd)
        throws Exception {
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    String execid = docker.execCreateCmd(containerId).withCmd(cmd).withAttachStderr(true).withAttachStdout(true)
            .exec().getId();
    String cmdString = Arrays.stream(cmd).collect(Collectors.joining(" "));
    StringBuffer output = new StringBuffer();
    docker.execStartCmd(execid).withDetach(false).exec(new ResultCallback<Frame>() {
        @Override
        public void close() {
        }

        @Override
        public void onStart(Closeable closeable) {
            LOG.info("DOCKER.exec({}:{}): Executing...", containerId, cmdString);
        }

        @Override
        public void onNext(Frame object) {
            LOG.info("DOCKER.exec({}:{}): {}", containerId, cmdString, object);
            output.append(new String(object.getPayload(), UTF_8));
        }

        @Override
        public void onError(Throwable throwable) {
            future.completeExceptionally(throwable);
        }

        @Override
        public void onComplete() {
            LOG.info("DOCKER.exec({}:{}): Done", containerId, cmdString);
            future.complete(true);
        }
    });
    future.get();

    InspectExecResponse resp = docker.inspectExecCmd(execid).exec();
    while (resp.isRunning()) {
        Thread.sleep(200);
        resp = docker.inspectExecCmd(execid).exec();
    }
    int retCode = resp.getExitCode();
    if (retCode != 0) {
        LOG.error("DOCKER.exec({}:{}): failed with {} : {}", containerId, cmdString, retCode, output);
        if (!ignoreError) {
            throw new Exception(
                    String.format("cmd(%s) failed on %s with exitcode %d", cmdString, containerId, retCode));
        }
    } else {
        LOG.info("DOCKER.exec({}:{}): completed with {}", containerId, cmdString, retCode);
    }
    return output.toString();
}
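
Both DockerUtils methods above adapt docker-java's callback-style ResultCallback into a CompletableFuture: onError routes to completeExceptionally, onComplete to complete, and the caller blocks on get(). A generic, hypothetical sketch of that bridging pattern (the Callback interface here merely stands in for ResultCallback):

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

class CallbackBridgeSketch {
    // Hypothetical callback interface standing in for docker-java's ResultCallback.
    interface Callback<T> {
        void onNext(T item);
        void onError(Throwable t);
        void onComplete();
    }

    // Bridge a callback-driven API into a CompletableFuture so the caller can
    // block with get() or compose further stages.
    static <T> CompletableFuture<Boolean> bridge(Consumer<Callback<T>> start, Consumer<T> onItem) {
        CompletableFuture<Boolean> done = new CompletableFuture<>();
        start.accept(new Callback<T>() {
            @Override
            public void onNext(T item) {
                onItem.accept(item);
            }

            @Override
            public void onError(Throwable t) {
                done.completeExceptionally(t);
            }

            @Override
            public void onComplete() {
                done.complete(true);
            }
        });
        return done;
    }
}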

From source file:org.apache.distributedlog.auditor.DLAuditor.java

/**
 * Find leaked ledgers, phase 1: collect the set of ledgers from BookKeeper.
 */
private Set<Long> collectLedgersFromBK(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());

    final Set<Long> ledgers = new HashSet<Long>();
    final CompletableFuture<Void> doneFuture = FutureUtils.createFuture();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(Long lid, final AsyncCallback.VoidCallback cb) {
            synchronized (ledgers) {
                ledgers.add(lid);
                if (0 == ledgers.size() % 1000) {
                    logger.info("Collected {} ledgers", ledgers.size());
                }
            }
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    cb.processResult(BKException.Code.OK, null, null);
                }
            });

        }
    };
    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.complete(null);
            } else {
                doneFuture.completeExceptionally(BKException.create(rc));
            }
        }
    };
    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);
    try {
        doneFuture.get();
        logger.info("Collected total {} ledgers", ledgers.size());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on collecting ledgers : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to collect ledgers : ", e.getCause());
        }
    }
    return ledgers;
}