Usage examples for java.util.concurrent.CompletableFuture.completeExceptionally
public boolean completeExceptionally(Throwable ex)
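completeExceptionally(ex) moves the future to a failed state (unless it is already completed), so dependent stages and callers of get()/join() observe the given throwable. All of the examples below follow the same basic pattern: an asynchronous operation is bridged to a CompletableFuture "promise" that is completed normally on success and completed exceptionally on failure. The following minimal sketch illustrates that pattern; the class, method, and key names here are hypothetical and are not taken from the examples below.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompleteExceptionallyExample {

    private static final ExecutorService EXECUTOR = Executors.newSingleThreadExecutor();

    // Bridge a blocking lookup to a CompletableFuture: complete normally on success,
    // complete exceptionally on failure so callers see the original error.
    static CompletableFuture<String> lookupAsync(String key) {
        CompletableFuture<String> promise = new CompletableFuture<>();
        EXECUTOR.submit(() -> {
            try {
                if (key == null) {
                    throw new IllegalArgumentException("key must not be null");
                }
                promise.complete("value-for-" + key);
            } catch (Exception e) {
                // Dependent stages (thenApply, whenComplete, ...) and join()/get()
                // will observe this exception.
                promise.completeExceptionally(e);
            }
        });
        return promise;
    }

    public static void main(String[] args) {
        // Success path: the future completes normally.
        System.out.println(lookupAsync("k1").join());

        // Failure path: completeExceptionally makes join() throw a CompletionException
        // wrapping the original IllegalArgumentException.
        try {
            lookupAsync(null).join();
        } catch (CompletionException e) {
            System.err.println("Lookup failed: " + e.getCause());
        }

        EXECUTOR.shutdown();
    }
}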
From source file:org.apache.bookkeeper.meta.MockLedgerManager.java
@Override
public CompletableFuture<Versioned<LedgerMetadata>> createLedgerMetadata(long ledgerId, LedgerMetadata metadata) {
    CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
    executor.submit(() -> {
        if (metadataMap.containsKey(ledgerId)) {
            executeCallback(() -> promise.completeExceptionally(new BKException.BKLedgerExistException()));
        } else {
            try {
                metadataMap.put(ledgerId, Pair.of(new LongVersion(0L), serDe.serialize(metadata)));
                Versioned<LedgerMetadata> readBack = readMetadata(ledgerId);
                executeCallback(() -> promise.complete(readBack));
            } catch (Exception e) {
                LOG.error("Error reading back written metadata", e);
                executeCallback(() -> promise.completeExceptionally(new BKException.MetaStoreException()));
            }
        }
    });
    return promise;
}
From source file:org.apache.bookkeeper.meta.MockLedgerManager.java
@Override
public CompletableFuture<Versioned<LedgerMetadata>> readLedgerMetadata(long ledgerId) {
    CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
    executor.submit(() -> {
        try {
            Versioned<LedgerMetadata> metadata = readMetadata(ledgerId);
            if (metadata == null) {
                executeCallback(
                        () -> promise.completeExceptionally(new BKException.BKNoSuchLedgerExistsException()));
            } else {
                executeCallback(() -> promise.complete(metadata));
            }
        } catch (Exception e) {
            LOG.error("Error reading metadata", e);
            executeCallback(() -> promise.completeExceptionally(new BKException.MetaStoreException()));
        }
    });
    return promise;
}
From source file:org.apache.bookkeeper.meta.MockLedgerManager.java
@Override
public CompletableFuture<Versioned<LedgerMetadata>> writeLedgerMetadata(long ledgerId, LedgerMetadata metadata,
        Version currentVersion) {
    CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
    preWriteHook.runHook(ledgerId, metadata).thenComposeAsync((ignore) -> {
        try {
            Versioned<LedgerMetadata> oldMetadata = readMetadata(ledgerId);
            if (oldMetadata == null) {
                return FutureUtils.exception(new BKException.BKNoSuchLedgerExistsException());
            } else if (!oldMetadata.getVersion().equals(currentVersion)) {
                return FutureUtils.exception(new BKException.BKMetadataVersionException());
            } else {
                LongVersion oldVersion = (LongVersion) oldMetadata.getVersion();
                metadataMap.put(ledgerId, Pair.of(new LongVersion(oldVersion.getLongVersion() + 1),
                        serDe.serialize(metadata)));
                Versioned<LedgerMetadata> readBack = readMetadata(ledgerId);
                return FutureUtils.value(readBack);
            }
        } catch (Exception e) {
            LOG.error("Error writing metadata", e);
            return FutureUtils.exception(e);
        }
    }, executor).whenComplete((res, ex) -> {
        if (ex != null) {
            Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
            executeCallback(() -> promise.completeExceptionally(cause));
        } else {
            executeCallback(() -> promise.complete(res));
        }
    });
    return promise;
}
From source file:org.apache.bookkeeper.metadata.etcd.Etcd64bitIdGeneratorTest.java
/**
 * Test generating id in parallel and ensure there is no duplicated id.
 */
@Test
public void testGenerateIdParallel() throws Exception {
    final int numThreads = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    final int numIds = 10000;
    final AtomicLong totalIds = new AtomicLong(numIds);
    final Set<Long> ids = Collections.newSetFromMap(new ConcurrentHashMap<>());
    final RateLimiter limiter = RateLimiter.create(1000);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    for (int i = 0; i < numThreads; i++) {
        executor.submit(() -> {
            Client client = Client.builder().endpoints(etcdContainer.getClientEndpoint()).build();
            Etcd64bitIdGenerator gen = new Etcd64bitIdGenerator(client.getKVClient(), scope);

            AtomicBoolean running = new AtomicBoolean(true);

            while (running.get()) {
                limiter.acquire();

                GenericCallbackFuture<Long> genFuture = new GenericCallbackFuture<>();
                gen.generateLedgerId(genFuture);

                genFuture.thenAccept(lid -> {
                    boolean duplicatedFound = !(ids.add(lid));
                    if (duplicatedFound) {
                        running.set(false);
                        doneFuture.completeExceptionally(
                                new IllegalStateException("Duplicated id " + lid + " generated : " + ids));
                        return;
                    } else {
                        if (totalIds.decrementAndGet() <= 0) {
                            running.set(false);
                            doneFuture.complete(null);
                        }
                    }
                }).exceptionally(cause -> {
                    running.set(false);
                    doneFuture.completeExceptionally(cause);
                    return null;
                });
            }
        });
    }

    FutureUtils.result(doneFuture);

    assertTrue(totalIds.get() <= 0);
    assertTrue(ids.size() >= numIds);
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
CompletableFuture<ReadHandle> getLedgerHandle(long ledgerId) {
    CompletableFuture<ReadHandle> ledgerHandle = ledgerCache.get(ledgerId);
    if (ledgerHandle != null) {
        return ledgerHandle;
    }

    // If not present try again and create if necessary
    return ledgerCache.computeIfAbsent(ledgerId, lid -> {
        // Open the ledger for reading if it was not already opened
        if (log.isDebugEnabled()) {
            log.debug("[{}] Asynchronously opening ledger {} for read", name, ledgerId);
        }
        mbean.startDataLedgerOpenOp();

        CompletableFuture<ReadHandle> promise = new CompletableFuture<>();

        LedgerInfo info = ledgers.get(ledgerId);
        CompletableFuture<ReadHandle> openFuture = new CompletableFuture<>();
        if (info != null && info.hasOffloadContext() && info.getOffloadContext().getComplete()) {
            UUID uid = new UUID(info.getOffloadContext().getUidMsb(), info.getOffloadContext().getUidLsb());
            // TODO: improve this to load ledger offloader by driver name recorded in metadata
            openFuture = config.getLedgerOffloader().readOffloaded(ledgerId, uid,
                    OffloadUtils.getOffloadDriverMetadata(info));
        } else {
            openFuture = bookKeeper.newOpenLedgerOp().withRecovery(!isReadOnly()).withLedgerId(ledgerId)
                    .withDigestType(config.getDigestType()).withPassword(config.getPassword()).execute();
        }
        openFuture.whenCompleteAsync((res, ex) -> {
            mbean.endDataLedgerOpenOp();
            if (ex != null) {
                ledgerCache.remove(ledgerId, promise);
                promise.completeExceptionally(createManagedLedgerException(ex));
            } else {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Successfully opened ledger {} for reading", name, ledgerId);
                }
                promise.complete(res);
            }
        }, executor.chooseThread(name));
        return promise;
    });
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
private void maybeOffload(CompletableFuture<PositionImpl> finalPromise) {
    if (!offloadMutex.tryLock()) {
        scheduledExecutor.schedule(safeRun(() -> maybeOffloadInBackground(finalPromise)), 100,
                TimeUnit.MILLISECONDS);
    } else {
        CompletableFuture<PositionImpl> unlockingPromise = new CompletableFuture<>();
        unlockingPromise.whenComplete((res, ex) -> {
            offloadMutex.unlock();
            if (ex != null) {
                finalPromise.completeExceptionally(ex);
            } else {
                finalPromise.complete(res);
            }
        });

        long threshold = config.getOffloadAutoTriggerSizeThresholdBytes();
        long sizeSummed = 0;
        long alreadyOffloadedSize = 0;
        long toOffloadSize = 0;

        ConcurrentLinkedDeque<LedgerInfo> toOffload = new ConcurrentLinkedDeque();

        // go through ledger list from newest to oldest and build a list to offload in oldest to newest order
        for (Map.Entry<Long, LedgerInfo> e : ledgers.descendingMap().entrySet()) {
            long size = e.getValue().getSize();
            sizeSummed += size;
            boolean alreadyOffloaded = e.getValue().hasOffloadContext()
                    && e.getValue().getOffloadContext().getComplete();
            if (alreadyOffloaded) {
                alreadyOffloadedSize += size;
            } else if (sizeSummed > threshold) {
                toOffloadSize += size;
                toOffload.addFirst(e.getValue());
            }
        }

        if (toOffload.size() > 0) {
            log.info(
                    "[{}] Going to automatically offload ledgers {}"
                            + ", total size = {}, already offloaded = {}, to offload = {}",
                    name, toOffload.stream().map(l -> l.getLedgerId()).collect(Collectors.toList()),
                    sizeSummed, alreadyOffloadedSize, toOffloadSize);
        } else {
            // offloadLoop will complete immediately with an empty list to offload
            log.debug("[{}] Nothing to offload, total size = {}, already offloaded = {}, threshold = {}",
                    name, sizeSummed, alreadyOffloadedSize, threshold);
        }

        offloadLoop(unlockingPromise, toOffload, PositionImpl.latest, Optional.empty());
    }
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
/**
 * Checks whether there are ledger that have been fully consumed and deletes them.
 *
 * @throws Exception
 */
void internalTrimConsumedLedgers(CompletableFuture<?> promise) {
    // Ensure only one trimming operation is active
    if (!trimmerMutex.tryLock()) {
        scheduleDeferredTrimming(promise);
        return;
    }

    List<LedgerInfo> ledgersToDelete = Lists.newArrayList();
    List<LedgerInfo> offloadedLedgersToDelete = Lists.newArrayList();
    synchronized (this) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Start TrimConsumedLedgers. ledgers={} totalSize={}", name, ledgers.keySet(),
                    TOTAL_SIZE_UPDATER.get(this));
        }
        if (STATE_UPDATER.get(this) == State.Closed) {
            log.debug("[{}] Ignoring trimming request since the managed ledger was already closed", name);
            trimmerMutex.unlock();
            promise.completeExceptionally(new ManagedLedgerAlreadyClosedException("Can't trim closed ledger"));
            return;
        }

        long slowestReaderLedgerId = -1;
        if (cursors.isEmpty()) {
            // At this point the lastLedger will be pointing to the
            // ledger that has just been closed, therefore the +1 to
            // include lastLedger in the trimming.
            slowestReaderLedgerId = currentLedger.getId() + 1;
        } else {
            PositionImpl slowestReaderPosition = cursors.getSlowestReaderPosition();
            if (slowestReaderPosition != null) {
                slowestReaderLedgerId = slowestReaderPosition.getLedgerId();
            } else {
                promise.completeExceptionally(new ManagedLedgerException("Couldn't find reader position"));
                trimmerMutex.unlock();
                return;
            }
        }

        if (log.isDebugEnabled()) {
            log.debug("[{}] Slowest consumer ledger id: {}", name, slowestReaderLedgerId);
        }

        // skip ledger if retention constraint met
        for (LedgerInfo ls : ledgers.headMap(slowestReaderLedgerId, false).values()) {
            boolean expired = hasLedgerRetentionExpired(ls.getTimestamp());
            boolean overRetentionQuota = isLedgerRetentionOverSizeQuota();

            if (log.isDebugEnabled()) {
                log.debug(
                        "[{}] Checking ledger {} -- time-old: {} sec -- "
                                + "expired: {} -- over-quota: {} -- current-ledger: {}",
                        name, ls.getLedgerId(), (clock.millis() - ls.getTimestamp()) / 1000.0, expired,
                        overRetentionQuota, currentLedger.getId());
            }
            if (ls.getLedgerId() == currentLedger.getId()) {
                log.debug("[{}] ledger id skipped for deletion as it is currently being written to", name,
                        ls.getLedgerId());
                break;
            } else if (expired) {
                log.debug("[{}] Ledger {} has expired, ts {}", name, ls.getLedgerId(), ls.getTimestamp());
                ledgersToDelete.add(ls);
            } else if (overRetentionQuota) {
                log.debug("[{}] Ledger {} is over quota", name, ls.getLedgerId());
                ledgersToDelete.add(ls);
            } else {
                log.debug("[{}] Ledger {} not deleted. Neither expired nor over-quota", name, ls.getLedgerId());
                break;
            }
        }
        for (LedgerInfo ls : ledgers.values()) {
            if (isOffloadedNeedsDelete(ls.getOffloadContext()) && !ledgersToDelete.contains(ls)) {
                log.debug("[{}] Ledger {} has been offloaded, bookkeeper ledger needs to be deleted", name,
                        ls.getLedgerId());
                offloadedLedgersToDelete.add(ls);
            }
        }

        if (ledgersToDelete.isEmpty() && offloadedLedgersToDelete.isEmpty()) {
            trimmerMutex.unlock();
            promise.complete(null);
            return;
        }

        if (STATE_UPDATER.get(this) == State.CreatingLedger // Give up now and schedule a new trimming
                || !ledgersListMutex.tryLock()) { // Avoid deadlocks with other operations updating the ledgers list
            scheduleDeferredTrimming(promise);
            trimmerMutex.unlock();
            return;
        }

        // Update metadata
        for (LedgerInfo ls : ledgersToDelete) {
            ledgerCache.remove(ls.getLedgerId());

            ledgers.remove(ls.getLedgerId());
            NUMBER_OF_ENTRIES_UPDATER.addAndGet(this, -ls.getEntries());
            TOTAL_SIZE_UPDATER.addAndGet(this, -ls.getSize());

            entryCache.invalidateAllEntries(ls.getLedgerId());
        }
        for (LedgerInfo ls : offloadedLedgersToDelete) {
            LedgerInfo.Builder newInfoBuilder = ls.toBuilder();
            newInfoBuilder.getOffloadContextBuilder().setBookkeeperDeleted(true);
            String driverName = OffloadUtils.getOffloadDriverName(ls,
                    config.getLedgerOffloader().getOffloadDriverName());
            Map<String, String> driverMetadata = OffloadUtils.getOffloadDriverMetadata(ls,
                    config.getLedgerOffloader().getOffloadDriverMetadata());
            OffloadUtils.setOffloadDriverMetadata(newInfoBuilder, driverName, driverMetadata);
            ledgers.put(ls.getLedgerId(), newInfoBuilder.build());
        }

        if (log.isDebugEnabled()) {
            log.debug("[{}] Updating of ledgers list after trimming", name);
        }

        store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, new MetaStoreCallback<Void>() {
            @Override
            public void operationComplete(Void result, Stat stat) {
                log.info("[{}] End TrimConsumedLedgers. ledgers={} totalSize={}", name, ledgers.size(),
                        TOTAL_SIZE_UPDATER.get(ManagedLedgerImpl.this));
                ledgersStat = stat;
                ledgersListMutex.unlock();
                trimmerMutex.unlock();

                for (LedgerInfo ls : ledgersToDelete) {
                    log.info("[{}] Removing ledger {} - size: {}", name, ls.getLedgerId(), ls.getSize());
                    asyncDeleteLedger(ls.getLedgerId(), ls);
                }
                for (LedgerInfo ls : offloadedLedgersToDelete) {
                    log.info("[{}] Deleting offloaded ledger {} from bookkeeper - size: {}", name,
                            ls.getLedgerId(), ls.getSize());
                    asyncDeleteLedgerFromBookKeeper(ls.getLedgerId());
                }
                promise.complete(null);
            }

            @Override
            public void operationFailed(MetaStoreException e) {
                log.warn("[{}] Failed to update the list of ledgers after trimming", name, e);
                ledgersListMutex.unlock();
                trimmerMutex.unlock();

                promise.completeExceptionally(e);
            }
        });
    }
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
@Override
public Position offloadPrefix(Position pos) throws InterruptedException, ManagedLedgerException {
    CompletableFuture<Position> promise = new CompletableFuture<>();

    asyncOffloadPrefix(pos, new OffloadCallback() {
        @Override
        public void offloadComplete(Position offloadedTo, Object ctx) {
            promise.complete(offloadedTo);
        }

        @Override
        public void offloadFailed(ManagedLedgerException e, Object ctx) {
            promise.completeExceptionally(e);
        }
    }, null);

    try {
        return promise.get(AsyncOperationTimeoutSeconds, TimeUnit.SECONDS);
    } catch (TimeoutException te) {
        throw new ManagedLedgerException("Timeout during managed ledger offload operation");
    } catch (ExecutionException e) {
        log.error("[{}] Error offloading. pos = {}", name, pos, e.getCause());
        throw ManagedLedgerException.getManagedLedgerException(e.getCause());
    }
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
private void offloadLoop(CompletableFuture<PositionImpl> promise, Queue<LedgerInfo> ledgersToOffload,
        PositionImpl firstUnoffloaded, Optional<Throwable> firstError) {
    LedgerInfo info = ledgersToOffload.poll();
    if (info == null) {
        if (firstError.isPresent()) {
            promise.completeExceptionally(firstError.get());
        } else {
            promise.complete(firstUnoffloaded);
        }
    } else {
        long ledgerId = info.getLedgerId();
        UUID uuid = UUID.randomUUID();
        Map<String, String> extraMetadata = ImmutableMap.of("ManagedLedgerName", name);

        String driverName = config.getLedgerOffloader().getOffloadDriverName();
        Map<String, String> driverMetadata = config.getLedgerOffloader().getOffloadDriverMetadata();

        prepareLedgerInfoForOffloaded(ledgerId, uuid, driverName, driverMetadata)
                .thenCompose((ignore) -> getLedgerHandle(ledgerId))
                .thenCompose(readHandle -> config.getLedgerOffloader().offload(readHandle, uuid, extraMetadata))
                .thenCompose((ignore) -> {
                    return Retries
                            .run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1),
                                    TimeUnit.SECONDS.toHours(1)).limit(10), FAIL_ON_CONFLICT,
                                    () -> completeLedgerInfoForOffloaded(ledgerId, uuid), scheduledExecutor,
                                    name)
                            .whenComplete((ignore2, exception) -> {
                                if (exception != null) {
                                    cleanupOffloaded(ledgerId, uuid, driverName, driverMetadata,
                                            "Metastore failure");
                                }
                            });
                }).whenComplete((ignore, exception) -> {
                    if (exception != null) {
                        log.info("[{}] Exception occurred during offload", name, exception);

                        PositionImpl newFirstUnoffloaded = PositionImpl.get(ledgerId, 0);
                        if (newFirstUnoffloaded.compareTo(firstUnoffloaded) > 0) {
                            newFirstUnoffloaded = firstUnoffloaded;
                        }
                        Optional<Throwable> errorToReport = firstError;
                        synchronized (ManagedLedgerImpl.this) {
                            // if the ledger doesn't exist anymore, ignore the error
                            if (ledgers.containsKey(ledgerId)) {
                                errorToReport = Optional.of(firstError.orElse(exception));
                            }
                        }

                        offloadLoop(promise, ledgersToOffload, newFirstUnoffloaded, errorToReport);
                    } else {
                        ledgerCache.remove(ledgerId);
                        offloadLoop(promise, ledgersToOffload, firstUnoffloaded, firstError);
                    }
                });
    }
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
private void tryTransformLedgerInfo(long ledgerId, LedgerInfoTransformation transformation,
        CompletableFuture<Void> finalPromise) {
    synchronized (this) {
        if (!ledgersListMutex.tryLock()) {
            // retry in 100 milliseconds
            scheduledExecutor.schedule(
                    safeRun(() -> tryTransformLedgerInfo(ledgerId, transformation, finalPromise)), 100,
                    TimeUnit.MILLISECONDS);
        } else { // lock acquired
            CompletableFuture<Void> unlockingPromise = new CompletableFuture<>();
            unlockingPromise.whenComplete((res, ex) -> {
                ledgersListMutex.unlock();
                if (ex != null) {
                    finalPromise.completeExceptionally(ex);
                } else {
                    finalPromise.complete(res);
                }
            });

            LedgerInfo oldInfo = ledgers.get(ledgerId);
            if (oldInfo == null) {
                unlockingPromise.completeExceptionally(new OffloadConflict(
                        "Ledger " + ledgerId + " no longer exists in ManagedLedger, likely trimmed"));
            } else {
                try {
                    LedgerInfo newInfo = transformation.transform(oldInfo);
                    ledgers.put(ledgerId, newInfo);
                    store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat,
                            new MetaStoreCallback<Void>() {
                                @Override
                                public void operationComplete(Void result, Stat stat) {
                                    ledgersStat = stat;
                                    unlockingPromise.complete(null);
                                }

                                @Override
                                public void operationFailed(MetaStoreException e) {
                                    unlockingPromise.completeExceptionally(e);
                                }
                            });
                } catch (ManagedLedgerException mle) {
                    unlockingPromise.completeExceptionally(mle);
                }
            }
        }
    }
}