Example usage for java.util.concurrent CompletableFuture CompletableFuture

List of usage examples for java.util.concurrent CompletableFuture CompletableFuture

Introduction

On this page you can find example usage of the java.util.concurrent CompletableFuture no-argument constructor.

Prototype

public CompletableFuture() 

Source Link

Document

Creates a new incomplete CompletableFuture.

Usage

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Merges the given regions into one via a master MergeTableRegions procedure.
 *
 * @param nameOfRegionsToMerge full or encoded names of the regions to merge; at least two required
 * @param forcible whether the merge should be forced (forwarded to the master request)
 * @return a future that completes when the merge procedure finishes
 */
@Override
public CompletableFuture<Void> mergeRegions(List<byte[]> nameOfRegionsToMerge, boolean forcible) {
    // Fail fast: merging needs at least two regions.
    if (nameOfRegionsToMerge.size() < 2) {
        return failedFuture(
                new IllegalArgumentException("Can not merge only " + nameOfRegionsToMerge.size() + " region"));
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    byte[][] encodedNameOfRegionsToMerge = nameOfRegionsToMerge.stream().map(this::toEncodeRegionName)
            .toArray(byte[][]::new);

    // Validate the regions and resolve the owning table name before submitting the merge.
    addListener(checkRegionsAndGetTableName(encodedNameOfRegionsToMerge), (tableName, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }

        MergeTableRegionsRequest request = null;
        try {
            request = RequestConverter.buildMergeTableRegionsRequest(encodedNameOfRegionsToMerge, forcible,
                    ng.getNonceGroup(), ng.newNonce());
        } catch (DeserializationException e) {
            future.completeExceptionally(e);
            return;
        }

        // Submit the procedure call and relay its outcome to the returned future.
        addListener(this.<MergeTableRegionsRequest, MergeTableRegionsResponse>procedureCall(tableName, request,
                (s, c, req, done) -> s.mergeTableRegions(c, req, done), (resp) -> resp.getProcId(),
                new MergeTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        future.complete(ret);
                    }
                });
    });
    return future;
}

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

/**
 * Periodic garbage-collection check: deletes this topic when it has been inactive
 * longer than {@code gcIntervalInSeconds} and is not protected by retention.
 *
 * @param gcIntervalInSeconds minimum inactivity window (seconds) before the topic
 *                            becomes a deletion candidate
 */
@Override
public void checkGC(int gcIntervalInSeconds) {
    if (isActive()) {
        // Still in use: refresh the activity timestamp and keep the topic.
        lastActive = System.nanoTime();
    } else if (System.nanoTime() - lastActive < TimeUnit.SECONDS.toNanos(gcIntervalInSeconds)) {
        // Gc interval did not expire yet
        return;
    } else if (shouldTopicBeRetained()) {
        // Topic activity is still within the retention period
        return;
    } else {
        // Completed once replication producers (if any) are closed; gates the deletion below.
        CompletableFuture<Void> replCloseFuture = new CompletableFuture<>();

        if (TopicName.get(topic).isGlobal()) {
            // For global namespace, close repl producers first.
            // Once all repl producers are closed, we can delete the topic,
            // provided no remote producers connected to the broker.
            if (log.isDebugEnabled()) {
                log.debug("[{}] Global topic inactive for {} seconds, closing repl producers.", topic,
                        gcIntervalInSeconds);
            }
            closeReplProducersIfNoBacklog().thenRun(() -> {
                if (hasRemoteProducers()) {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Global topic has connected remote producers. Not a candidate for GC",
                                topic);
                    }
                    replCloseFuture.completeExceptionally(
                            new TopicBusyException("Topic has connected remote producers"));
                } else {
                    log.info("[{}] Global topic inactive for {} seconds, closed repl producers", topic,
                            gcIntervalInSeconds);
                    replCloseFuture.complete(null);
                }
            }).exceptionally(e -> {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Global topic has replication backlog. Not a candidate for GC", topic);
                }
                replCloseFuture.completeExceptionally(e.getCause());
                return null;
            });
        } else {
            // Non-global topics have no repl producers to close; proceed directly.
            replCloseFuture.complete(null);
        }

        // Delete only after the replication-close step succeeded; a TopicBusyException
        // from delete(true) means the topic became active again and is not an error.
        replCloseFuture.thenCompose(v -> delete(true))
                .thenRun(() -> log.info("[{}] Topic deleted successfully due to inactivity", topic))
                .exceptionally(e -> {
                    if (e.getCause() instanceof TopicBusyException) {
                        // topic became active again
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] Did not delete busy topic: {}", topic, e.getCause().getMessage());
                        }
                    } else {
                        log.warn("[{}] Inactive topic deletion failed", topic, e);
                    }
                    return null;
                });

    }
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Splits every splittable region of the given table (no explicit split point).
 *
 * @param tableName table whose regions should be split
 * @return a future that completes when all individual region splits have completed
 */
@Override
public CompletableFuture<Void> split(TableName tableName) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(tableExists(tableName), (exist, error) -> {
        if (error != null) {
            future.completeExceptionally(error);
            return;
        }
        if (!exist) {
            future.completeExceptionally(new TableNotFoundException(tableName));
            return;
        }
        // Scan the catalog (meta) table for this table's region rows.
        addListener(
                metaTable.scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)
                        .withStartRow(MetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION))
                        .withStopRow(MetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION))),
                (results, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                        return;
                    }
                    if (results != null && !results.isEmpty()) {
                        List<CompletableFuture<Void>> splitFutures = new ArrayList<>();
                        for (Result r : results) {
                            // Skip rows without a parsable RegionInfo.
                            if (r.isEmpty() || MetaTableAccessor.getRegionInfo(r) == null) {
                                continue;
                            }
                            RegionLocations rl = MetaTableAccessor.getRegionLocations(r);
                            if (rl != null) {
                                for (HRegionLocation h : rl.getRegionLocations()) {
                                    if (h != null && h.getServerName() != null) {
                                        RegionInfo hri = h.getRegion();
                                        // Skip parents of in-progress splits and non-default replicas.
                                        if (hri == null || hri.isSplitParent()
                                                || hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
                                            continue;
                                        }
                                        // null split point: no explicit split key supplied.
                                        splitFutures.add(split(hri, null));
                                    }
                                }
                            }
                        }
                        // Complete only after every per-region split future finishes.
                        addListener(
                                CompletableFuture.allOf(
                                        splitFutures.toArray(new CompletableFuture<?>[splitFutures.size()])),
                                (ret, exception) -> {
                                    if (exception != null) {
                                        future.completeExceptionally(exception);
                                        return;
                                    }
                                    future.complete(ret);
                                });
                    } else {
                        // No region rows found: nothing to split.
                        future.complete(null);
                    }
                });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Splits the region of the given table containing {@code splitPoint}, at that point.
 *
 * @param tableName  table to split
 * @param splitPoint row key at which to split; must not be null
 * @return a future that completes when the split finishes, or fails with
 *         {@link IllegalArgumentException} on a null/unlocatable split point
 */
@Override
public CompletableFuture<Void> split(TableName tableName, byte[] splitPoint) {
    // Validate before allocating the result future: the original allocated a
    // CompletableFuture that was discarded whenever splitPoint was null.
    if (splitPoint == null) {
        return failedFuture(new IllegalArgumentException("splitPoint can not be null."));
    }
    CompletableFuture<Void> result = new CompletableFuture<>();
    // Locate (reload=true) the region that currently contains the split point.
    addListener(connection.getRegionLocator(tableName).getRegionLocation(splitPoint, true), (loc, err) -> {
        if (err != null) {
            result.completeExceptionally(err);
        } else if (loc == null || loc.getRegion() == null) {
            // Fixed ungrammatical message ("Region does not found").
            result.completeExceptionally(new IllegalArgumentException(
                    "Region not found: rowKey=" + Bytes.toStringBinary(splitPoint)));
        } else {
            // Delegate to the region-level split and relay its outcome.
            addListener(splitRegion(loc.getRegion().getRegionName(), splitPoint), (ret, err2) -> {
                if (err2 != null) {
                    result.completeExceptionally(err2);
                } else {
                    result.complete(ret);
                }
            });
        }
    });
    return result;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Splits an individual region with no explicit split point.
 *
 * @param regionName name of the region to split
 * @return a future that completes when the split procedure finishes; fails if the
 *         region is a non-default replica or currently has no hosting server
 */
@Override
public CompletableFuture<Void> splitRegion(byte[] regionName) {
    CompletableFuture<Void> resultFuture = new CompletableFuture<>();
    addListener(getRegionLocation(regionName), (loc, locateError) -> {
        if (locateError != null) {
            resultFuture.completeExceptionally(locateError);
            return;
        }
        RegionInfo region = loc.getRegion();
        // Only the primary replica may be split directly.
        if (region.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
            resultFuture.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. "
                    + "Replicas are auto-split when their primary is split."));
            return;
        }
        // A region without a hosting server cannot be split.
        if (loc.getServerName() == null) {
            resultFuture
                    .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
            return;
        }
        // Delegate to the internal split (null => no explicit split key) and relay the outcome.
        addListener(split(region, null), (ignored, splitError) -> {
            if (splitError == null) {
                resultFuture.complete(ignored);
            } else {
                resultFuture.completeExceptionally(splitError);
            }
        });
    });
    return resultFuture;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint) {
    Preconditions.checkNotNull(splitPoint,
            "splitPoint is null. If you don't specify a splitPoint, use splitRegion(byte[]) instead");
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(getRegionLocation(regionName), (location, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }/*from ww  w. j  av a  2s . c om*/
        RegionInfo regionInfo = location.getRegion();
        if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
            future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. "
                    + "Replicas are auto-split when their primary is split."));
            return;
        }
        ServerName serverName = location.getServerName();
        if (serverName == null) {
            future.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
            return;
        }
        if (regionInfo.getStartKey() != null && Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0) {
            future.completeExceptionally(
                    new IllegalArgumentException("should not give a splitkey which equals to startkey!"));
            return;
        }
        addListener(split(regionInfo, splitPoint), (ret, err2) -> {
            if (err2 != null) {
                future.completeExceptionally(err2);
            } else {
                future.complete(ret);
            }
        });
    });
    return future;
}

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

/**
 * Terminates the underlying managed ledger, disconnects all producers and notifies
 * every subscription of the termination.
 *
 * @return a future completed with the id of the last committed message, or failed
 *         with the {@code ManagedLedgerException} if termination fails
 */
public CompletableFuture<MessageId> terminate() {
    CompletableFuture<MessageId> future = new CompletableFuture<>();
    ledger.asyncTerminate(new TerminateCallback() {
        @Override
        public void terminateComplete(Position lastCommittedPosition, Object ctx) {
            // Kick out producers and let subscriptions know the topic is terminated.
            producers.forEach(Producer::disconnect);
            subscriptions.forEach((name, sub) -> sub.topicTerminated());

            PositionImpl lastPosition = (PositionImpl) lastCommittedPosition;
            // NOTE(review): -1 appears to be the "no partition" index marker — confirm.
            MessageId messageId = new MessageIdImpl(lastPosition.getLedgerId(), lastPosition.getEntryId(), -1);

            log.info("[{}] Topic terminated at {}", getName(), messageId);
            future.complete(messageId);
        }

        @Override
        public void terminateFailed(ManagedLedgerException exception, Object ctx) {
            future.completeExceptionally(exception);
        }
    }, null);

    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Submits a SplitTableRegion master procedure for the given region.
 *
 * @param hri        region to split
 * @param splitPoint row key to split at, or null for no explicit split key
 * @return a future that completes when the procedure finishes
 */
private CompletableFuture<Void> split(final RegionInfo hri, byte[] splitPoint) {
    CompletableFuture<Void> resultFuture = new CompletableFuture<>();
    TableName table = hri.getTable();
    final SplitTableRegionRequest request;
    try {
        // Nonces guard against duplicate procedure submission on retry.
        request = RequestConverter.buildSplitTableRegionRequest(hri, splitPoint, ng.getNonceGroup(),
                ng.newNonce());
    } catch (DeserializationException e) {
        resultFuture.completeExceptionally(e);
        return resultFuture;
    }

    // Submit the procedure call and relay its outcome to the returned future.
    addListener(this.<SplitTableRegionRequest, SplitTableRegionResponse>procedureCall(table, request,
            (s, c, req, done) -> s.splitRegion(c, req, done), SplitTableRegionResponse::getProcId,
            new SplitTableRegionProcedureBiConsumer(table)), (procResult, procError) -> {
                if (procError == null) {
                    resultFuture.complete(procResult);
                } else {
                    resultFuture.completeExceptionally(procError);
                }
            });
    return resultFuture;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Asks the master to assign the given region.
 *
 * @param regionName name of the region to assign
 * @return a future that completes when the master has handled the assign request
 */
@Override
public CompletableFuture<Void> assign(byte[] regionName) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(getRegionInfo(regionName), (regionInfo, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        // Issue AssignRegion to the master at the table's priority; the response carries no payload.
        addListener(this.<Void>newMasterCaller().priority(regionInfo.getTable())
                .action(((controller, stub) -> this.<AssignRegionRequest, AssignRegionResponse, Void>call(
                        controller, stub, RequestConverter.buildAssignRegionRequest(regionInfo.getRegionName()),
                        (s, c, req, done) -> s.assignRegion(c, req, done), resp -> null)))
                .call(), (ret, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        future.complete(ret);
                    }
                });
    });
    return future;
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

/**
 * Attempts an offload pass: if the offload mutex is free, selects the oldest
 * not-yet-offloaded ledgers whose cumulative size exceeds the auto-trigger
 * threshold and starts the offload loop; otherwise reschedules itself.
 *
 * @param finalPromise completed with the offload result once the pass (and unlock) finishes
 */
private void maybeOffload(CompletableFuture<PositionImpl> finalPromise) {
    if (!offloadMutex.tryLock()) {
        // Another offload holds the mutex; retry shortly instead of blocking.
        scheduledExecutor.schedule(safeRun(() -> maybeOffloadInBackground(finalPromise)), 100,
                TimeUnit.MILLISECONDS);
    } else {
        // Intermediate promise guarantees the mutex is released before the
        // outcome is propagated to finalPromise.
        CompletableFuture<PositionImpl> unlockingPromise = new CompletableFuture<>();
        unlockingPromise.whenComplete((res, ex) -> {
            offloadMutex.unlock();
            if (ex != null) {
                finalPromise.completeExceptionally(ex);
            } else {
                finalPromise.complete(res);
            }
        });

        long threshold = config.getOffloadAutoTriggerSizeThresholdBytes();
        long sizeSummed = 0;
        long alreadyOffloadedSize = 0;
        long toOffloadSize = 0;

        // Fixed: diamond operator instead of a raw ConcurrentLinkedDeque.
        ConcurrentLinkedDeque<LedgerInfo> toOffload = new ConcurrentLinkedDeque<>();

        // go through ledger list from newest to oldest and build a list to offload in oldest to newest order
        for (Map.Entry<Long, LedgerInfo> e : ledgers.descendingMap().entrySet()) {
            long size = e.getValue().getSize();
            sizeSummed += size;
            boolean alreadyOffloaded = e.getValue().hasOffloadContext()
                    && e.getValue().getOffloadContext().getComplete();
            if (alreadyOffloaded) {
                alreadyOffloadedSize += size;
            } else if (sizeSummed > threshold) {
                // Beyond the threshold and not yet offloaded: prepend so the
                // resulting deque runs oldest-to-newest.
                toOffloadSize += size;
                toOffload.addFirst(e.getValue());
            }
        }

        // Fixed: use isEmpty() — ConcurrentLinkedDeque.size() traverses the whole deque (O(n)).
        if (!toOffload.isEmpty()) {
            log.info(
                    "[{}] Going to automatically offload ledgers {}"
                            + ", total size = {}, already offloaded = {}, to offload = {}",
                    name, toOffload.stream().map(LedgerInfo::getLedgerId).collect(Collectors.toList()),
                    sizeSummed, alreadyOffloadedSize, toOffloadSize);
        } else {
            // offloadLoop will complete immediately with an empty list to offload
            log.debug("[{}] Nothing to offload, total size = {}, already offloaded = {}, threshold = {}", name,
                    sizeSummed, alreadyOffloadedSize, threshold);
        }

        offloadLoop(unlockingPromise, toOffload, PositionImpl.latest, Optional.empty());
    }
}