Example usage for java.util.concurrent CompletableFuture complete

List of usage examples for java.util.concurrent CompletableFuture complete

Introduction

In this page you can find the example usage for java.util.concurrent CompletableFuture complete.

Prototype

public boolean complete(T value) 

Source Link

Document

If not already completed, sets the value returned by get() and related methods to the given value.

Usage

From source file:io.pravega.controller.server.eventProcessor.ScaleRequestHandler.java

/**
 * Executes the scale task for the given request. Exceptions raised by the
 * underlying scale task are absorbed into the returned future rather than
 * propagated to the caller.
 *
 * @param request   incoming request from request stream.
 * @param segments  segments to seal
 * @param newRanges new ranges for segments to create
 * @param context   operation context
 * @return CompletableFuture that completes when the scale attempt has finished
 */
private CompletableFuture<Void> executeScaleTask(final ScaleEvent request, final ArrayList<Integer> segments,
        final ArrayList<AbstractMap.SimpleEntry<Double, Double>> newRanges, final OperationContext context) {
    CompletableFuture<Void> result = new CompletableFuture<>();

    streamMetadataTasks.scale(request.getScope(), request.getStream(), segments, newRanges,
            System.currentTimeMillis(), context).whenCompleteAsync((res, e) -> {
                if (e != null) {
                    log.warn("Scale failed for request {}/{}/{} with exception {}", request.getScope(),
                            request.getStream(), request.getSegmentNumber(), e);
                    // LockFailedException is unwrapped so callers can retry on lock contention;
                    // all other failures are propagated as-is.
                    Throwable cause = ExceptionHelpers.getRealException(e);
                    if (cause instanceof LockFailedException) {
                        result.completeExceptionally(cause);
                    } else {
                        result.completeExceptionally(e);
                    }
                } else {
                    // completed - either successfully or with pre-condition-failure. Clear markers on all scaled segments.
                    // BUG FIX: this success-path message was previously logged at ERROR level.
                    log.info("scale done for {}/{}/{}", request.getScope(), request.getStream(),
                            request.getSegmentNumber());
                    result.complete(null);

                    clearMarkers(request.getScope(), request.getStream(), segments, context);
                }
            }, executor);

    return result;
}

From source file:io.pravega.controller.server.SegmentHelper.java

/**
 * Updates the scaling policy of a stream segment by sending an
 * {@code UPDATE_SEGMENT_POLICY} wire command to the host that owns the segment.
 *
 * @param scope               scope of the stream
 * @param stream              stream name
 * @param policy              scaling policy to apply
 * @param segmentNumber       segment whose policy is updated
 * @param hostControllerStore store used to locate the owning host
 * @param clientCF            connection factory used to reach the host
 * @return a future completed when the update is acknowledged, or completed
 *         exceptionally on connection drop, wrong host, or processing failure
 */
public CompletableFuture<Void> updatePolicy(String scope, String stream, ScalingPolicy policy,
        int segmentNumber, HostControllerStore hostControllerStore, ConnectionFactory clientCF) {
    final WireCommandType type = WireCommandType.UPDATE_SEGMENT_POLICY;
    final CompletableFuture<Void> future = new CompletableFuture<>();
    final Controller.NodeUri uri = getSegmentUri(scope, stream, segmentNumber, hostControllerStore);

    // Bridge wire-protocol replies onto the returned future.
    final FailingReplyProcessor handler = new FailingReplyProcessor() {

        @Override
        public void segmentPolicyUpdated(WireCommands.SegmentPolicyUpdated policyUpdated) {
            future.complete(null);
        }

        @Override
        public void connectionDropped() {
            future.completeExceptionally(
                    new WireCommandFailedException(type, WireCommandFailedException.Reason.ConnectionDropped));
        }

        @Override
        public void wrongHost(WireCommands.WrongHost wrongHost) {
            future.completeExceptionally(
                    new WireCommandFailedException(type, WireCommandFailedException.Reason.UnknownHost));
        }

        @Override
        public void processingFailure(Exception error) {
            future.completeExceptionally(error);
        }
    };

    final Pair<Byte, Integer> policyPair = extractFromPolicy(policy);
    final WireCommands.UpdateSegmentPolicy command = new WireCommands.UpdateSegmentPolicy(idGenerator.get(),
            Segment.getScopedName(scope, stream, segmentNumber), policyPair.getLeft(), policyPair.getRight());

    sendRequestAsync(command, handler, future, clientCF, ModelHelper.encode(uri));
    return future;
}

From source file:io.atomix.cluster.messaging.impl.NettyMessagingService.java

/**
 * Asynchronously opens a new Netty channel to the given address.
 *
 * @param address remote address to connect to
 * @return a future completed with the channel on success, or completed
 *         exceptionally with the connect failure cause
 */
private CompletableFuture<Channel> openChannel(Address address) {
    Bootstrap bootstrap = bootstrapClient(address);
    CompletableFuture<Channel> retFuture = new CompletableFuture<>();
    ChannelFuture f = bootstrap.connect();

    f.addListener(future -> {
        if (future.isSuccess()) {
            // BUG FIX: previously logged unconditionally before the async connect
            // completed (and even when it failed); log only on actual success.
            log.debug("Established a new connection to {}", address);
            retFuture.complete(f.channel());
        } else {
            retFuture.completeExceptionally(future.cause());
        }
    });
    return retFuture;
}

From source file:org.apache.bookkeeper.stream.storage.impl.sc.ZkStorageContainerManagerTest.java

/**
 * Tests basic operations such as starting or stopping containers:
 * assigns container 11 to this endpoint, verifies it starts, then reassigns
 * to container 22 and verifies 11 stops and 22 starts.
 */
@Test
public void testBasicOps() throws Exception {
    // start the storage container manager
    scManager.start();

    long containerId = 11L;
    long containerId2 = 22L;

    // mock a container and start it in the registry
    CompletableFuture<StorageContainer> startFuture = new CompletableFuture<>();
    CompletableFuture<Void> stopFuture = new CompletableFuture<>();
    CompletableFuture<StorageContainer> startFuture2 = new CompletableFuture<>();
    CompletableFuture<Void> stopFuture2 = new CompletableFuture<>();

    StorageContainer mockSc = createStorageContainer(containerId, startFuture, stopFuture);
    when(mockScFactory.createStorageContainer(eq(containerId))).thenReturn(mockSc);

    StorageContainer mockSc2 = createStorageContainer(containerId2, startFuture2, stopFuture2);
    when(mockScFactory.createStorageContainer(eq(containerId2))).thenReturn(mockSc2);

    // update assignment map
    ClusterAssignmentData cad = ClusterAssignmentData.newBuilder()
            .putServers(NetUtils.endpointToString(myEndpoint),
                    ServerAssignmentData.newBuilder().addContainers(containerId).build())
            .build();
    clusterMetadataStore.updateClusterAssignmentData(cad);

    // notify the container to complete startup
    startFuture.complete(mockSc);
    verify(scRegistry, timeout(10000).times(1)).startStorageContainer(eq(containerId));
    MoreAsserts.assertUtil(ignored -> scManager.getLiveContainers().size() >= 1, () -> null);
    assertEquals(1, scManager.getLiveContainers().size());
    assertTrue(scManager.getLiveContainers().containsKey(containerId));

    // update assignment map to remove containerId and add containerId2
    // (FIX: use the named constant instead of the magic literal 22L)
    ClusterAssignmentData newCad = ClusterAssignmentData.newBuilder()
            .putServers(NetUtils.endpointToString(myEndpoint),
                    ServerAssignmentData.newBuilder().addContainers(containerId2).build())
            .build();
    clusterMetadataStore.updateClusterAssignmentData(newCad);

    // notify the container1 to stop and container2 to start
    FutureUtils.complete(stopFuture, null);
    startFuture2.complete(mockSc2);
    verify(scRegistry, timeout(10000).times(1)).stopStorageContainer(eq(containerId), same(mockSc));
    verify(scRegistry, timeout(10000).times(1)).startStorageContainer(eq(containerId2));
    MoreAsserts.assertUtil(ignored -> !scManager.getLiveContainers().containsKey(containerId)
            && scManager.getLiveContainers().containsKey(containerId2), () -> null);
    assertEquals(1, scManager.getLiveContainers().size());
    assertFalse(scManager.getLiveContainers().containsKey(containerId));
    assertTrue(scManager.getLiveContainers().containsKey(containerId2));
}

From source file:org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.java

/**
 * Completes the given future with {@code loc} when the location satisfies the
 * locate request, returning whether the request is now settled.
 *
 * @param req    the pending locate request
 * @param future the future to complete on a match
 * @param loc    a candidate region location
 * @return {@code true} if the future was already done or has just been
 *         completed with {@code loc}; {@code false} if {@code loc} does not match
 */
private boolean tryComplete(LocateRequest req, CompletableFuture<HRegionLocation> future, HRegionLocation loc) {
    if (future.isDone()) {
        return true;
    }
    final boolean matches;
    if (req.locateType.equals(RegionLocateType.BEFORE)) {
        // for locating the row before current row, the common case is to find the previous region in
        // reverse scan, so we check the endKey first. In general, the condition should be startKey <
        // req.row and endKey >= req.row. Here we split it to endKey == req.row || (endKey > req.row
        // && startKey < req.row). The two conditions are equal since startKey < endKey.
        int cmp = Bytes.compareTo(loc.getRegionInfo().getEndKey(), req.row);
        matches = cmp == 0 || (cmp > 0 && Bytes.compareTo(loc.getRegionInfo().getStartKey(), req.row) < 0);
    } else {
        matches = loc.getRegionInfo().containsRow(req.row);
    }
    if (!matches) {
        return false;
    }
    future.complete(loc);
    return true;
}

From source file:io.pravega.controller.server.SegmentHelper.java

/**
 * Creates a stream segment by sending a {@code CREATE_SEGMENT} wire command to
 * the host that owns the segment.
 *
 * @param scope               scope of the stream
 * @param stream              stream name
 * @param segmentNumber       segment to create
 * @param policy              scaling policy for the new segment
 * @param hostControllerStore store used to locate the owning host
 * @param clientCF            connection factory used to reach the host
 * @return a future completed with {@code true} when the segment is created or
 *         already exists, or completed exceptionally on connection drop,
 *         wrong host, or processing failure
 */
public CompletableFuture<Boolean> createSegment(final String scope, final String stream,
        final int segmentNumber, final ScalingPolicy policy, final HostControllerStore hostControllerStore,
        final ConnectionFactory clientCF) {
    final WireCommandType type = WireCommandType.CREATE_SEGMENT;
    final CompletableFuture<Boolean> future = new CompletableFuture<>();
    final Controller.NodeUri uri = getSegmentUri(scope, stream, segmentNumber, hostControllerStore);

    // Bridge wire-protocol replies onto the returned future. A pre-existing
    // segment is treated the same as a fresh creation: both complete with true.
    final FailingReplyProcessor handler = new FailingReplyProcessor() {

        @Override
        public void segmentCreated(WireCommands.SegmentCreated segmentCreated) {
            future.complete(true);
        }

        @Override
        public void segmentAlreadyExists(WireCommands.SegmentAlreadyExists segmentAlreadyExists) {
            future.complete(true);
        }

        @Override
        public void connectionDropped() {
            future.completeExceptionally(
                    new WireCommandFailedException(type, WireCommandFailedException.Reason.ConnectionDropped));
        }

        @Override
        public void wrongHost(WireCommands.WrongHost wrongHost) {
            future.completeExceptionally(
                    new WireCommandFailedException(type, WireCommandFailedException.Reason.UnknownHost));
        }

        @Override
        public void processingFailure(Exception error) {
            future.completeExceptionally(error);
        }
    };

    final Pair<Byte, Integer> policyPair = extractFromPolicy(policy);
    final WireCommands.CreateSegment command = new WireCommands.CreateSegment(idGenerator.get(),
            Segment.getScopedName(scope, stream, segmentNumber), policyPair.getLeft(), policyPair.getRight());

    sendRequestAsync(command, handler, future, clientCF, ModelHelper.encode(uri));
    return future;
}

From source file:org.apache.distributedlog.BKLogHandler.java

/**
 * Completes {@code readResult} once all per-segment child fetches have finished.
 *
 * <p>Each child fetch calls this method; only the invocation that decrements
 * {@code numChildren} to zero proceeds past the first guard, so the cache update
 * and completion happen exactly once.
 *
 * @param removedSegments        segment names removed from the store
 * @param addedSegments          segment metadata newly fetched from the store
 * @param comparator             ordering for the returned segment list
 * @param readResult             future to complete with the versioned segment list
 * @param logSegmentNamesVersion version associated with the segment name listing
 * @param numChildren            countdown of outstanding child fetches
 * @param numFailures            count of child fetches that failed
 */
private void completeReadLogSegmentsFromStore(final Set<String> removedSegments,
        final Map<String, LogSegmentMetadata> addedSegments, final Comparator<LogSegmentMetadata> comparator,
        final CompletableFuture<Versioned<List<LogSegmentMetadata>>> readResult,
        final Version logSegmentNamesVersion, final AtomicInteger numChildren,
        final AtomicInteger numFailures) {
    // Only the last child fetch to complete performs the work below.
    if (0 != numChildren.decrementAndGet()) {
        return;
    }
    // NOTE(review): on failure, readResult is presumably completed exceptionally by
    // the failing child elsewhere — this path just avoids double-completing. Confirm
    // against the callers of this method.
    if (numFailures.get() > 0) {
        return;
    }
    // update the cache only when fetch completed and before #getCachedLogSegments
    updateLogSegmentCache(removedSegments, addedSegments);
    List<LogSegmentMetadata> segmentList;
    try {
        segmentList = getCachedLogSegments(comparator);
    } catch (UnexpectedException e) {
        readResult.completeExceptionally(e);
        return;
    }
    readResult.complete(new Versioned<List<LogSegmentMetadata>>(segmentList, logSegmentNamesVersion));
}

From source file:org.apache.bookkeeper.metadata.etcd.Etcd64bitIdGeneratorTest.java

/**
 * Test generating id in parallel and ensure there is no duplicated id.
 *
 * <p>Spawns {@code numThreads} workers that each repeatedly request ledger ids
 * (rate-limited to 1000/s overall) and record them in a shared concurrent set.
 * The test fails fast via {@code doneFuture} if a duplicate id is observed or
 * any generation fails, and succeeds once at least {@code numIds} ids were
 * generated without duplicates.
 */
@Test
public void testGenerateIdParallel() throws Exception {
    final int numThreads = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    final int numIds = 10000;
    // remaining ids to generate across all workers; workers stop once it reaches 0
    final AtomicLong totalIds = new AtomicLong(numIds);
    // concurrent set: add() returning false signals a duplicated id
    final Set<Long> ids = Collections.newSetFromMap(new ConcurrentHashMap<>());
    final RateLimiter limiter = RateLimiter.create(1000);
    // completed normally on success, exceptionally on duplicate or generator error
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    for (int i = 0; i < numThreads; i++) {
        executor.submit(() -> {
            Client client = Client.builder().endpoints(etcdContainer.getClientEndpoint()).build();
            Etcd64bitIdGenerator gen = new Etcd64bitIdGenerator(client.getKVClient(), scope);

            // per-worker loop flag; flipped off on completion or failure
            AtomicBoolean running = new AtomicBoolean(true);

            while (running.get()) {
                limiter.acquire();

                GenericCallbackFuture<Long> genFuture = new GenericCallbackFuture<>();
                gen.generateLedgerId(genFuture);

                genFuture.thenAccept(lid -> {
                    boolean duplicatedFound = !(ids.add(lid));
                    if (duplicatedFound) {
                        running.set(false);
                        doneFuture.completeExceptionally(
                                new IllegalStateException("Duplicated id " + lid + " generated : " + ids));
                        return;
                    } else {
                        // last id generated completes the test successfully
                        if (totalIds.decrementAndGet() <= 0) {
                            running.set(false);
                            doneFuture.complete(null);
                        }
                    }
                }).exceptionally(cause -> {
                    running.set(false);
                    doneFuture.completeExceptionally(cause);
                    return null;
                });
            }
        });
    }

    // block until success or first failure; rethrows the exceptional cause
    FutureUtils.result(doneFuture);
    assertTrue(totalIds.get() <= 0);
    // >= rather than == : in-flight requests may still add ids after doneFuture completes
    assertTrue(ids.size() >= numIds);
}

From source file:org.apache.distributedlog.lock.ZKSessionLock.java

/**
 * Deletes the ephemeral lock node for this lock, if one exists, and completes
 * the promise when done.
 *
 * <p>The promise is always completed successfully — deletion failures (other
 * than node-already-gone) are logged but not propagated, since cleanup is
 * best-effort. The ZK callback is re-dispatched onto the ordered lock-state
 * executor keyed by {@code lockPath}.
 *
 * @param promise completed (with {@code null}) once deletion has been attempted
 */
private void deleteLockNode(final CompletableFuture<Void> promise) {
    // nothing to delete if we never created a lock node
    if (null == currentNode) {
        promise.complete(null);
        return;
    }

    // version -1: delete regardless of the node's current version
    zk.delete(currentNode, -1, new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(final int rc, final String path, Object ctx) {
            lockStateExecutor.executeOrdered(lockPath, new SafeRunnable() {
                @Override
                public void safeRun() {
                    if (KeeperException.Code.OK.intValue() == rc) {
                        LOG.info("Deleted lock node {} for {} successfully.", path, lockId);
                    } else if (KeeperException.Code.NONODE.intValue() == rc
                            || KeeperException.Code.SESSIONEXPIRED.intValue() == rc) {
                        // node already gone (expired session deletes ephemerals) — treat as success
                        LOG.info("Delete node failed. Node already gone for node {} id {}, rc = {}",
                                new Object[] { path, lockId, KeeperException.Code.get(rc) });
                    } else {
                        LOG.error("Failed on deleting lock node {} for {} : {}",
                                new Object[] { path, lockId, KeeperException.Code.get(rc) });
                    }

                    // test hook; no-op in production
                    FailpointUtils.checkFailPointNoThrow(FailpointUtils.FailPointName.FP_LockUnlockCleanup);
                    promise.complete(null);
                }
            });
        }
    }, null);
}

From source file:com.yahoo.pulsar.broker.namespace.NamespaceService.java

/**
 * Builds a {@link LookupResult} for the given candidate broker by reading its
 * load report from the local ZooKeeper cache.
 *
 * @param candidateBroker broker URI string; must be non-blank
 * @return a future completed with the broker's lookup data, or completed
 *         exceptionally if the broker's znode is missing or the read fails
 * @throws Exception declared by the original signature; failures are in fact
 *         delivered through the returned future
 */
private CompletableFuture<LookupResult> createLookupResult(String candidateBroker) throws Exception {

    CompletableFuture<LookupResult> future = new CompletableFuture<>();
    try {
        checkArgument(StringUtils.isNotBlank(candidateBroker),
                "Lookup broker can't be null " + candidateBroker);
        final URI uri = new URI(candidateBroker);
        final String path = String.format("%s/%s:%s", SimpleLoadManagerImpl.LOADBALANCE_BROKERS_ROOT,
                uri.getHost(), uri.getPort());
        pulsar.getLocalZkCache().getDataAsync(path, loadReportDeserializer).thenAccept(maybeReport -> {
            if (!maybeReport.isPresent()) {
                // no load report registered under this broker's znode
                future.completeExceptionally(new KeeperException.NoNodeException(path));
                return;
            }
            LoadReport report = maybeReport.get();
            future.complete(new LookupResult(report.getWebServiceUrl(), report.getWebServiceUrlTls(),
                    report.getPulsarServiceUrl(), report.getPulsarServieUrlTls()));
        }).exceptionally(t -> {
            future.completeExceptionally(t);
            return null;
        });
    } catch (Exception e) {
        future.completeExceptionally(e);
    }
    return future;
}