Example usage for java.util.concurrent CompletableFuture CompletableFuture

List of usage examples for java.util.concurrent CompletableFuture CompletableFuture

Introduction

On this page you can find example usage for the java.util.concurrent CompletableFuture constructor, CompletableFuture().

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
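
Before the usage examples, here is a minimal self-contained sketch of the lifecycle the constructor implies (the names below are illustrative, not taken from the examples): one side creates the incomplete future, another side decides its outcome, and callers block or chain callbacks.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ManualCompletionExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // the constructor yields an incomplete future; no task runs on its behalf
        CompletableFuture<String> future = new CompletableFuture<>();

        // some other thread decides the outcome via complete(...) or completeExceptionally(...)
        pool.submit(() -> future.complete("done"));

        System.out.println(future.get()); // blocks until completed, then prints "done"
        pool.shutdown();
    }
}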

Usage

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> createHistoryTableIfAbsent(Data<Integer> data) {
    Preconditions.checkNotNull(data);
    Preconditions.checkNotNull(data.getData());

    CompletableFuture<Void> result = new CompletableFuture<>();

    synchronized (lock) {
        if (historyTable == null) {
            historyTable = new Data<>(Arrays.copyOf(data.getData(), data.getData().length), 0);
        }
    }
    result.complete(null);
    return result;
}
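
When the result is known up front, the static factory CompletableFuture.completedFuture(value) expresses the same create-and-complete step in one call.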

From source file:org.mascherl.example.service.ComposeMailService.java

public CompletableFuture<List<MailAddressUsage>> getLastSendToAddressesAsync2(User currentUser, int limit) {
    CompletableFuture<List<MailAddressUsage>> completableFuture = new CompletableFuture<>();
    db.query(
            "select distinct mto.address, m.datetime "
                    + "from mail m "
                    + "join mail_to mto on mto.mail_uuid = m.uuid "
                    + "where m.user_uuid = $1 and m.mail_type = $2 "
                    + "and not exists ("
                    + "   select 1 from mail m2 "
                    + "   join mail_to mto2 on mto2.mail_uuid = m2.uuid "
                    + "   where m2.user_uuid = $1 and m2.mail_type = $2 "
                    + "   and mto2.address = mto.address and m2.datetime > m.datetime "
                    + ") "
                    + "order by m.datetime desc limit $3",
            Arrays.asList(currentUser.getUuid(), MailType.SENT.name(), limit), result -> {
                try {
                    TimestampColumnZonedDateTimeMapper dateTimeColumnMapper = new PersistentZonedDateTime()
                            .getColumnMapper();
                    List<MailAddressUsage> usages = StreamSupport.stream(result.spliterator(), false)
                            .map(row -> new MailAddressUsage(new MailAddress(row.getString(0)),
                                    dateTimeColumnMapper.fromNonNullValue(row.getTimestamp(1))))
                            .collect(Collectors.toList());
                    completableFuture.complete(usages);
                } catch (Exception e) {
                    completableFuture.completeExceptionally(e);
                }
            }, completableFuture::completeExceptionally);

    return completableFuture;
}
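
The example above bridges a callback-based query API into a CompletableFuture. A hedged sketch of the same pattern in isolation; AsyncApi and its query method are hypothetical stand-ins for any API that reports results through success and failure callbacks:

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

public class CallbackBridge {
    // hypothetical callback-style API, standing in for db.query above
    interface AsyncApi<T> {
        void query(Consumer<T> onSuccess, Consumer<Throwable> onFailure);
    }

    static <T> CompletableFuture<T> toFuture(AsyncApi<T> api) {
        CompletableFuture<T> future = new CompletableFuture<>();
        // the success and failure callbacks each complete the same future exactly once
        api.query(future::complete, future::completeExceptionally);
        return future;
    }
}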

From source file:org.apache.pulsar.functions.runtime.ProcessRuntime.java

public CompletableFuture<InstanceCommunication.HealthCheckResult> healthCheck() {
    CompletableFuture<InstanceCommunication.HealthCheckResult> retval = new CompletableFuture<>();
    if (stub == null) {
        retval.completeExceptionally(new RuntimeException("Not alive"));
        return retval;
    }
    ListenableFuture<InstanceCommunication.HealthCheckResult> response = stub
            .withDeadlineAfter(GRPC_TIMEOUT_SECS, TimeUnit.SECONDS).healthCheck(Empty.newBuilder().build());
    Futures.addCallback(response, new FutureCallback<InstanceCommunication.HealthCheckResult>() {
        @Override
        public void onFailure(Throwable throwable) {
            retval.completeExceptionally(throwable);
        }

        @Override
        public void onSuccess(InstanceCommunication.HealthCheckResult t) {
            retval.complete(t);
        }
    });
    return retval;
}
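
The example above adapts a Guava ListenableFuture by completing a CompletableFuture from a FutureCallback. A generic sketch of that adapter, assuming Guava is on the classpath; note that recent Guava versions require an explicit Executor argument to addCallback:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.CompletableFuture;

public class ListenableFutureAdapter {
    static <T> CompletableFuture<T> toCompletableFuture(ListenableFuture<T> lf) {
        CompletableFuture<T> cf = new CompletableFuture<>();
        Futures.addCallback(lf, new FutureCallback<T>() {
            @Override
            public void onSuccess(T value) {
                cf.complete(value);
            }

            @Override
            public void onFailure(Throwable t) {
                cf.completeExceptionally(t);
            }
        }, MoreExecutors.directExecutor());
        return cf;
    }
}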

From source file:com.devicehive.service.UserService.java

public CompletableFuture<List<UserVO>> list(String login, String loginPattern, Integer role, Integer status,
        String sortField, Boolean sortOrderAsc, Integer take, Integer skip) {
    ListUserRequest request = new ListUserRequest();
    request.setLogin(login);
    request.setLoginPattern(loginPattern);
    request.setRole(role);
    request.setStatus(status);
    request.setSortField(sortField);
    request.setSortOrderAsc(sortOrderAsc);
    request.setTake(take);
    request.setSkip(skip);

    CompletableFuture<Response> future = new CompletableFuture<>();

    rpcClient.call(Request.newBuilder().withBody(request).build(), new ResponseConsumer(future));

    return future.thenApply(r -> ((ListUserResponse) r.getBody()).getUsers());
}

From source file:com.ikanow.aleph2.aleph2_rest_utils.DataStoreCrudService.java

@Override
public CompletableFuture<Long> countObjectsBySpec(QueryComponent<FileDescriptor> spec) {
    try {
        return CompletableFuture.completedFuture(
                new DataStoreCursor(getFolderFilenames(output_directory, fileContext)).count());
    } catch (IllegalArgumentException | IOException e) {
        final CompletableFuture<Long> fut = new CompletableFuture<Long>();
        fut.completeExceptionally(e);
        return fut;
    }
}
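
The catch block above shows the common pre-Java-9 idiom for returning an already-failed future. A small helper makes it reusable; on Java 9 and later, the built-in CompletableFuture.failedFuture(Throwable) does the same thing directly:

import java.util.concurrent.CompletableFuture;

public class FailedFutures {
    // pre-Java-9 equivalent of CompletableFuture.failedFuture(t)
    static <T> CompletableFuture<T> failed(Throwable t) {
        CompletableFuture<T> future = new CompletableFuture<>();
        future.completeExceptionally(t);
        return future;
    }
}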

From source file:io.pravega.client.stream.mock.MockController.java

private CompletableFuture<Void> abortTxSegment(UUID txId, Segment segment) {
    CompletableFuture<Void> result = new CompletableFuture<>();
    FailingReplyProcessor replyProcessor = new FailingReplyProcessor() {

        @Override
        public void connectionDropped() {
            result.completeExceptionally(new ConnectionClosedException());
        }

        @Override
        public void wrongHost(WrongHost wrongHost) {
            result.completeExceptionally(new NotImplementedException());
        }

        @Override
        public void transactionCommitted(TransactionCommitted transactionCommitted) {
            result.completeExceptionally(new RuntimeException("Transaction already committed."));
        }

        @Override
        public void transactionAborted(TransactionAborted transactionAborted) {
            result.complete(null);
        }

        @Override
        public void processingFailure(Exception error) {
            result.completeExceptionally(error);
        }
    };
    sendRequestOverNewConnection(new AbortTransaction(idGenerator.get(), segment.getScopedName(), txId),
            replyProcessor, result);
    return result;
}

From source file:org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider.java

private CompletableFuture<Void> updateSubscriptionPermissionAsync(NamespaceName namespace,
        String subscriptionName, Set<String> roles, boolean remove) {
    CompletableFuture<Void> result = new CompletableFuture<>();

    try {
        validatePoliciesReadOnlyAccess();
    } catch (Exception e) {
        result.completeExceptionally(e);
        return result;
    }

    ZooKeeper globalZk = configCache.getZooKeeper();
    final String policiesPath = String.format("/%s/%s/%s", "admin", POLICIES, namespace.toString());

    try {
        Stat nodeStat = new Stat();
        byte[] content = globalZk.getData(policiesPath, null, nodeStat);
        Policies policies = getThreadLocal().readValue(content, Policies.class);
        if (remove) {
            if (policies.auth_policies.subscription_auth_roles.get(subscriptionName) != null) {
                policies.auth_policies.subscription_auth_roles.get(subscriptionName).removeAll(roles);
            } else {
                log.info("[{}] Couldn't find role {} while revoking for sub = {}", namespace, subscriptionName,
                        roles);
                result.completeExceptionally(new IllegalArgumentException("couldn't find subscription"));
                return result;
            }
        } else {
            policies.auth_policies.subscription_auth_roles.put(subscriptionName, roles);
        }

        // Write back the new policies into zookeeper
        globalZk.setData(policiesPath, getThreadLocal().writeValueAsBytes(policies), nodeStat.getVersion());

        configCache.policiesCache().invalidate(policiesPath);

        log.info("[{}] Successfully granted access for role {} for sub = {}", namespace, subscriptionName,
                roles);
        result.complete(null);
    } catch (KeeperException.NoNodeException e) {
        log.warn("[{}] Failed to set permissions for subscription {}: namespace does not exist", namespace,
                subscriptionName);
        result.completeExceptionally(new IllegalArgumentException("Namespace does not exist: " + namespace));
    } catch (KeeperException.BadVersionException e) {
        log.warn("[{}] Failed to set permissions for roles {} on subscription {}: concurrent modification",
                namespace, roles, subscriptionName);
        result.completeExceptionally(new IllegalStateException(
                "Concurrent modification on zk path: " + policiesPath + ", " + e.getMessage()));
    } catch (Exception e) {
        log.error("[{}] Failed to update permissions for roles {} on subscription {}", namespace, roles,
                subscriptionName, e);
        result.completeExceptionally(
                new IllegalStateException("Failed to update permissions for namespace " + namespace));
    }

    return result;
}

From source file:com.ikanow.aleph2.analytics.storm.utils.StormControllerUtil.java

/**
 * Checks the jar cache to see if an entry already exists for this list of jars,
 * returns the path of that entry if it does exist, otherwise creates the jar, adds
 * the path to the cache and returns it.
 *
 * @param jars_to_merge the jar file paths to merge into a single topology jar
 * @param cached_jar_dir the directory to store and look up cached topology jars in
 * @return a future that completes with the path of the cached or newly built jar
 */
public static synchronized CompletableFuture<String> buildOrReturnCachedStormTopologyJar(
        final Collection<String> jars_to_merge, final String cached_jar_dir) {
    CompletableFuture<String> future = new CompletableFuture<String>();
    final String hashed_jar_name = JarBuilderUtil.getHashedJarName(jars_to_merge, cached_jar_dir);
    //1. Check cache for this jar via hash of jar names
    if (storm_topology_jars_cache.containsKey(hashed_jar_name)) {
        //if exists:
        //2. validate the jars have not been updated
        Date most_recent_update = JarBuilderUtil.getMostRecentlyUpdatedFile(jars_to_merge);
        //if the cache is more recent than any of the files, we assume nothing has been updated
        if (storm_topology_jars_cache.get(hashed_jar_name).getTime() > most_recent_update.getTime()) {
            //RETURN return cached jar file path
            _logger.debug("Returning a cached copy of the jar");
            //update the cache copy to set its modified time to now so we don't clean it up
            JarBuilderUtil.updateJarModifiedTime(hashed_jar_name);
            future.complete(hashed_jar_name);
            return future;
        } else {
            //delete cache copy
            _logger.debug("Removing an expired cached copy of the jar");
            removeCachedJar(hashed_jar_name);
        }
    }

    //if we fall through
    //3. create jar
    _logger.debug("Fell through or cache copy is old, have to create a new version");
    if (buildStormTopologyJar(jars_to_merge, hashed_jar_name)) {
        //4. add jar to cache w/ current/newest file timestamp      
        storm_topology_jars_cache.put(hashed_jar_name, new Date());
        //RETURN return new jar file path
        future.complete(hashed_jar_name);
    } else {
        //building the jar failed; surface the error through the future
        future.completeExceptionally(new Exception("Error trying to create storm jar, see logs"));
    }
    return future;

}
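
The method above guards its check-then-build sequence with synchronized. As a hedged sketch of an alternative (which deliberately omits the staleness check and eviction the original performs), ConcurrentHashMap.computeIfAbsent can make the lookup atomic, so concurrent callers for the same key share one in-flight future instead of serializing through a lock:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

public class FutureCache {
    private final ConcurrentMap<String, CompletableFuture<String>> cache = new ConcurrentHashMap<>();

    // first caller for a key triggers the build; later callers reuse the same future
    CompletableFuture<String> getOrBuild(String key, Function<String, CompletableFuture<String>> builder) {
        return cache.computeIfAbsent(key, builder);
    }
}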

From source file:org.apache.pulsar.client.impl.ConsumerImpl.java

@Override
public CompletableFuture<Void> unsubscribeAsync() {
    if (getState() == State.Closing || getState() == State.Closed) {
        return FutureUtil
                .failedFuture(new PulsarClientException.AlreadyClosedException("Consumer was already closed"));
    }
    final CompletableFuture<Void> unsubscribeFuture = new CompletableFuture<>();
    if (isConnected()) {
        setState(State.Closing);
        long requestId = client.newRequestId();
        ByteBuf unsubscribe = Commands.newUnsubscribe(consumerId, requestId);
        ClientCnx cnx = cnx();
        cnx.sendRequestWithId(unsubscribe, requestId).thenRun(() -> {
            cnx.removeConsumer(consumerId);
            unAckedMessageTracker.close();
            if (possibleSendToDeadLetterTopicMessages != null) {
                possibleSendToDeadLetterTopicMessages.clear();
            }
            client.cleanupConsumer(ConsumerImpl.this);
            log.info("[{}][{}] Successfully unsubscribed from topic", topic, subscription);
            setState(State.Closed);
            unsubscribeFuture.complete(null);
        }).exceptionally(e -> {
            log.error("[{}][{}] Failed to unsubscribe: {}", topic, subscription, e.getCause().getMessage());
            setState(State.Ready);
            unsubscribeFuture.completeExceptionally(e.getCause());
            return null;
        });
    } else {
        unsubscribeFuture.completeExceptionally(new PulsarClientException("Not connected to broker"));
    }
    return unsubscribeFuture;
}
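
The thenRun/exceptionally pair above forwards the inner request's outcome into unsubscribeFuture while also running cleanup on success. When only the outcome needs forwarding, whenComplete expresses it in one step; a minimal generic sketch:

import java.util.concurrent.CompletableFuture;

public class Forwarding {
    // copies source's outcome, value or failure, into target
    static <T> void forward(CompletableFuture<T> source, CompletableFuture<T> target) {
        source.whenComplete((value, error) -> {
            if (error != null) {
                target.completeExceptionally(error);
            } else {
                target.complete(value);
            }
        });
    }
}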

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testTrimOccursDuringOffloadLedgerDeletedBeforeOffload() throws Exception {
    CountDownLatch offloadStarted = new CountDownLatch(1);
    CompletableFuture<Long> blocker = new CompletableFuture<>();
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            offloadStarted.countDown();
            return blocker.thenCompose((trimmedLedger) -> {
                if (trimmedLedger == ledger.getId()) {
                    CompletableFuture<Void> future = new CompletableFuture<>();
                    future.completeExceptionally(new BKException.BKNoSuchLedgerExistsException());
                    return future;
                } else {
                    return super.offload(ledger, uuid, extraMetadata);
                }
            });
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");

    for (int i = 0; i < 21; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);

    PositionImpl startOfSecondLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(1).getLedgerId(), 0);
    PositionImpl startOfThirdLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(2).getLedgerId(), 0);

    // trigger an offload which should offload the first two ledgers
    OffloadCallbackPromise cbPromise = new OffloadCallbackPromise();
    ledger.asyncOffloadPrefix(startOfThirdLedger, cbPromise, null);
    offloadStarted.await();

    // trim first ledger
    long trimmedLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    cursor.markDelete(startOfSecondLedger, new HashMap<>());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getLedgerId() == trimmedLedger).count(), 0);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 0);

    // complete offloading
    blocker.complete(trimmedLedger);
    cbPromise.get();

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
    Assert.assertEquals(offloader.offloadedLedgers().size(), 1);
    Assert.assertTrue(
            offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(0).getLedgerId()));
}
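
The blocker future above doubles as a one-shot gate: the test releases the paused offload exactly when it chooses by calling complete(...). A minimal sketch of that gating trick on its own (names are illustrative):

import java.util.concurrent.CompletableFuture;

public class GateExample {
    public static void main(String[] args) {
        CompletableFuture<Void> gate = new CompletableFuture<>();

        // async work parks on the gate until the test releases it
        CompletableFuture<String> work = gate.thenApply(ignored -> "resumed");

        // ... assertions about the paused state would go here ...

        gate.complete(null); // release exactly once, when the test decides
        System.out.println(work.join()); // prints "resumed"
    }
}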