Usage examples for the java.util.concurrent.CompletableFuture constructor
public CompletableFuture()
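The no-argument constructor creates a future in the incomplete state; the creating code is responsible for eventually calling complete or completeExceptionally, typically from a callback or another thread, as every example below does. A minimal self-contained sketch of that create/complete/consume cycle, assuming nothing beyond the JDK (class and variable names are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ManualCompletionExample {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Create an incomplete future; some other party must complete it later.
        CompletableFuture<String> future = new CompletableFuture<>();

        // Complete it from another thread after a short delay.
        scheduler.schedule(() -> future.complete("done"), 100, TimeUnit.MILLISECONDS);

        // Consumers can block, poll, or chain callbacks as usual.
        System.out.println(future.get()); // prints "done"
        scheduler.shutdown();
    }
}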
From source file:io.pravega.service.server.host.stat.AutoScaleProcessor.java
private CompletableFuture<Void> writeRequest(ScaleEvent event) {
    CompletableFuture<Void> result = new CompletableFuture<>();
    try {
        CompletableFuture.runAsync(() -> {
            try {
                writer.get().writeEvent(event.getKey(), event).get();
                result.complete(null);
            } catch (InterruptedException | ExecutionException e) {
                log.error("error sending request to requeststream {}", e);
                result.completeExceptionally(e);
            }
        }, executor);
    } catch (RejectedExecutionException e) {
        log.error("our executor queue is full. failed to post scale event for {}/{}/{}", event.getScope(),
                event.getStream(), event.getSegmentNumber());
        result.completeExceptionally(e);
    }
    return result;
}
From source file:org.apache.hadoop.hbase.client.AsyncHBaseAdmin.java
private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
        MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall,
        Converter<RESP, PRESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() {
        @Override
        public void run(PRESP resp) {
            if (controller.failed()) {
                future.completeExceptionally(controller.getFailed());
            } else {
                try {
                    future.complete(respConverter.convert(resp));
                } catch (IOException e) {
                    future.completeExceptionally(e);
                }
            }
        }
    });
    return future;
}
From source file:org.apache.distributedlog.BookKeeperClient.java
public CompletableFuture<LedgerHandle> createLedger(int ensembleSize, int writeQuorumSize, int ackQuorumSize) {
    BookKeeper bk;
    try {
        bk = get();
    } catch (IOException ioe) {
        return FutureUtils.exception(ioe);
    }
    final CompletableFuture<LedgerHandle> promise = new CompletableFuture<LedgerHandle>();
    bk.asyncCreateLedger(ensembleSize, writeQuorumSize, ackQuorumSize, BookKeeper.DigestType.CRC32, passwd,
            new AsyncCallback.CreateCallback() {
                @Override
                public void createComplete(int rc, LedgerHandle lh, Object ctx) {
                    if (BKException.Code.OK == rc) {
                        promise.complete(lh);
                    } else {
                        promise.completeExceptionally(BKException.create(rc));
                    }
                }
            }, null, Collections.emptyMap());
    return promise;
}
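Both this BookKeeper example and the HBase example above follow the same shape: wrap a callback-style API so callers get a CompletableFuture they can compose. A hedged sketch of that adapter pattern against a made-up AsyncStore interface (the interface and its method are illustrative assumptions, not a real library):

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class CallbackAdapter {
    // Hypothetical callback-style API; stands in for BookKeeper's AsyncCallback
    // or HBase's RpcCallback.
    interface AsyncStore {
        void fetch(String key, BiConsumer<String, Throwable> callback);
    }

    // Adapt the callback into a CompletableFuture the caller can compose.
    static CompletableFuture<String> fetchAsync(AsyncStore store, String key) {
        CompletableFuture<String> promise = new CompletableFuture<>();
        store.fetch(key, (value, error) -> {
            if (error != null) {
                promise.completeExceptionally(error);
            } else {
                promise.complete(value);
            }
        });
        return promise;
    }

    public static void main(String[] args) {
        // Toy in-memory store that always succeeds.
        AsyncStore store = (key, cb) -> cb.accept("value-for-" + key, null);
        System.out.println(fetchAsync(store, "k1").join());
    }
}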
From source file:org.apache.pulsar.broker.lookup.TopicLookup.java
/**
 * Looks up the broker-service address for the namespace bundle that contains the given topic.
 *
 * a. Returns the broker address if the namespace bundle is already owned by a broker.
 * b. If the current broker receives the lookup request and is not the leader, it redirects the
 *    request by returning the leader's service address.
 * c. If the current broker is the leader, it finds the least-loaded broker to own the namespace
 *    bundle and redirects the request by returning that broker's address.
 * d. If the current broker receives a request to own the namespace bundle, it acquires ownership
 *    and returns a success (connect) response to the client.
 *
 * @param pulsarService
 * @param topicName
 * @param authoritative
 * @param clientAppId
 * @param requestId
 * @return
 */
public static CompletableFuture<ByteBuf> lookupTopicAsync(PulsarService pulsarService, TopicName topicName,
        boolean authoritative, String clientAppId, AuthenticationDataSource authenticationData, long requestId) {

    final CompletableFuture<ByteBuf> validationFuture = new CompletableFuture<>();
    final CompletableFuture<ByteBuf> lookupfuture = new CompletableFuture<>();
    final String cluster = topicName.getCluster();

    // (1) validate cluster
    getClusterDataIfDifferentCluster(pulsarService, cluster, clientAppId).thenAccept(differentClusterData -> {
        if (differentClusterData != null) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Redirecting the lookup call to {}/{} cluster={}", clientAppId,
                        differentClusterData.getBrokerServiceUrl(),
                        differentClusterData.getBrokerServiceUrlTls(), cluster);
            }
            validationFuture.complete(newLookupResponse(differentClusterData.getBrokerServiceUrl(),
                    differentClusterData.getBrokerServiceUrlTls(), true, LookupType.Redirect, requestId, false));
        } else {
            // (2) authorize client
            try {
                checkAuthorization(pulsarService, topicName, clientAppId, authenticationData);
            } catch (RestException authException) {
                log.warn("Failed to authorize {} on cluster {}", clientAppId, topicName.toString());
                validationFuture.complete(newLookupErrorResponse(ServerError.AuthorizationError,
                        authException.getMessage(), requestId));
                return;
            } catch (Exception e) {
                log.warn("Unknown error while authorizing {} on cluster {}", clientAppId, topicName.toString());
                validationFuture.completeExceptionally(e);
                return;
            }
            // (3) validate global namespace
            checkLocalOrGetPeerReplicationCluster(pulsarService, topicName.getNamespaceObject())
                    .thenAccept(peerClusterData -> {
                        if (peerClusterData == null) {
                            // (4) all validation passed: initiate lookup
                            validationFuture.complete(null);
                            return;
                        }
                        // if peer-cluster-data is present, the namespace is owned by that peer cluster and
                        // the request should be redirected to the peer cluster
                        if (StringUtils.isBlank(peerClusterData.getBrokerServiceUrl())
                                && StringUtils.isBlank(peerClusterData.getBrokerServiceUrlTls())) {
                            validationFuture.complete(newLookupErrorResponse(ServerError.MetadataError,
                                    "Redirected cluster's brokerService url is not configured", requestId));
                            return;
                        }
                        validationFuture.complete(newLookupResponse(peerClusterData.getBrokerServiceUrl(),
                                peerClusterData.getBrokerServiceUrlTls(), true, LookupType.Redirect,
                                requestId, false));
                    }).exceptionally(ex -> {
                        validationFuture.complete(
                                newLookupErrorResponse(ServerError.MetadataError, ex.getMessage(), requestId));
                        return null;
                    });
        }
    }).exceptionally(ex -> {
        validationFuture.completeExceptionally(ex);
        return null;
    });

    // Initiate lookup once validation completes
    validationFuture.thenAccept(validationFailureResponse -> {
        if (validationFailureResponse != null) {
            lookupfuture.complete(validationFailureResponse);
        } else {
            pulsarService.getNamespaceService().getBrokerServiceUrlAsync(topicName, authoritative)
                    .thenAccept(lookupResult -> {
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] Lookup result {}", topicName.toString(), lookupResult);
                        }
                        if (!lookupResult.isPresent()) {
                            lookupfuture.complete(newLookupErrorResponse(ServerError.ServiceNotReady,
                                    "No broker was available to own " + topicName, requestId));
                            return;
                        }
                        LookupData lookupData = lookupResult.get().getLookupData();
                        if (lookupResult.get().isRedirect()) {
                            boolean newAuthoritative = isLeaderBroker(pulsarService);
                            lookupfuture.complete(newLookupResponse(lookupData.getBrokerUrl(),
                                    lookupData.getBrokerUrlTls(), newAuthoritative, LookupType.Redirect,
                                    requestId, false));
                        } else {
                            // When running in standalone mode we want to redirect the client through the service
                            // url, so that the advertised address configuration is not relevant anymore.
                            boolean redirectThroughServiceUrl = pulsarService.getConfiguration()
                                    .isRunningStandalone();
                            lookupfuture.complete(newLookupResponse(lookupData.getBrokerUrl(),
                                    lookupData.getBrokerUrlTls(), true /* authoritative */, LookupType.Connect,
                                    requestId, redirectThroughServiceUrl));
                        }
                    }).exceptionally(ex -> {
                        if (ex instanceof CompletionException && ex.getCause() instanceof IllegalStateException) {
                            log.info("Failed to lookup {} for topic {} with error {}", clientAppId,
                                    topicName.toString(), ex.getCause().getMessage());
                        } else {
                            log.warn("Failed to lookup {} for topic {} with error {}", clientAppId,
                                    topicName.toString(), ex.getMessage(), ex);
                        }
                        lookupfuture.complete(
                                newLookupErrorResponse(ServerError.ServiceNotReady, ex.getMessage(), requestId));
                        return null;
                    });
        }
    }).exceptionally(ex -> {
        if (ex instanceof CompletionException && ex.getCause() instanceof IllegalStateException) {
            log.info("Failed to lookup {} for topic {} with error {}", clientAppId, topicName.toString(),
                    ex.getCause().getMessage());
        } else {
            log.warn("Failed to lookup {} for topic {} with error {}", clientAppId, topicName.toString(),
                    ex.getMessage(), ex);
        }
        lookupfuture.complete(newLookupErrorResponse(ServerError.ServiceNotReady, ex.getMessage(), requestId));
        return null;
    });

    return lookupfuture;
}
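The example uses an intermediate validationFuture as a gate: it completes with a ready error response to short-circuit, or with null to signal that the second stage may proceed. A stripped-down sketch of that two-stage shape, assuming nothing beyond the JDK (all names are illustrative, not Pulsar's API):

import java.util.concurrent.CompletableFuture;

public class TwoStageLookup {
    // Stage 1: completes with an error payload to short-circuit, or null to proceed.
    static CompletableFuture<String> validate(String topic) {
        CompletableFuture<String> gate = new CompletableFuture<>();
        if (topic.isEmpty()) {
            gate.complete("ERROR: empty topic"); // short-circuit with a ready response
        } else {
            gate.complete(null);                 // validation passed, continue to stage 2
        }
        return gate;
    }

    static CompletableFuture<String> lookup(String topic) {
        CompletableFuture<String> result = new CompletableFuture<>();
        validate(topic).thenAccept(failureResponse -> {
            if (failureResponse != null) {
                result.complete(failureResponse);       // forward the short-circuit response
            } else {
                result.complete("broker-for-" + topic); // stage 2: the real lookup
            }
        }).exceptionally(ex -> {
            result.completeExceptionally(ex);           // propagate unexpected failures
            return null;
        });
        return result;
    }

    public static void main(String[] args) {
        System.out.println(lookup("persistent-topic").join());
        System.out.println(lookup("").join());
    }
}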
From source file:io.pravega.segmentstore.server.reading.StorageReaderTests.java
/**
 * Tests the ability to auto-cancel the requests when the StorageReader is closed.
 */
@Test
public void testAutoCancelRequests() {
    final int readCount = 100;
    TestStorage storage = new TestStorage();
    storage.readImplementation = CompletableFuture::new; // Just return a Future which we will never complete - simulates a high latency read.
    @Cleanup
    StorageReader reader = new StorageReader(SEGMENT_METADATA, storage, executorService());

    // Create some reads.
    HashMap<StorageReader.Request, CompletableFuture<StorageReader.Result>> requestCompletions = new HashMap<>();
    for (int i = 0; i < readCount; i++) {
        CompletableFuture<StorageReader.Result> requestCompletion = new CompletableFuture<>();
        StorageReader.Request r = new StorageReader.Request(i * 10, 9, requestCompletion::complete,
                requestCompletion::completeExceptionally, TIMEOUT);
        reader.execute(r);
        requestCompletions.put(r, requestCompletion);
    }

    // Verify the reads aren't failed yet.
    for (val entry : requestCompletions.entrySet()) {
        Assert.assertFalse("Request is unexpectedly completed before close for request " + entry.getKey(),
                entry.getValue().isDone());
    }

    // Close the reader and verify the reads have all been cancelled.
    reader.close();
    for (val entry : requestCompletions.entrySet()) {
        Assert.assertTrue("Request is not completed with exception after close for request " + entry.getKey(),
                entry.getValue().isCompletedExceptionally());
        AssertExtensions.assertThrows(
                "Request was not failed with a CancellationException after close for request " + entry.getKey(),
                entry.getValue()::join, ex -> ex instanceof CancellationException);
    }
}
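This test (and the near-identical StorageReadManager test below) relies on a CompletableFuture detail: when a future is completed exceptionally with a CancellationException, join() rethrows that exception directly rather than wrapping it in a CompletionException. A minimal demonstration (class name illustrative):

import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;

public class CancelOnClose {
    public static void main(String[] args) {
        CompletableFuture<Void> pending = new CompletableFuture<>();

        // What a close() handler typically does with outstanding requests:
        pending.completeExceptionally(new CancellationException("component closed"));

        System.out.println(pending.isCompletedExceptionally()); // true
        try {
            pending.join();
        } catch (CancellationException e) {
            System.out.println("join threw: " + e.getMessage()); // join surfaces the cancellation directly
        }
    }
}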
From source file:com.ikanow.aleph2.storm.harvest_technology.StormHarvestTechnologyModule.java
@Override
public CompletableFuture<BasicMessageBean> onPeriodicPoll(DataBucketBean polled_bucket, IHarvestContext context) {
    CompletableFuture<BasicMessageBean> future = new CompletableFuture<BasicMessageBean>();
    TopologyInfo top_info;
    try {
        top_info = StormControllerUtil.getJobStats(storm_controller, getJobName(polled_bucket));
    } catch (Exception ex) {
        // set failure in completable future
        future.complete(new BasicMessageBean(new Date(), false, null, "onPeriodicPoll", null,
                ErrorUtils.getLongForm("{0}", ex), null));
        return future;
    }
    // TODO see if there is any info on this bucket's harvest stats; can we
    // see how many documents have been sent via the spout or something?
    future.complete(
            new BasicMessageBean(new Date(), true, null, "onPeriodicPoll", null, top_info.toString(), null));
    return future;
}
From source file:org.springframework.integration.aggregator.AggregatorTests.java
@Test
public void testCustomAggPerf() throws InterruptedException, ExecutionException, TimeoutException {
    class CustomHandler extends AbstractMessageHandler {

        // custom aggregator, only handles a single correlation
        private final ReentrantLock lock = new ReentrantLock();

        private final Collection<Message<?>> messages = new ArrayList<Message<?>>(60000);

        private final MessageChannel outputChannel;

        private CustomHandler(MessageChannel outputChannel) {
            this.outputChannel = outputChannel;
        }

        @Override
        public void handleMessageInternal(Message<?> requestMessage) {
            lock.lock();
            try {
                this.messages.add(requestMessage);
                if (this.messages.size() == 60000) {
                    List<Object> payloads = new ArrayList<Object>(this.messages.size());
                    for (Message<?> message : this.messages) {
                        payloads.add(message.getPayload());
                    }
                    this.messages.clear();
                    outputChannel.send(getMessageBuilderFactory().withPayload(payloads)
                            .copyHeaders(requestMessage.getHeaders()).build());
                }
            } finally {
                lock.unlock();
            }
        }

    }

    DirectChannel outputChannel = new DirectChannel();
    CustomHandler handler = new CustomHandler(outputChannel);
    final CompletableFuture<Collection<?>> resultFuture = new CompletableFuture<>();
    outputChannel.subscribe(message -> {
        Collection<?> payload = (Collection<?>) message.getPayload();
        logger.warn("Received " + payload.size());
        resultFuture.complete(payload);
    });
    Message<?> message = new GenericMessage<String>("foo");
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    for (int i = 0; i < 120000; i++) {
        if (i % 10000 == 0) {
            stopwatch.stop();
            logger.warn("Sent " + i + " in " + stopwatch.getTotalTimeSeconds() + " (10k in "
                    + stopwatch.getLastTaskTimeMillis() + "ms)");
            stopwatch.start();
        }
        handler.handleMessage(message);
    }
    stopwatch.stop();
    logger.warn("Sent " + 120000 + " in " + stopwatch.getTotalTimeSeconds() + " (10k in "
            + stopwatch.getLastTaskTimeMillis() + "ms)");
    Collection<?> result = resultFuture.get(10, TimeUnit.SECONDS);
    assertNotNull(result);
    assertEquals(60000, result.size());
}
From source file:org.apache.pulsar.client.impl.BinaryProtoLookupService.java
@Override
public CompletableFuture<List<String>> getTopicsUnderNamespace(NamespaceName namespace, Mode mode) {
    CompletableFuture<List<String>> topicsFuture = new CompletableFuture<List<String>>();

    AtomicLong opTimeoutMs = new AtomicLong(client.getConfiguration().getOperationTimeoutMs());
    Backoff backoff = new Backoff(100, TimeUnit.MILLISECONDS, opTimeoutMs.get() * 2, TimeUnit.MILLISECONDS, 0,
            TimeUnit.MILLISECONDS);
    getTopicsUnderNamespace(serviceNameResolver.resolveHost(), namespace, backoff, opTimeoutMs, topicsFuture,
            mode);
    return topicsFuture;
}
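Here the future is handed to a recursive helper that retries with backoff until the lookup succeeds or the operation timeout is exhausted. A JDK-only sketch of that retry shape, assuming a hypothetical operation that may fail transiently (all names are illustrative, not Pulsar's Backoff API):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class RetryWithBackoff {
    static final ScheduledExecutorService SCHEDULER = Executors.newSingleThreadScheduledExecutor();

    // Retry the supplier with doubling delays, completing the shared future on success
    // or failing it once the remaining time budget is exhausted.
    static <T> void attempt(Supplier<T> op, CompletableFuture<T> result, long delayMs, long remainingMs) {
        try {
            result.complete(op.get());
        } catch (RuntimeException e) {
            if (remainingMs <= 0) {
                result.completeExceptionally(e); // out of budget: surface the last failure
                return;
            }
            SCHEDULER.schedule(() -> attempt(op, result, delayMs * 2, remainingMs - delayMs),
                    delayMs, TimeUnit.MILLISECONDS);
        }
    }

    public static void main(String[] args) {
        CompletableFuture<String> future = new CompletableFuture<>();
        attempt(() -> {
            if (Math.random() < 0.7) {
                throw new RuntimeException("transient failure");
            }
            return "topics";
        }, future, 100, 2000);
        future.whenComplete((value, ex) -> {
            System.out.println(ex == null ? value : "failed: " + ex.getMessage());
            SCHEDULER.shutdown();
        });
    }
}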
From source file:io.pravega.segmentstore.server.reading.StorageReadManagerTests.java
/**
 * Tests the ability to auto-cancel the requests when the StorageReadManager is closed.
 */
@Test
public void testAutoCancelRequests() {
    final int readCount = 100;
    TestStorage storage = new TestStorage();
    storage.readImplementation = CompletableFuture::new; // Just return a Future which we will never complete - simulates a high latency read.
    @Cleanup
    StorageReadManager reader = new StorageReadManager(SEGMENT_METADATA, storage, executorService());

    // Create some reads.
    HashMap<StorageReadManager.Request, CompletableFuture<StorageReadManager.Result>> requestCompletions = new HashMap<>();
    for (int i = 0; i < readCount; i++) {
        CompletableFuture<StorageReadManager.Result> requestCompletion = new CompletableFuture<>();
        StorageReadManager.Request r = new StorageReadManager.Request(i * 10, 9, requestCompletion::complete,
                requestCompletion::completeExceptionally, TIMEOUT);
        reader.execute(r);
        requestCompletions.put(r, requestCompletion);
    }

    // Verify the reads aren't failed yet.
    for (val entry : requestCompletions.entrySet()) {
        Assert.assertFalse("Request is unexpectedly completed before close for request " + entry.getKey(),
                entry.getValue().isDone());
    }

    // Close the reader and verify the reads have all been cancelled.
    reader.close();
    for (val entry : requestCompletions.entrySet()) {
        Assert.assertTrue("Request is not completed with exception after close for request " + entry.getKey(),
                entry.getValue().isCompletedExceptionally());
        AssertExtensions.assertThrows(
                "Request was not failed with a CancellationException after close for request " + entry.getKey(),
                entry.getValue()::join, ex -> ex instanceof CancellationException);
    }
}
From source file:de.ii.xtraplatform.feature.provider.pgis.FeatureProviderPgis.java
private CompletionStage<Done> createFeatureStream(FeatureQuery query, FeatureConsumer featureConsumer) {
    Optional<SqlFeatureSource> featureSource = Optional.ofNullable(featureSources.get(query.getType()));

    if (!featureSource.isPresent()) {
        CompletableFuture<Done> promise = new CompletableFuture<>();
        promise.completeExceptionally(new IllegalStateException("No features available for type"));
        return promise;
    }

    return featureSource.get().runQuery(query, featureConsumer);
}
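Pre-failing a fresh future by hand, as above, is the Java 8 idiom; on JDK 9 and later the same result is available from the failedFuture factory method. A small sketch of the equivalent (class name illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

public class FailedFutureExample {
    public static void main(String[] args) {
        // JDK 9+ factory; equivalent to creating a future and calling completeExceptionally.
        CompletionStage<Void> failed = CompletableFuture.failedFuture(
                new IllegalStateException("No features available for type"));

        failed.exceptionally(ex -> {
            System.out.println("failed with: " + ex.getMessage());
            return null;
        });
    }
}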