Example usage for java.util.concurrent CompletableFuture completedFuture

List of usage examples for java.util.concurrent CompletableFuture completedFuture

Introduction

On this page you can find example usages of java.util.concurrent CompletableFuture.completedFuture.

Prototype

public static <U> CompletableFuture<U> completedFuture(U value) 

Source Link

Document

Returns a new CompletableFuture that is already completed with the given value.

Usage

From source file:com.microsoft.azure.servicebus.samples.deadletterqueue.DeadletterQueue.java

/**
 * Drives messages past their maximum delivery count by repeatedly abandoning them,
 * then drains the resulting dead-letter sub-queue, printing each message's
 * properties and completing it.
 *
 * @param connectionString Service Bus connection string.
 * @param queueName        name of the queue to exercise (and whose dead-letter
 *                         sub-queue is drained afterwards).
 * @return an already-completed future; the work is performed synchronously.
 * @throws Exception if a receiver cannot be created or a receive operation fails.
 */
CompletableFuture<Void> exceedMaxDelivery(String connectionString, String queueName) throws Exception {
    // BUG FIX: the original hard-coded "BasicQueue" here and below, silently
    // ignoring the queueName parameter.
    IMessageReceiver receiver = ClientFactory.createMessageReceiverFromConnectionStringBuilder(
            new ConnectionStringBuilder(connectionString, queueName), ReceiveMode.PEEKLOCK);

    // Abandon every message until a 2-second receive times out. Each abandon
    // bumps the delivery count; once it exceeds the queue's max-delivery limit
    // the broker moves the message to the dead-letter sub-queue.
    while (true) {
        IMessage msg = receiver.receive(Duration.ofSeconds(2));
        if (msg != null) {
            System.out.printf("Picked up message; DeliveryCount %d\n", msg.getDeliveryCount());
            receiver.abandon(msg.getLockToken());
        } else {
            break;
        }
    }
    // NOTE(review): receivers are not closed if receive/abandon throws;
    // acceptable for a sample, but production code should use try/finally.
    receiver.close();

    // Drain the dead-letter sub-queue, dumping broker-set properties
    // (e.g. DeadLetterReason) and completing each message.
    IMessageReceiver deadletterReceiver = ClientFactory.createMessageReceiverFromConnectionStringBuilder(
            new ConnectionStringBuilder(connectionString, queueName + "/$deadletterqueue"), ReceiveMode.PEEKLOCK);
    while (true) {
        IMessage msg = deadletterReceiver.receive(Duration.ofSeconds(2));
        if (msg != null) {
            System.out.printf("\nDeadletter message:\n");
            if (msg.getProperties() != null) {
                for (String prop : msg.getProperties().keySet()) {
                    System.out.printf("\t%s=%s\n", prop, msg.getProperties().get(prop));
                }
            }
            deadletterReceiver.complete(msg.getLockToken());
        } else {
            break;
        }
    }
    deadletterReceiver.close();
    return CompletableFuture.completedFuture(null);
}

From source file:io.symcpe.hendrix.api.dao.TestRulesManager.java

@Before
public void before() {
    // Fresh EntityManager for each test, served through the mocked application manager.
    em = emf.createEntityManager();
    when(am.getEM()).thenReturn(em);

    // Wire the Kafka-facing configuration onto the mock.
    when(am.getKafkaProducer()).thenReturn(producer);
    when(am.getRuleTopicName()).thenReturn("ruleTopic");
    when(am.getTemplateTopicName()).thenReturn("templateTopic");

    // Any send() resolves immediately with canned record metadata.
    RecordMetadata metadata = new RecordMetadata(new TopicPartition("ruleTopic", 2), 1, 1);
    when(producer.send(any())).thenReturn(CompletableFuture.completedFuture(metadata));
}

From source file:com.ikanow.aleph2.aleph2_rest_utils.DataStoreCrudService.java

/**
 * Counts the entries directly under the output directory.
 *
 * @return a future completed with the entry count, or a failed future if the
 *         directory cannot be listed.
 */
@Override
public CompletableFuture<Long> countObjects() {
    // Use a primitive long: the original Long autoboxed on every increment.
    long count = 0L;
    try {
        final RemoteIterator<FileStatus> it = fileContext.listStatus(new Path(output_directory));
        while (it.hasNext()) {
            it.next();
            count++;
        }
    } catch (IllegalArgumentException | IOException e) {
        // Surface listing failures as a failed future instead of throwing.
        return FutureUtils.returnError(e);
    }

    return CompletableFuture.completedFuture(count);
}

From source file:com.zero_x_baadf00d.play.module.redis.RedisModuleImpl.java

/**
 * Build a basic instance with injected dependencies.
 *
 * <p>Reads the Redis pool settings from the application configuration, creates a
 * {@code JedisPool} (with or without password authentication), and registers a
 * stop hook that closes the pool on application shutdown. Throws if no Redis
 * host is configured.
 *
 * @param lifecycle     The current application lifecycle
 * @param configuration The current application configuration
 * @since 16.03.09
 */
@Inject
public RedisModuleImpl(final ApplicationLifecycle lifecycle, final Configuration configuration) {
    final String redisHost = configuration.getString(RedisModuleImpl.REDISPOOL_SERVER_HOST);
    final String redisPassword = configuration.getString(RedisModuleImpl.REDISPOOL_SERVER_PASSWORD);
    final Integer redisPort = configuration.getInt(RedisModuleImpl.REDISPOOL_SERVER_PORT, 6379);
    final Integer redisConnTimeout = configuration.getInt(RedisModuleImpl.REDISPOOL_SERVER_CONN_TIMEOUT, 0);
    final Integer redisConnTotal = configuration.getInt(RedisModuleImpl.REDISPOOL_SERVER_CONN_TOTAL, 64);
    final Integer redisConnMaxIdle = configuration.getInt(RedisModuleImpl.REDISPOOL_SERVER_CONN_MAXIDLE, 16);
    // Min-idle defaults to half of max-idle when not configured explicitly.
    final Integer redisConnMinIdle = configuration.getInt(RedisModuleImpl.REDISPOOL_SERVER_CONN_MINIDLE,
            redisConnMaxIdle / 2);
    // null default: no database is selected explicitly when unset.
    this.redisDefaultDb = configuration.getInt(RedisModuleImpl.REDISPOOL_SERVER_DB_DEFAULT, null);
    if (redisHost != null) {
        final JedisPoolConfig poolConfig = new JedisPoolConfig();
        // Clamp each pool size to at least 1 so a zero/negative config value
        // cannot produce an unusable pool.
        poolConfig.setMinIdle(redisConnMinIdle > 0 ? redisConnMinIdle : 1);
        poolConfig.setMaxIdle(redisConnMaxIdle > 0 ? redisConnMaxIdle : 1);
        poolConfig.setMaxTotal(redisConnTotal > 0 ? redisConnTotal : 1);
        // Only pass the password through when one is actually configured.
        if (redisPassword != null && redisPassword.length() > 0) {
            this.redisPool = new JedisPool(poolConfig, redisHost, redisPort, redisConnTimeout, redisPassword);
        } else {
            this.redisPool = new JedisPool(poolConfig, redisHost, redisPort, redisConnTimeout);
        }
        RedisModuleImpl.LOG.info("Redis connected at {}", String.format("redis://%s:%d", redisHost, redisPort));
    } else {
        throw new RuntimeException("Redis module is not properly configured");
    }
    // Close the pool when the application stops.
    lifecycle.addStopHook(() -> {
        RedisModuleImpl.LOG.info("Shutting down Redis");
        this.redisPool.close();
        return CompletableFuture.completedFuture(null);
    });
}

From source file:com.spotify.styx.api.MiddlewaresTest.java

@Test
public void testValidClientNoBlacklist() {
    // No blacklist configured at all: the validator must let the request through.
    Supplier<Optional<List<String>>> blacklistSupplier = Optional::empty;
    RequestContext context = mockRequestContext(true);

    CompletionStage responseStage =
            CompletableFuture.completedFuture(Response.forStatus(Status.OK.withReasonPhrase("")));

    // The inner handler's stage must be returned untouched.
    assertThat(
            Middlewares.clientValidator(blacklistSupplier)
                    .apply(mockInnerHandler(context, responseStage))
                    .invoke(context),
            equalTo(responseStage));
}

From source file:com.arpnetworking.clusteraggregator.http.Routes.java

/**
 * Routes an incoming HTTP GET to the health-check or status endpoint; anything
 * else gets a 404.
 *
 * @param request the incoming HTTP request.
 * @return a stage completing with the response to send.
 */
private CompletionStage<HttpResponse> process(final HttpRequest request) {
    if (HttpMethods.GET.equals(request.method())) {
        if (_healthCheckPath.equals(request.getUri().path())) {
            // Ask the status actor; default to unhealthy (FALSE) if it doesn't answer.
            return ask("/user/status", new Status.HealthRequest(), Boolean.FALSE)
                    .thenApply(isHealthy -> HttpResponse.create()
                            .withStatus(isHealthy ? StatusCodes.OK : StatusCodes.INTERNAL_SERVER_ERROR)
                            .addHeader(PING_CACHE_CONTROL_HEADER)
                            .withEntity(JSON_CONTENT_TYPE, ByteString.fromString(
                                    "{\"status\":\"" + (isHealthy ? HEALTHY_STATE : UNHEALTHY_STATE) + "\"}")));
        } else if (_statusPath.equals(request.getUri().path())) {
            return ask("/user/status", new Status.StatusRequest(), (StatusResponse) null).thenApply(status -> {
                try {
                    return HttpResponse.create().withEntity(JSON_CONTENT_TYPE,
                            ByteString.fromString(_objectMapper.writeValueAsString(status)));
                } catch (final IOException e) {
                    // Serialization failure is a server-side problem, not the client's.
                    LOGGER.error().setMessage("Failed to serialize status").setThrowable(e).log();
                    return HttpResponse.create().withStatus(StatusCodes.INTERNAL_SERVER_ERROR);
                }
            });
        }
    }
    // Consistency: use the named constant like the rest of this method,
    // rather than the magic number 404.
    return CompletableFuture.completedFuture(HttpResponse.create().withStatus(StatusCodes.NOT_FOUND));
}

From source file:io.pravega.controller.server.eventProcessor.AutoScaleRequestHandler.java

/**
 * Handles an auto-scale "scale down" request for one stream segment.
 *
 * <p>Pipeline: mark the requested segment cold, look up the stream's active
 * segments, pick merge candidates adjacent in key-space, keep only those
 * currently marked cold, and finally post a scale request merging them. Any
 * stage that decides scaling is not possible short-circuits by passing
 * {@code null} down the chain.
 *
 * @param request the auto-scale event naming the scope/stream/segment.
 * @param policy  the stream's scaling policy (min segment count, type).
 * @param context store operation context.
 * @return a future completing when processing (or the decision not to scale) is done.
 */
private CompletableFuture<Void> processScaleDown(final AutoScaleEvent request, final ScalingPolicy policy,
        final OperationContext context) {
    log.debug("scale down request received for stream {} segment {}", request.getStream(),
            request.getSegmentNumber());
    // Fixed-segment-count streams never scale; nothing to do.
    if (policy.getType().equals(ScalingPolicy.Type.FIXED_NUM_SEGMENTS)) {
        return CompletableFuture.completedFuture(null);
    }

    return streamMetadataStore
            // Record that this segment is cold. Silent requests pin the marker
            // "forever" (Long.MAX_VALUE); otherwise it expires after the
            // validity period.
            .markCold(request.getScope(), request.getStream(), request.getSegmentNumber(),
                    request.isSilent() ? Long.MAX_VALUE : request.getTimestamp() + REQUEST_VALIDITY_PERIOD,
                    context, executor)
            .thenCompose(x -> streamMetadataStore.getActiveSegments(request.getScope(), request.getStream(),
                    context, executor))
            // Stage 1: from the active segment set, select merge candidates.
            .thenApply(activeSegments -> {
                assert activeSegments != null;
                final Optional<Segment> currentOpt = activeSegments.stream()
                        .filter(y -> y.getNumber() == request.getSegmentNumber()).findAny();
                if (!currentOpt.isPresent() || activeSegments.size() == policy.getMinNumSegments()) {
                    // if we are already at min-number of segments, we cant scale down, we have put the marker,
                    // we should simply return and do nothing.
                    return null;
                } else {
                    // Candidates: the requested segment plus its key-space
                    // neighbors (segments whose range abuts it on either side),
                    // sorted by key start so adjacency is preserved.
                    final List<Segment> candidates = activeSegments.stream()
                            .filter(z -> z.getKeyEnd() == currentOpt.get().getKeyStart()
                                    || z.getKeyStart() == currentOpt.get().getKeyEnd()
                                    || z.getNumber() == request.getSegmentNumber())
                            .sorted(Comparator.comparingDouble(Segment::getKeyStart))
                            .collect(Collectors.toList());
                    return new ImmutablePair<>(candidates, activeSegments.size() - policy.getMinNumSegments());
                }
            }).thenCompose(input -> {
                // Stage 2: keep only candidates that are themselves cold.
                if (input != null && input.getLeft().size() > 1) {
                    final List<Segment> candidates = input.getLeft();
                    final int maxScaleDownFactor = input.getRight();

                    // fetch their cold status for all candidates
                    return FutureHelpers
                            .filter(candidates,
                                    candidate -> streamMetadataStore.isCold(request.getScope(),
                                            request.getStream(), candidate.getNumber(), context, executor))
                            .thenApply(segments -> {
                                if (maxScaleDownFactor == 1 && segments.size() == 3) {
                                    // Note: sorted by keystart so just pick first two.
                                    return Lists.newArrayList(segments.get(0), segments.get(1));
                                } else {
                                    return segments;
                                }
                            });
                } else {
                    return CompletableFuture.completedFuture(null);
                }
            }).thenCompose(toMerge -> {
                // Stage 3: with at least two cold adjacent segments, issue the
                // merge covering their combined key range.
                if (toMerge != null && toMerge.size() > 1) {
                    toMerge.forEach(x -> {
                        log.debug("merging stream {}: segment {} ", request.getStream(), x.getNumber());
                    });

                    final ArrayList<AbstractMap.SimpleEntry<Double, Double>> simpleEntries = new ArrayList<>();
                    double min = toMerge.stream().mapToDouble(Segment::getKeyStart).min().getAsDouble();
                    double max = toMerge.stream().mapToDouble(Segment::getKeyEnd).max().getAsDouble();
                    simpleEntries.add(new AbstractMap.SimpleEntry<>(min, max));
                    final ArrayList<Integer> segments = new ArrayList<>();
                    toMerge.forEach(segment -> segments.add(segment.getNumber()));
                    return postScaleRequest(request, segments, simpleEntries);
                } else {
                    return CompletableFuture.completedFuture(null);
                }
            });
}

From source file:com.ikanow.aleph2.data_model.interfaces.shared_services.MockManagementCrudService.java

@Override
public ManagementFuture<Boolean> optimizeQuery(List<String> ordered_field_list) {
    // Mock implementation: immediately report "optimized" exactly when no
    // fields were requested.
    final CompletableFuture<Boolean> done = CompletableFuture.completedFuture(ordered_field_list.isEmpty());
    return FutureUtils.createManagementFuture(done);
}

From source file:io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask.java

/**
 * Handles an auto-scale "scale down" request for one stream segment.
 *
 * <p>Pipeline: mark the requested segment cold, look up the stream's active
 * segments, pick merge candidates adjacent in key-space, keep only those
 * currently marked cold, and finally post a scale request merging them. Any
 * stage that decides scaling is not possible short-circuits by passing
 * {@code null} down the chain.
 *
 * @param request the auto-scale event naming the scope/stream/segment.
 * @param policy  the stream's scaling policy (min segment count, scale type).
 * @param context store operation context.
 * @return a future completing when processing (or the decision not to scale) is done.
 */
private CompletableFuture<Void> processScaleDown(final AutoScaleEvent request, final ScalingPolicy policy,
        final OperationContext context) {
    log.info("scale down request received for stream {} segment {}", request.getStream(),
            request.getSegmentNumber());
    // Fixed-segment-count streams never scale; nothing to do.
    if (policy.getScaleType().equals(ScalingPolicy.ScaleType.FIXED_NUM_SEGMENTS)) {
        return CompletableFuture.completedFuture(null);
    }

    return streamMetadataStore
            // Record that this segment is cold. Silent requests pin the marker
            // "forever" (Long.MAX_VALUE); otherwise it expires after the
            // validity period.
            .markCold(request.getScope(), request.getStream(), request.getSegmentNumber(),
                    request.isSilent() ? Long.MAX_VALUE : request.getTimestamp() + REQUEST_VALIDITY_PERIOD,
                    context, executor)
            .thenCompose(x -> streamMetadataStore.getActiveSegments(request.getScope(), request.getStream(),
                    context, executor))
            // Stage 1: from the active segment set, select merge candidates.
            .thenApply(activeSegments -> {
                assert activeSegments != null;
                final Optional<Segment> currentOpt = activeSegments.stream()
                        .filter(y -> y.getNumber() == request.getSegmentNumber()).findAny();
                if (!currentOpt.isPresent() || activeSegments.size() == policy.getMinNumSegments()) {
                    // if we are already at min-number of segments, we cant scale down, we have put the marker,
                    // we should simply return and do nothing.
                    return null;
                } else {
                    // Candidates: the requested segment plus its key-space
                    // neighbors (segments whose range abuts it on either side),
                    // sorted by key start so adjacency is preserved.
                    final List<Segment> candidates = activeSegments.stream()
                            .filter(z -> z.getKeyEnd() == currentOpt.get().getKeyStart()
                                    || z.getKeyStart() == currentOpt.get().getKeyEnd()
                                    || z.getNumber() == request.getSegmentNumber())
                            .sorted(Comparator.comparingDouble(Segment::getKeyStart))
                            .collect(Collectors.toList());
                    return new ImmutablePair<>(candidates, activeSegments.size() - policy.getMinNumSegments());
                }
            }).thenCompose(input -> {
                // Stage 2: keep only candidates that are themselves cold.
                if (input != null && input.getLeft().size() > 1) {
                    final List<Segment> candidates = input.getLeft();
                    final int maxScaleDownFactor = input.getRight();

                    // fetch their cold status for all candidates
                    return Futures
                            .filter(candidates,
                                    candidate -> streamMetadataStore.isCold(request.getScope(),
                                            request.getStream(), candidate.getNumber(), context, executor))
                            .thenApply(segments -> {
                                if (maxScaleDownFactor == 1 && segments.size() == 3) {
                                    // Note: sorted by keystart so just pick first two.
                                    return Lists.newArrayList(segments.get(0), segments.get(1));
                                } else {
                                    return segments;
                                }
                            });
                } else {
                    return CompletableFuture.completedFuture(null);
                }
            }).thenCompose(toMerge -> {
                // Stage 3: with at least two cold adjacent segments, issue the
                // merge covering their combined key range.
                if (toMerge != null && toMerge.size() > 1) {
                    toMerge.forEach(x -> {
                        log.debug("merging stream {}: segment {} ", request.getStream(), x.getNumber());
                    });

                    final ArrayList<AbstractMap.SimpleEntry<Double, Double>> simpleEntries = new ArrayList<>();
                    double min = toMerge.stream().mapToDouble(Segment::getKeyStart).min().getAsDouble();
                    double max = toMerge.stream().mapToDouble(Segment::getKeyEnd).max().getAsDouble();
                    simpleEntries.add(new AbstractMap.SimpleEntry<>(min, max));
                    final ArrayList<Integer> segments = new ArrayList<>();
                    toMerge.forEach(segment -> segments.add(segment.getNumber()));
                    return postScaleRequest(request, segments, simpleEntries);
                } else {
                    return CompletableFuture.completedFuture(null);
                }
            });
}

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> storeCreationTimeIfAbsent(long timestamp) {
    // CAS from the sentinel (Long.MIN_VALUE) so only the first caller's
    // timestamp sticks; subsequent calls are no-ops, making this idempotent.
    creationTime.compareAndSet(Long.MIN_VALUE, timestamp);
    return CompletableFuture.completedFuture(null);
}