Example usage for java.util.concurrent CompletableFuture completedFuture

List of usage examples for java.util.concurrent CompletableFuture completedFuture

Introduction

On this page you can find usage examples for java.util.concurrent CompletableFuture completedFuture.

Prototype

public static <U> CompletableFuture<U> completedFuture(U value) 

Source Link

Document

Returns a new CompletableFuture that is already completed with the given value.
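
For illustration, here is a minimal, self-contained sketch (not taken from any of the sources below) showing that the returned future is already done, so its value can be read without blocking:

import java.util.concurrent.CompletableFuture;

public class CompletedFutureExample {
    public static void main(String[] args) {
        // completedFuture returns a future that is already done; join() never blocks
        CompletableFuture<String> greeting = CompletableFuture.completedFuture("hello");
        System.out.println(greeting.isDone()); // true
        System.out.println(greeting.join());   // hello

        // A common idiom: satisfy an asynchronous API without doing any asynchronous work,
        // for example by returning an already-completed Void future
        CompletableFuture<Void> noOp = CompletableFuture.completedFuture(null);
        System.out.println(noOp.isDone());     // true
    }
}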

Usage

From source file:org.apache.samza.system.eventhub.producer.EventHubSystemProducer.java

@Override
public synchronized CompletableFuture<Void> sendAsync(String source, OutgoingMessageEnvelope envelope) {
    LOG.debug(String.format("Trying to send %s", envelope));
    if (!isStarted) {
        throw new SamzaException("Trying to call send before the producer is started.");
    }
    if (!isInitialized) {
        // lazy initialization on the first send
        init();
    }

    String streamId = config.getStreamId(envelope.getSystemStream().getStream());

    if (!perStreamEventHubClientManagers.containsKey(streamId)) {
        String msg = String.format("Trying to send event to a destination {%s} that is not registered.",
                streamId);
        throw new SamzaException(msg);
    }

    EventData eventData = createEventData(streamId, envelope);
    // SAMZA-1654: waiting for the client library to expose the API to calculate the exact size of the AMQP message
    // https://github.com/Azure/azure-event-hubs-java/issues/305
    int eventDataLength = eventData.getBytes() == null ? 0 : eventData.getBytes().length;

    // If maxMessageSize is zero or negative, there is no message size restriction.
    if (this.maxMessageSize > 0 && eventDataLength > this.maxMessageSize) {
        LOG.info("Received a message with size {} > maxMessageSize configured {(}), Skipping it",
                eventDataLength, this.maxMessageSize);
        eventSkipRate.get(streamId).inc();
        aggEventSkipRate.inc();
        return CompletableFuture.completedFuture(null);
    }

    eventWriteRate.get(streamId).inc();
    aggEventWriteRate.inc();
    eventByteWriteRate.get(streamId).inc(eventDataLength);
    aggEventByteWriteRate.inc(eventDataLength);
    EventHubClientManager ehClient = perStreamEventHubClientManagers.get(streamId);

    // Async send call
    return sendToEventHub(streamId, eventData, getEnvelopePartitionId(envelope), ehClient.getEventHubClient());
}

From source file:org.apache.samza.table.caching.TestCachingTable.java

private void doTestCacheOps(boolean isWriteAround) {
    CachingTableDescriptor desc = new CachingTableDescriptor("1", createDummyTableDescriptor("realTable"),
            createDummyTableDescriptor("cacheTable"));
    if (isWriteAround) {
        desc.withWriteAround();
    }

    Context context = new MockContext();
    final ReadWriteTable cacheTable = getMockCache().getLeft();

    final ReadWriteTable realTable = mock(ReadWriteTable.class);

    doAnswer(invocation -> {
        String key = invocation.getArgumentAt(0, String.class);
        return CompletableFuture.completedFuture("test-data-" + key);
    }).when(realTable).getAsync(any());

    doReturn(CompletableFuture.completedFuture(null)).when(realTable).putAsync(any(), any());

    doAnswer(invocation -> {
        String tableId = invocation.getArgumentAt(0, String.class);
        if (tableId.equals("realTable")) {
            // real table
            return realTable;
        } else if (tableId.equals("cacheTable")) {
            return cacheTable;
        }

        Assert.fail();
        return null;
    }).when(context.getTaskContext()).getTable(anyString());

    when(context.getContainerContext().getContainerMetricsRegistry()).thenReturn(new NoOpMetricsRegistry());

    Map<String, String> tableConfig = desc.toConfig(new MapConfig());
    when(context.getJobContext().getConfig()).thenReturn(new MapConfig(tableConfig));

    CachingTableProvider tableProvider = new CachingTableProvider(desc.getTableId());
    tableProvider.init(context);

    CachingTable cachingTable = (CachingTable) tableProvider.getTable();

    Assert.assertEquals("test-data-1", cachingTable.get("1"));
    verify(realTable, times(1)).getAsync(any());
    verify(cacheTable, times(1)).get(any()); // cache miss
    verify(cacheTable, times(1)).put(any(), any());
    Assert.assertEquals(cachingTable.hitRate(), 0.0, 0.0); // 0 hit, 1 request
    Assert.assertEquals(cachingTable.missRate(), 1.0, 0.0);

    Assert.assertEquals("test-data-1", cachingTable.get("1"));
    verify(realTable, times(1)).getAsync(any()); // no change
    verify(cacheTable, times(2)).get(any());
    verify(cacheTable, times(1)).put(any(), any()); // no change
    Assert.assertEquals(0.5, cachingTable.hitRate(), 0.0); // 1 hit, 2 requests
    Assert.assertEquals(0.5, cachingTable.missRate(), 0.0);

    cachingTable.put("2", "test-data-XXXX");
    verify(cacheTable, times(isWriteAround ? 1 : 2)).put(any(), any());
    verify(realTable, times(1)).putAsync(any(), any());

    if (isWriteAround) {
        Assert.assertEquals("test-data-2", cachingTable.get("2")); // expects value from table
        verify(realTable, times(2)).getAsync(any()); // should have one more fetch
        Assert.assertEquals(cachingTable.hitRate(), 0.33, 0.1); // 1 hit, 3 requests
    } else {
        Assert.assertEquals("test-data-XXXX", cachingTable.get("2")); // expect value from cache
        verify(realTable, times(1)).getAsync(any()); // no change
        Assert.assertEquals(cachingTable.hitRate(), 0.66, 0.1); // 2 hits, 3 requests
    }
}

From source file:org.apache.samza.table.caching.TestCachingTable.java

@Test
public void testNonexistentKeyInTable() {
    ReadWriteTable<String, String> table = mock(ReadWriteTable.class);
    doReturn(CompletableFuture.completedFuture(null)).when(table).getAsync(any());
    ReadWriteTable<String, String> cache = getMockCache().getLeft();
    CachingTable<String, String> cachingTable = new CachingTable<>("myTable", table, cache, false);
    initTables(cachingTable);
    Assert.assertNull(cachingTable.get("abc"));
    verify(cache, times(1)).get(any());
    Assert.assertNull(cache.get("abc"));
    verify(cache, times(0)).put(any(), any());
}

From source file:org.apache.samza.table.caching.TestCachingTable.java

@Test
public void testKeyEviction() {
    ReadWriteTable<String, String> table = mock(ReadWriteTable.class);
    doReturn(CompletableFuture.completedFuture("3")).when(table).getAsync(any());
    ReadWriteTable<String, String> cache = mock(ReadWriteTable.class);

    // No handler is added to the mock cache, so get/put are no-ops; this simulates eviction
    CachingTable<String, String> cachingTable = new CachingTable<>("myTable", table, cache, false);
    initTables(cachingTable);
    cachingTable.get("abc");
    verify(table, times(1)).getAsync(any());

    // get() should go to table again
    cachingTable.get("abc");
    verify(table, times(2)).getAsync(any());
}

From source file:org.apache.samza.table.caching.TestCachingTable.java

/**
 * Testing caching in a more realistic scenario with Guava cache + remote table
 */
@Test
public void testGuavaCacheAndRemoteTable() throws Exception {
    String tableId = "testGuavaCacheAndRemoteTable";
    Cache<String, String> guavaCache = CacheBuilder.newBuilder().initialCapacity(100).build();
    final ReadWriteTable<String, String> guavaTable = new GuavaCacheTable<>(tableId + "-cache", guavaCache);

    // It is okay to share rateLimitHelper and async helper for read/write in test
    TableRateLimiter<String, String> rateLimitHelper = mock(TableRateLimiter.class);
    TableReadFunction<String, String> readFn = mock(TableReadFunction.class);
    TableWriteFunction<String, String> writeFn = mock(TableWriteFunction.class);
    final RemoteTable<String, String> remoteTable = new RemoteTable<>(tableId + "-remote", readFn, writeFn,
            rateLimitHelper, rateLimitHelper, Executors.newSingleThreadExecutor(), null, null, null,
            Executors.newSingleThreadExecutor());

    final CachingTable<String, String> cachingTable = new CachingTable<>(tableId, remoteTable, guavaTable,
            false);

    initTables(cachingTable, guavaTable, remoteTable);

    // 3 per readable table (9)
    // 5 per read/write table (15)
    verify(metricsRegistry, times(24)).newCounter(any(), anyString());

    // 2 per readable table (6)
    // 5 per read/write table (15)
    // 1 per remote readable table (1)
    // 1 per remote read/write table (1)
    verify(metricsRegistry, times(23)).newTimer(any(), anyString());

    // 1 per guava table (1)
    // 3 per caching table (3)
    verify(metricsRegistry, times(4)).newGauge(anyString(), any());

    // GET
    doReturn(CompletableFuture.completedFuture("bar")).when(readFn).getAsync(any());
    Assert.assertEquals(cachingTable.getAsync("foo").get(), "bar");
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo"), "bar");

    // PUT
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).putAsync(any(), any());
    cachingTable.putAsync("foo", "baz").get();
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo"), "baz");

    // DELETE
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).deleteAsync(any());
    cachingTable.deleteAsync("foo").get();
    // Ensure cache is updated
    Assert.assertNull(guavaCache.getIfPresent("foo"));

    // GET-ALL
    Map<String, String> records = new HashMap<>();
    records.put("foo1", "bar1");
    records.put("foo2", "bar2");
    doReturn(CompletableFuture.completedFuture(records)).when(readFn).getAllAsync(any());
    Assert.assertEquals(cachingTable.getAllAsync(Arrays.asList("foo1", "foo2")).get(), records);
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo1"), "bar1");
    Assert.assertEquals(guavaCache.getIfPresent("foo2"), "bar2");

    // GET-ALL with partial miss
    doReturn(CompletableFuture.completedFuture(Collections.singletonMap("foo3", "bar3"))).when(readFn)
            .getAllAsync(any());
    records = cachingTable.getAllAsync(Arrays.asList("foo1", "foo2", "foo3")).get();
    Assert.assertEquals(records.get("foo3"), "bar3");
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo3"), "bar3");

    // Calling again for the same keys should not trigger IO, i.e. no exception is thrown
    CompletableFuture<String> exFuture = new CompletableFuture<>();
    exFuture.completeExceptionally(new RuntimeException("Test exception"));
    doReturn(exFuture).when(readFn).getAllAsync(any());
    cachingTable.getAllAsync(Arrays.asList("foo1", "foo2", "foo3")).get();

    // A request with an uncached key (foo5) forces a remote read, which now fails, so the call should throw
    try {
        cachingTable.getAllAsync(Arrays.asList("foo1", "foo2", "foo5")).get();
        Assert.fail();
    } catch (Exception e) {
    }

    // PUT-ALL
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).putAllAsync(any());
    List<Entry<String, String>> entries = new ArrayList<>();
    entries.add(new Entry<>("foo1", "bar111"));
    entries.add(new Entry<>("foo2", "bar222"));
    cachingTable.putAllAsync(entries).get();
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo1"), "bar111");
    Assert.assertEquals(guavaCache.getIfPresent("foo2"), "bar222");

    // PUT-ALL with delete
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).putAllAsync(any());
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).deleteAllAsync(any());
    entries = new ArrayList<>();
    entries.add(new Entry<>("foo1", "bar111"));
    entries.add(new Entry<>("foo2", null));
    cachingTable.putAllAsync(entries).get();
    // Ensure cache is updated
    Assert.assertNull(guavaCache.getIfPresent("foo2"));

    // At this point, foo1 and foo3 should still exist
    Assert.assertNotNull(guavaCache.getIfPresent("foo1"));
    Assert.assertNotNull(guavaCache.getIfPresent("foo3"));

    // DELETE-ALL
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).deleteAllAsync(any());
    cachingTable.deleteAllAsync(Arrays.asList("foo1", "foo3")).get();
    // Ensure foo1 and foo3 are gone
    Assert.assertNull(guavaCache.getIfPresent("foo1"));
    Assert.assertNull(guavaCache.getIfPresent("foo3"));
}

From source file:org.apache.samza.table.caching.TestCachingTable.java

@Test
public void testTimerDisabled() throws Exception {
    String tableId = "testTimerDisabled";

    Cache<String, String> guavaCache = CacheBuilder.newBuilder().initialCapacity(100).build();
    final ReadWriteTable<String, String> guavaTable = new GuavaCacheTable<>(tableId, guavaCache);

    TableRateLimiter<String, String> rateLimitHelper = mock(TableRateLimiter.class);

    TableReadFunction<String, String> readFn = mock(TableReadFunction.class);
    doReturn(CompletableFuture.completedFuture("")).when(readFn).getAsync(any());

    TableWriteFunction<String, String> writeFn = mock(TableWriteFunction.class);
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).putAsync(any(), any());
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).deleteAsync(any());

    final RemoteTable<String, String> remoteTable = new RemoteTable<>(tableId, readFn, writeFn, rateLimitHelper,
            rateLimitHelper, Executors.newSingleThreadExecutor(), null, null, null,
            Executors.newSingleThreadExecutor());

    final CachingTable<String, String> cachingTable = new CachingTable<>(tableId, remoteTable, guavaTable,
            false);

    initTables(true, cachingTable, guavaTable, remoteTable);

    cachingTable.get("");
    cachingTable.getAsync("").get();
    cachingTable.getAll(Collections.emptyList());
    cachingTable.getAllAsync(Collections.emptyList());
    cachingTable.flush();
    cachingTable.put("", "");
    cachingTable.putAsync("", "");
    cachingTable.putAll(Collections.emptyList());
    cachingTable.putAllAsync(Collections.emptyList());
    cachingTable.delete("");
    cachingTable.deleteAsync("");
    cachingTable.deleteAll(Collections.emptyList());
    cachingTable.deleteAllAsync(Collections.emptyList());
}

From source file:org.eclipse.jdt.ls.core.internal.handlers.JDTLanguageServer.java

@Override
public CompletableFuture<InitializeResult> initialize(InitializeParams params) {
    logInfo(">> initialize");
    InitHandler handler = new InitHandler(pm, preferenceManager, client);
    return CompletableFuture.completedFuture(handler.initialize(params));
}

From source file:org.eclipse.smarthome.binding.mqtt.generic.internal.generic.ChannelState.java

/**
 * Removes the subscription to the state topic and resets the channelStateUpdateListener.
 *
 * @return A future that completes once unsubscribing from the state topic has finished.
 *         It completes immediately if no connection is established and completes exceptionally otherwise.
 */
public CompletableFuture<@Nullable Void> stop() {
    final MqttBrokerConnection connection = this.connection;
    if (connection != null && StringUtils.isNotBlank(config.stateTopic)) {
        return connection.unsubscribe(config.stateTopic, this).thenRun(this::internalStop);
    } else {
        internalStop();
        return CompletableFuture.completedFuture(null);
    }
}

From source file:org.eclipse.smarthome.binding.mqtt.generic.internal.generic.ChannelState.java

/**
 * Subscribes to the state topic on the given connection and informs about updates on the given listener.
 *
 * @param connection A broker connection
 * @param scheduler A scheduler to realize the timeout
 * @param timeout A timeout in milliseconds. Can be 0 to disable the timeout, in which case the returned
 *        future completes right after subscribing.
 * @return A future that completes once subscribing has worked, completes immediately if the stateTopic
 *         is not set, and completes exceptionally otherwise.
 */
public CompletableFuture<@Nullable Void> start(MqttBrokerConnection connection,
        ScheduledExecutorService scheduler, int timeout) {
    if (hasSubscribed) {
        return CompletableFuture.completedFuture(null);
    }

    this.connection = connection;

    if (StringUtils.isBlank(config.stateTopic)) {
        return CompletableFuture.completedFuture(null);
    }

    this.future = new CompletableFuture<>();
    connection.subscribe(config.stateTopic, this).thenRun(() -> {
        hasSubscribed = true;
        if (timeout > 0 && !future.isDone()) {
            this.scheduledFuture = scheduler.schedule(this::receivedOrTimeout, timeout, TimeUnit.MILLISECONDS);
        } else {
            receivedOrTimeout();
        }
    }).exceptionally(this::subscribeFail);
    return future;
}

From source file:org.eclipse.smarthome.binding.mqtt.generic.internal.handler.GenericThingHandler.java

@Override
public void dispose() {
    // Stop all MQTT subscriptions
    try {
        channelStateByChannelUID.values().stream().map(e -> e.stop())
                .reduce(CompletableFuture.completedFuture(null), (a, v) -> a.thenCompose(b -> v))
                .get(500, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException ignore) {
    }
    // Remove all state descriptions of this handler
    channelStateByChannelUID.forEach((uid, state) -> stateDescProvider.remove(uid));
    connection = null;
    channelStateByChannelUID.clear();
    super.dispose();
}
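
The dispose() example above relies on a useful idiom: CompletableFuture.completedFuture(null) serves as the identity value when reducing a collection of futures into one. The following standalone sketch shows that pattern with hypothetical names (ComposeAllExample, composeAll); it is not part of the handler above:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class ComposeAllExample {
    // Combines a list of futures into a single future that completes only after every
    // future in the list has completed; completedFuture(null) is the identity of the reduction.
    static CompletableFuture<Void> composeAll(List<CompletableFuture<Void>> futures) {
        return futures.stream()
                .reduce(CompletableFuture.completedFuture(null),
                        (acc, next) -> acc.thenCompose(ignored -> next));
    }

    public static void main(String[] args) {
        CompletableFuture<Void> a = CompletableFuture.runAsync(() -> System.out.println("a"));
        CompletableFuture<Void> b = CompletableFuture.runAsync(() -> System.out.println("b"));
        composeAll(Arrays.asList(a, b)).join(); // returns only after both a and b are done
    }
}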