Example usage for java.time.Duration.ofSeconds

List of usage examples for java.time.Duration.ofSeconds

Introduction

On this page you can find example usage for java.time.Duration.ofSeconds.

Prototype

public static Duration ofSeconds(long seconds) 

Document

Obtains a Duration representing a number of seconds.
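
A minimal, self-contained sketch of the factory method in isolation (illustrative only; the class and variable names below are not taken from any of the source files in the Usage section):

import java.time.Duration;

public class OfSecondsExample {
    public static void main(String[] args) {
        // Obtain a Duration of exactly 30 seconds.
        Duration timeout = Duration.ofSeconds(30);
        System.out.println(timeout);            // PT30S (ISO-8601 representation)
        System.out.println(timeout.toMillis()); // 30000
    }
}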

Usage

From source file: io.pravega.service.server.host.ZKSegmentContainerMonitorTest.java

@Test
public void testStartAndStopContainer() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);

    SegmentContainerRegistry containerRegistry = createMockContainerRegistry();
    @Cleanup
    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));

    // Simulate a container that starts successfully.
    CompletableFuture<ContainerHandle> startupFuture = new CompletableFuture<>();
    ContainerHandle containerHandle = mock(ContainerHandle.class);
    when(containerHandle.getContainerId()).thenReturn(2);
    when(containerRegistry.startContainer(eq(2), any())).thenReturn(startupFuture);

    // Now modify the ZK entry.
    HashMap<Host, Set<Integer>> currentData = deserialize(zkClient, PATH);
    currentData.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(2));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));

    // Container finished starting.
    startupFuture.complete(containerHandle);
    verify(containerRegistry, timeout(10000).atLeastOnce()).startContainer(eq(2), any());

    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(2));

    // Now modify the ZK entry. Remove container 2 and add 1.
    HashMap<Host, Set<Integer>> newMapping = new HashMap<>();
    newMapping.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(1));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(newMapping));

    // Verify that stop is called and only the newly added container is in running state.
    when(containerRegistry.stopContainer(any(), any())).thenReturn(CompletableFuture.completedFuture(null));
    verify(containerRegistry, timeout(10000).atLeastOnce()).stopContainer(any(), any());

    // Using wait here to ensure the private data structure is updated.
    // TODO: Removing dependency on sleep here and other places in this class
    // - https://github.com/pravega/pravega/issues/1079
    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(1));
}

From source file: io.pravega.segmentstore.server.host.ZKSegmentContainerMonitorTest.java

@Test
public void testStartAndStopContainer() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);

    SegmentContainerRegistry containerRegistry = createMockContainerRegistry();
    @Cleanup
    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));

    // Simulate a container that starts successfully.
    CompletableFuture<ContainerHandle> startupFuture = new CompletableFuture<>();
    ContainerHandle containerHandle = mock(ContainerHandle.class);
    when(containerHandle.getContainerId()).thenReturn(2);
    when(containerRegistry.startContainer(eq(2), any())).thenReturn(startupFuture);

    // Now modify the ZK entry.
    HashMap<Host, Set<Integer>> currentData = deserialize(zkClient, PATH);
    currentData.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(2));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));

    // Container finished starting.
    startupFuture.complete(containerHandle);
    verify(containerRegistry, timeout(1000).atLeastOnce()).startContainer(eq(2), any());

    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(2));

    // Now modify the ZK entry. Remove container 2 and add 1.
    HashMap<Host, Set<Integer>> newMapping = new HashMap<>();
    newMapping.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(1));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(newMapping));

    // Verify that stop is called and only the newly added container is in running state.
    when(containerRegistry.stopContainer(any(), any())).thenReturn(CompletableFuture.completedFuture(null));
    verify(containerRegistry, timeout(1000).atLeastOnce()).stopContainer(any(), any());

    // Using wait here to ensure the private data structure is updated.
    // TODO: Removing dependency on sleep here and other places in this class
    // - https://github.com/pravega/pravega/issues/1079
    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
    assertTrue(segMonitor.getRegisteredContainers().contains(1));
}

From source file: ch.cyberduck.core.b2.B2ExceptionMappingService.java

@Override
public BackgroundException map(final B2ApiException e) {
    final StringBuilder buffer = new StringBuilder();
    this.append(buffer, e.getMessage());
    switch (e.getStatus()) {
    case HttpStatus.SC_UNAUTHORIZED:
        // 401 Unauthorized.
        if ("expired_auth_token".equalsIgnoreCase(e.getCode())) {
            try {
                session.login(new DisabledPasswordStore(), new DisabledLoginCallback(),
                        new DisabledCancelCallback());
                return new RetriableAccessDeniedException(buffer.toString());
            } catch (BackgroundException f) {
                log.warn(String.format("Attempt to renew expired auth token failed. %s", f.getDetail()));
            }

        }
        return new LoginFailureException(buffer.toString(), e);
    case HttpStatus.SC_FORBIDDEN:
        if ("cap_exceeded".equalsIgnoreCase(e.getCode()) || "storage_cap_exceeded".equalsIgnoreCase(e.getCode())
                || "transaction_cap_exceeded".equalsIgnoreCase(e.getCode())) {// Reached the storage cap that you set
            return new QuotaException(buffer.toString(), e);
        }
        return new AccessDeniedException(buffer.toString(), e);
    case HttpStatus.SC_NOT_FOUND:
        return new NotfoundException(buffer.toString(), e);
    case HttpStatus.SC_INSUFFICIENT_SPACE_ON_RESOURCE:
        return new QuotaException(buffer.toString(), e);
    case HttpStatus.SC_INSUFFICIENT_STORAGE:
        return new QuotaException(buffer.toString(), e);
    case HttpStatus.SC_PAYMENT_REQUIRED:
        return new QuotaException(buffer.toString(), e);
    case HttpStatus.SC_BAD_REQUEST:
        if ("file_not_present".equalsIgnoreCase(e.getCode())) {
            return new NotfoundException(buffer.toString(), e);
        }
        if ("cap_exceeded".equalsIgnoreCase(e.getCode())) {// Reached the storage cap that you set
            return new QuotaException(buffer.toString(), e);
        }
        if ("bad_request".equalsIgnoreCase(e.getCode())) {
            if ("sha1 did not match data received".equalsIgnoreCase(e.getMessage())) {
                return new ChecksumException(buffer.toString(), e);
            }
        }
        return new InteroperabilityException(buffer.toString(), e);
    case HttpStatus.SC_METHOD_NOT_ALLOWED:
        return new InteroperabilityException(buffer.toString(), e);
    case HttpStatus.SC_NOT_IMPLEMENTED:
        return new InteroperabilityException(buffer.toString(), e);
    case HttpStatus.SC_SERVICE_UNAVAILABLE:
    case HttpStatus.SC_INTERNAL_SERVER_ERROR:
        return new ConnectionRefusedException(buffer.toString(), e);
    default:
        if (e.getRetry() != null) {
            // Too Many Requests (429)
            return new RetriableAccessDeniedException(buffer.toString(), Duration.ofSeconds(e.getRetry()), e);
        }
        return new InteroperabilityException(buffer.toString(), e);
    }
}

From source file: org.esbtools.eventhandler.lightblue.config.EventHandlerConfigEntity.java

@Override
@Transient
public Duration getDocumentEventExpireThreshold() {
    return documentEventExpireThresholdSeconds == null ? null
            : Duration.ofSeconds(documentEventExpireThresholdSeconds);
}

From source file: org.drugis.addis.config.MainConfig.java

@Bean
public CacheManager cacheManager() {
    long numberOfCacheItems = 100;
    long fourHours = 60 * 60 * 4;

    CacheConfiguration<Object, Object> cacheConfiguration = CacheConfigurationBuilder
            .newCacheConfigurationBuilder(Object.class, Object.class,
                    ResourcePoolsBuilder.heap(numberOfCacheItems))
            .withExpiry(ExpiryPolicyBuilder.timeToLiveExpiration(Duration.ofSeconds(fourHours))).build();

    Map<String, CacheConfiguration<?, ?>> caches = createCacheConfigurations(cacheConfiguration);

    EhcacheCachingProvider provider = (EhcacheCachingProvider) Caching.getCachingProvider();
    DefaultConfiguration configuration = new DefaultConfiguration(caches, provider.getDefaultClassLoader());
    return new JCacheCacheManager(provider.getCacheManager(provider.getDefaultURI(), configuration));
}

From source file: reactor.ipc.netty.tcp.TcpServerTests.java

@Test(timeout = 10000)
public void testHang() throws Exception {
    NettyContext httpServer = HttpServer.create(opts -> opts.listen("0.0.0.0", 0))
            .newRouter(r -> r.get("/data", (request, response) -> {
                return response.send(Mono.empty());
            })).block(Duration.ofSeconds(30));
    httpServer.dispose();
}

From source file: reactor.ipc.netty.tcp.TcpServerTests.java

@Test
public void exposesRemoteAddress() throws InterruptedException {
    final int port = SocketUtils.findAvailableTcpPort();
    final CountDownLatch latch = new CountDownLatch(1);

    NettyContext server = TcpServer.create(port).newHandler((in, out) -> {
        InetSocketAddress remoteAddr = in.remoteAddress();
        assertNotNull("remote address is not null", remoteAddr.getAddress());
        latch.countDown();

        return Flux.never();
    }).block(Duration.ofSeconds(30));

    NettyContext client = TcpClient.create(port)
            .newHandler((in, out) -> out.sendString(Flux.just("Hello World!"))).block(Duration.ofSeconds(30));

    assertTrue("latch was counted down", latch.await(5, TimeUnit.SECONDS));

    client.dispose();
    server.dispose();
}

From source file: org.springframework.cloud.deployer.spi.cloudfoundry.CloudFoundry2620AndEarlierTaskLauncher.java

/**
 * Set up a reactor flow to launch a task. Before launch, check if the base application exists. If not, deploy then launch task.
 *
 * @param request description of the application to be launched
 * @return name of the launched task, returned without waiting for reactor pipeline to complete
 */
@Override
public String launch(AppDeploymentRequest request) {
    return getOrDeployApplication(request).then(application -> launchTask(application.getId(), request))
            .doOnSuccess(r -> logger.info("Task {} launch successful", request))
            .doOnError(t -> logger.error(String.format("Task %s launch failed", request), t))
            .block(Duration.ofSeconds(this.deploymentProperties.getApiTimeout()));
}

From source file: com.publictransitanalytics.scoregenerator.datalayer.directories.GTFSReadingStopTimesDirectory.java

private void parseFrequenciesFile(final ImmutableMultimap.Builder<String, FrequencyRecord> builder,
        final Reader frequenciesReader) throws FileNotFoundException, IOException {

    final CSVParser frequenciesParser = new CSVParser(frequenciesReader, CSVFormat.DEFAULT.withHeader());
    final List<CSVRecord> frequenciesRecords = frequenciesParser.getRecords();

    for (CSVRecord record : frequenciesRecords) {
        final String tripId = record.get("trip_id");

        final FrequencyRecord frequencyRecord = new FrequencyRecord(tripId,
                TransitTime.parse(record.get("start_time")), TransitTime.parse(record.get("end_time")),
                Duration.ofSeconds(Long.parseLong(record.get("headway_secs"))));
        builder.put(tripId, frequencyRecord);
    }
}