Example usage for java.time Duration ofSeconds

List of usage examples for java.time Duration ofSeconds

Introduction

On this page you can find usage examples for java.time Duration.ofSeconds.

Prototype

public static Duration ofSeconds(long seconds) 

Document

Obtains a Duration representing a number of seconds.
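
As a minimal sketch (not drawn from any of the projects listed below), the snippet that follows exercises the standard java.time API around Duration.ofSeconds: creating a Duration, converting it to other units, and comparing and combining it with other Durations.

import java.time.Duration;

public class DurationOfSecondsExample {
    public static void main(String[] args) {
        // Obtain a Duration representing 30 seconds
        Duration timeout = Duration.ofSeconds(30);

        // Convert to other units
        System.out.println(timeout.toMillis());   // 30000
        System.out.println(timeout.getSeconds()); // 30

        // Compare against another Duration
        System.out.println(timeout.compareTo(Duration.ofSeconds(5)) > 0); // true

        // Combine Durations
        System.out.println(timeout.plus(Duration.ofSeconds(15))); // PT45S
    }
}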

Usage

From source file:org.springframework.cloud.deployer.spi.cloudfoundry.CloudFoundry2620AndEarlierTaskLauncher.java

private Mono<GetDropletResponse> waitForDropletProcessing(String dropletId) {
    return requestGetDroplet(dropletId)
            .filter(response -> !response.getState().equals(org.cloudfoundry.client.v3.droplets.State.PENDING))
            .repeatWhenEmpty(50, DelayUtils.exponentialBackOff(Duration.ofSeconds(10), Duration.ofMinutes(1),
                    Duration.ofMinutes(10)));
}

From source file:org.springframework.cloud.deployer.spi.cloudfoundry.CloudFoundry2620AndEarlierTaskLauncher.java

private Mono<GetPackageResponse> waitForPackageProcessing(String packageId) {
    return requestGetPackage(packageId).filter(response -> response.getState().equals(State.READY))
            .repeatWhenEmpty(50, DelayUtils.exponentialBackOff(Duration.ofSeconds(5), Duration.ofMinutes(1),
                    Duration.ofMinutes(10)));
}

From source file:com.spotify.styx.cli.Main.java

private void activeStates() throws IOException, ExecutionException, InterruptedException {
    String uri = apiUrl("status", "activeStates");
    final String component = namespace.getString(parser.listComponent.getDest());
    if (component != null) {
        uri += "?component=" + URLEncoder.encode(component, UTF_8);
    }

    final ByteString response = send(Request.forUri(uri).withTtl(Duration.ofSeconds(TTL_REQUEST)));
    final RunStateDataPayload runStateDataPayload = deserialize(response, RunStateDataPayload.class);
    cliOutput.printStates(runStateDataPayload);
}

From source file:com.spotify.styx.cli.Main.java

private void eventsForWorkflowInstance() throws ExecutionException, InterruptedException, IOException {
    final WorkflowInstance workflowInstance = getWorkflowInstance(namespace);
    final String component = workflowInstance.workflowId().componentId();
    final String workflow = workflowInstance.workflowId().id();
    final String parameter = workflowInstance.parameter();

    final ByteString response = send(Request.forUri(apiUrl("status", "events", component, workflow, parameter))
            .withTtl(Duration.ofSeconds(TTL_REQUEST)));

    final JsonNode jsonNode = OBJECT_MAPPER.readTree(response.toByteArray());

    if (!jsonNode.isObject()) {
        throw new RuntimeException("Invalid json returned from API");
    }

    final ObjectNode json = (ObjectNode) jsonNode;
    final ArrayNode events = json.withArray("events");
    final ImmutableList.Builder<CliOutput.EventInfo> eventInfos = ImmutableList.builder();
    for (JsonNode eventWithTimestamp : events) {
        final long ts = eventWithTimestamp.get("timestamp").asLong();
        final JsonNode event = eventWithTimestamp.get("event");

        String eventName;
        String eventInfo;
        try {
            Event typedEvent = OBJECT_MAPPER.convertValue(event, Event.class);
            eventName = EventUtil.name(typedEvent);
            eventInfo = CliUtil.info(typedEvent);
        } catch (IllegalArgumentException e) {
            // fall back to just inspecting the json
            eventName = event.get("@type").asText();
            eventInfo = "";
        }

        eventInfos.add(CliOutput.EventInfo.create(ts, eventName, eventInfo));
    }

    cliOutput.printEvents(eventInfos.build());
}

From source file:com.joyent.manta.client.multipart.JobsMultipartManager.java

/**
 * Waits for a multipart upload to complete, polling every 5 seconds.
 *
 * @param <R> Return type for executeWhenTimesToPollExceeded
 * @param upload multipart upload object
 * @param executeWhenTimesToPollExceeded lambda executed when timesToPoll has been exceeded
 * @return null when under poll timeout, otherwise returns return value of executeWhenTimesToPollExceeded
 * @throws IOException thrown if there is a problem connecting to Manta
 */
public <R> R waitForCompletion(final MantaMultipartUpload upload,
        final Function<UUID, R> executeWhenTimesToPollExceeded) throws IOException {
    Validate.notNull(upload, "Multipart upload object must not be null");

    return waitForCompletion(upload, Duration.ofSeconds(DEFAULT_SECONDS_TO_POLL), NUMBER_OF_TIMES_TO_POLL,
            executeWhenTimesToPollExceeded);

}
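
The Javadoc above describes a poll-every-N-seconds loop with a fallback once the poll budget is exhausted. The generic sketch below illustrates that shape using plain java.time and java.util.function types; pollUntilDone and the supplier names are hypothetical and are not part of the Manta client API.

import java.time.Duration;
import java.util.function.Supplier;

public class PollingSketch {

    // Returns null if isDone reports completion within timesToPoll attempts,
    // otherwise returns the value produced by onExceeded.
    static <R> R pollUntilDone(Supplier<Boolean> isDone, Duration interval, int timesToPoll,
            Supplier<R> onExceeded) throws InterruptedException {
        for (int i = 0; i < timesToPoll; i++) {
            if (isDone.get()) {
                return null; // completed within the poll budget
            }
            Thread.sleep(interval.toMillis()); // wait one interval before re-checking
        }
        return onExceeded.get(); // poll budget exceeded, run the fallback
    }

    public static void main(String[] args) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + 2_000;
        String result = pollUntilDone(() -> System.currentTimeMillis() > deadline,
                Duration.ofSeconds(1), 5, () -> "timed out");
        System.out.println(result); // prints null if the condition was met in time
    }
}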

From source file:com.ikanow.aleph2.storage_service_hdfs.services.TestHdfsDataWriteService.java

public void test_writerService_end2end(Optional<String> secondary, boolean is_transient)
        throws InterruptedException, ExecutionException {
    final String temp_dir = System.getProperty("java.io.tmpdir") + File.separator;
    HfdsDataWriteService<TestBean> write_service = getWriter(
            "/test/writer/end2end/" + secondary.orElse("current") + "/", secondary, is_transient);

    // (Tidy up)
    try {
        FileUtils.deleteDirectory(new File(temp_dir + "/data/" + write_service._bucket.full_name()));
    } catch (Exception e) {
    }

    // Check lazy initialization only kicks in once      
    Optional<IBatchSubservice<TestBean>> x = write_service.getBatchWriteSubservice();
    assertEquals(x.get(), write_service._writer.get());
    Optional<IBatchSubservice<TestBean>> y = write_service.getBatchWriteSubservice();
    assertEquals(x.get(), y.get());

    IBatchSubservice<TestBean> batch = x.get();

    // Set up properties for testing:
    batch.setBatchProperties(Optional.of(1000), Optional.of(1000L), Optional.of(Duration.ofSeconds(2L)),
            Optional.of(3));

    Thread.sleep(1000L);
    // Check there are now 3 threads
    assertEquals(3, write_service._writer.get()._state._workers.getActiveCount());

    for (int i = 0; i < 20; ++i) {
        TestBean emit = new TestBean("id" + i, "val" + i);
        if (0 == (i % 2)) {
            if (0 == ((i / 2) % 2)) {
                batch.storeObject(emit);
            } else {
                CompletableFuture<Supplier<Object>> cf = write_service.storeObject(emit);
                assertEquals(null, cf.get().get());
            }
        } else {
            if (0 == ((i / 2) % 2)) {
                batch.storeObjects(Arrays.asList(emit));
            } else {
                CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> cf = write_service
                        .storeObjects(Arrays.asList(emit));
                assertEquals(Collections.emptyList(), cf.get()._1().get());
                assertEquals(1L, cf.get()._2().get().longValue());
            }
        }
    }
    final String infix = is_transient ? IStorageService.TRANSIENT_DATA_SUFFIX_SECONDARY
            : IStorageService.STORED_DATA_SUFFIX_PROCESSED_SECONDARY;
    final String infix_name = is_transient ? "testj-testm" : "";

    // Check that initially the files are stored locally
    File init_dir = new File((temp_dir + "/data/" + write_service._bucket.full_name() + infix
            + secondary.orElse("current") + "/" + infix_name + "/.spooldir/").replace("/", File.separator));
    File final_dir = new File((temp_dir + "/data/" + write_service._bucket.full_name() + infix
            + secondary.orElse("current") + "/" + infix_name + "/all_time/").replace("/", File.separator));

    {
        int ii = 1;
        for (; ii <= 50; ++ii) {
            Thread.sleep(250L);
            if (6 == init_dir.list().length) {
                break;
            }
        }
        System.out.println("(exited from file system check after " + ii * 2.5 + " s)");
    }

    assertEquals("Needs to have 6 files, including 3x .crc: " + Arrays.toString(init_dir.list()), 6,
            init_dir.list().length); //*2 because CRC
    assertTrue(
            "Nothing in final dir: " + (final_dir.exists() ? Arrays.toString(final_dir.list()) : "(non-exist)"),
            !final_dir.exists() || final_dir.list().length == 0);

    {
        int ii = 1;
        for (; ii <= 50; ++ii) {
            Thread.sleep(2500L);
            if (0 == init_dir.list().length) {
                break;
            }
        }
        System.out.println("(exited from file system check after " + ii * 2.5 + " s)");
    }

    assertEquals(0, init_dir.list().length); //*2 because CRC
    assertEquals(6, final_dir.list().length); //*2 because CRC      

    // Change batch properties so that will segment (also check number of threads reduces)
    batch.setBatchProperties(Optional.of(10), Optional.of(1000L), Optional.of(Duration.ofSeconds(5L)),
            Optional.of(1));
    List<TestBean> l1 = IntStream.range(0, 8).boxed().map(i -> new TestBean("id" + i, "val" + i))
            .collect(Collectors.toList());
    List<TestBean> l2 = IntStream.range(8, 15).boxed().map(i -> new TestBean("id" + i, "val" + i))
            .collect(Collectors.toList());

    batch.storeObjects(l1);
    Thread.sleep(750L);
    assertEquals(6, final_dir.list().length); //*2 because CRC      
    System.out.println("Found: 6 files: " + Arrays.stream(final_dir.list()).collect(Collectors.joining(";")));

    batch.storeObjects(l2);
    System.out.println("Added 7 more objects at " + new Date());
    for (int jj = 0; jj < 5; ++jj) {
        Thread.sleep(1500L);
        if (final_dir.list().length > 6)
            break;
    }
    System.out.println("(Check init dir cleared: "
            + Arrays.stream(init_dir.list()).collect(Collectors.joining(";")) + ")");
    assertEquals("Should have 8 files: " + Arrays.stream(final_dir.list()).collect(Collectors.joining(";")), 8,
            final_dir.list().length); //*2 because CRC   

    System.out.println("(Deleting datastore and checking it's empty)");
    assertTrue("Deleted datastore: ", write_service.deleteDatastore().get()); // (just quick test since this uses handleBucketDeletion which is tested elsewhere...)
    String[] final_dir_list = Optional.ofNullable(final_dir.list()).orElse(new String[0]);
    assertEquals("Should have 0 files: " + Arrays.stream(final_dir_list).collect(Collectors.joining(";")), 0,
            final_dir_list.length); //*2 because CRC   
}

From source file:com.redskyit.scriptDriver.RunTests.java

private void sleepSeconds(long seconds) {
    try {
        Sleeper.SYSTEM_SLEEPER.sleep(Duration.ofSeconds(seconds));
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.archiva.repository.maven2.MavenRepositoryProvider.java

@Override
public void updateRemoteInstance(EditableRemoteRepository repo, RemoteRepositoryConfiguration cfg)
        throws RepositoryException {
    setBaseConfig(repo, cfg);
    repo.setCheckPath(cfg.getCheckPath());
    repo.setSchedulingDefinition(cfg.getRefreshCronExpression());
    try {
        repo.setLocation(new URI(cfg.getUrl()));
    } catch (UnsupportedURIException | URISyntaxException e) {
        log.error("Could not set remote url " + cfg.getUrl());
        throw new RepositoryException("The url config is not a valid uri: " + cfg.getUrl());
    }
    repo.setTimeout(Duration.ofSeconds(cfg.getTimeout()));
    RemoteIndexFeature remoteIndexFeature = repo.getFeature(RemoteIndexFeature.class).get();
    remoteIndexFeature.setDownloadRemoteIndex(cfg.isDownloadRemoteIndex());
    remoteIndexFeature.setDownloadRemoteIndexOnStartup(cfg.isDownloadRemoteIndexOnStartup());
    remoteIndexFeature.setDownloadTimeout(Duration.ofSeconds(cfg.getRemoteDownloadTimeout()));
    remoteIndexFeature.setProxyId(cfg.getRemoteDownloadNetworkProxyId());
    if (cfg.isDownloadRemoteIndex()) {
        try {
            remoteIndexFeature.setIndexUri(new URI(cfg.getRemoteIndexUrl()));
        } catch (URISyntaxException e) {
            log.error("Could not set remote index url " + cfg.getRemoteIndexUrl());
            remoteIndexFeature.setDownloadRemoteIndex(false);
            remoteIndexFeature.setDownloadRemoteIndexOnStartup(false);
        }
    }
    for (Object key : cfg.getExtraHeaders().keySet()) {
        repo.addExtraHeader(key.toString(), cfg.getExtraHeaders().get(key).toString());
    }
    for (Object key : cfg.getExtraParameters().keySet()) {
        repo.addExtraParameter(key.toString(), cfg.getExtraParameters().get(key).toString());
    }
    PasswordCredentials credentials = new PasswordCredentials("", new char[0]);
    if (cfg.getPassword() != null && cfg.getUsername() != null) {
        credentials.setPassword(cfg.getPassword().toCharArray());
        credentials.setUsername(cfg.getUsername());
        repo.setCredentials(credentials);
    } else {
        credentials.setPassword(new char[0]);
    }
    IndexCreationFeature indexCreationFeature = repo.getFeature(IndexCreationFeature.class).get();
    if (cfg.getIndexDir() != null) {
        indexCreationFeature.setIndexPath(getURIFromString(cfg.getIndexDir()));
    }
    if (cfg.getPackedIndexDir() != null) {
        indexCreationFeature.setPackedIndexPath(getURIFromString(cfg.getPackedIndexDir()));
    }
    log.debug("Updated remote instance {}", repo);
}

From source file:org.apache.samza.runtime.LocalApplicationRunner.java

@Override
public void waitForFinish() {
    this.waitForFinish(Duration.ofSeconds(0));
}