Example usage for java.util.concurrent TimeUnit MINUTES

List of usage examples for java.util.concurrent TimeUnit MINUTES

Introduction

In this page you can find the example usage for java.util.concurrent TimeUnit MINUTES.

Prototype

TimeUnit MINUTES

Click the Source Link below to view the source code for java.util.concurrent TimeUnit MINUTES.

Document

Time unit representing sixty seconds.

Usage

From source file:com.teradata.benchto.driver.execution.ExecutionDriver.java

/**
 * Runs each benchmark in order until the list is exhausted or the configured
 * time limit runs out, then waits for status-reporting futures and fails if
 * any benchmark execution was unsuccessful.
 */
private void executeBenchmarks(List<Benchmark> benchmarks) {
    List<BenchmarkExecutionResult> results = newArrayList();
    int ordinal = 1;
    for (Benchmark benchmark : benchmarks) {
        // Stop scheduling new benchmarks once the global time budget is spent.
        if (isTimeLimitEnded()) {
            LOG.warn("Time limit for running benchmarks has run out");
            break;
        }

        executeHealthCheck(benchmark);
        results.add(benchmarkExecutionDriver.execute(benchmark, ordinal++, benchmarks.size()));
        benchmarkStatusReporter.processCompletedFutures();
    }

    List<BenchmarkExecutionResult> failures = results.stream()
            .filter(result -> !result.isSuccessful())
            .collect(toList());

    // Drain pending status reports before surfacing any failure.
    benchmarkStatusReporter.awaitAllFutures(10, TimeUnit.MINUTES);

    if (!failures.isEmpty()) {
        throw new FailedBenchmarkExecutionException(failures, results.size());
    }
}

From source file:com.kinesis.datavis.utils.DynamoDBUtils.java

/**
 * Creates a DynamoDB count table keyed by {@code attrHash} (HASH) and
 * {@code attrRange} (RANGE), both strings, then blocks until the table is
 * ACTIVE. If the table already exists ({@link ResourceInUseException}) it is
 * assumed ready and the method returns silently.
 *
 * @param tableName name of the table to create
 * @param attrHash  attribute used as the hash (partition) key
 * @param attrRange attribute used as the range (sort) key
 */
public void createCountTableIfNotExists(String tableName, String attrHash, String attrRange) {
    List<KeySchemaElement> keySchema = new ArrayList<>();
    keySchema.add(new KeySchemaElement().withKeyType(KeyType.HASH).withAttributeName(attrHash));
    keySchema.add(new KeySchemaElement().withKeyType(KeyType.RANGE).withAttributeName(attrRange));

    ArrayList<AttributeDefinition> attributeDefinitions = new ArrayList<>();
    attributeDefinitions.add(
            new AttributeDefinition().withAttributeName(attrHash).withAttributeType(ScalarAttributeType.S));
    // Range key must be a String. DynamoDBMapper translates Dates to ISO8601 strings.
    attributeDefinitions.add(
            new AttributeDefinition().withAttributeName(attrRange).withAttributeType(ScalarAttributeType.S));

    // Create the table with enough write IOPS to handle 5 distinct resources updated every 1 second:
    // 1 update/second * 5 resources = 5 write IOPS.
    // The provisioned throughput will need to be adjusted if the cadinality of the input data or the interval for
    // updates changes.
    CreateTableRequest request = new CreateTableRequest()
            .withTableName(tableName)
            .withProvisionedThroughput(new ProvisionedThroughput(10L, 5L))
            .withKeySchema(keySchema)
            .withAttributeDefinitions(attributeDefinitions);

    try {
        dynamoDB.createTable(request);

        LOG.info(String.format("Created DynamoDB table: %s. Waiting up to 5 minutes for it to become ACTIVE...",
                tableName));
        // NOTE(review): this passes 10 and TimeUnit.MINUTES.toSeconds(5) (= 300). Depending on
        // waitUntilTableIsActive's signature (attempts vs. delay), the total wait may exceed the
        // advertised 5 minutes — confirm against the helper's declaration.
        if (!waitUntilTableIsActive(tableName, 10, TimeUnit.MINUTES.toSeconds(5))) {
            throw new IllegalStateException(
                    String.format("Timed out while waiting for DynamoDB table %s to become ready", tableName));
        }
    } catch (ResourceInUseException ignored) {
        // Assume table exists and is ready to use
    }
}

From source file:net.openhft.chronicle.logger.slf4j.Slf4jVanillaChronicleBinaryLoggerPerfTest.java

/**
 * Benchmarks multi-threaded logging through a vanilla Chronicle binary logger:
 * for each message size, one task per available processor each writes RUNS
 * entries, then the average per-entry latency is printed.
 */
@Test
public void testMultiThreadLogging() throws IOException, InterruptedException {
    warmup(LoggerFactory.getLogger("perf-binary-vanilla-chronicle"));

    final int RUNS = 300000;
    final int THREADS = Runtime.getRuntime().availableProcessors();

    for (int size : new int[] { 64, 128, 256 }) {
        final long start = System.nanoTime();

        ExecutorService pool = Executors.newFixedThreadPool(THREADS);
        for (int i = 0; i < THREADS; i++) {
            pool.submit(new RunnableLogger(RUNS, size, "perf-binary-vanilla-chronicle"));
        }

        pool.shutdown();
        // Bounded wait so a hung logger cannot stall the build indefinitely.
        pool.awaitTermination(2, TimeUnit.MINUTES);

        final long elapsed = System.nanoTime() - start;

        System.out.printf(
                "ChronicleLog.MT (runs=%d, min size=%03d, elapsed=%.3f ms) took an average of %.3f us per entry\n",
                RUNS, size, elapsed / 1e6, elapsed / 1e3 / (RUNS * THREADS));
    }

    ChronicleTools.deleteOnExit(basePath("perf-binary-vanilla-chronicle"));
}

From source file:com.yougou.api.interceptor.impl.AuthInterceptor.java

/**
 * ??API?/*from w w  w.  j  a v a  2s  .c  o m*/
 * 
 * @param apiId
 * @param appKey
 * @return
 */
private boolean isLicense(String apiId, String appKey) {
    List<Map<String, String>> list = null;
    try {
        list = (List<Map<String, String>>) redisTemplate.opsForValue().get(API_LICENSE_REDIS_KEY);
    } catch (Exception e) {
        e.printStackTrace();
    }
    if (CollectionUtils.isEmpty(list)) {
        list = this.queryApiLicenseList();
        redisTemplate.opsForValue().set(API_LICENSE_REDIS_KEY, list, 5, TimeUnit.MINUTES);
        businessLogger
                .log(MessageFormat.format("?API_LICENSE_REDIS_KEY?{0}",
                        CollectionUtils.isNotEmpty(list) ? list.size() : 0));
    }

    return this.containValue(apiId, appKey, list);
}

From source file:com.google.gerrit.elasticsearch.AbstractElasticIndex.java

/**
 * Builds the Elasticsearch index wrapper: reads connection settings from the
 * Gerrit server config, derives the versioned index name, and constructs a
 * multi-threaded Jest HTTP client for it.
 */
AbstractElasticIndex(@GerritServerConfig Config cfg, FillArgs fillArgs, SitePaths sitePaths, Schema<V> schema,
        String indexName) {
    this.fillArgs = fillArgs;
    this.sitePaths = sitePaths;
    this.schema = schema;
    this.gson = new GsonBuilder().setFieldNamingPolicy(LOWER_CASE_WITH_UNDERSCORES).create();
    this.queryBuilder = new ElasticQueryBuilder();

    // Required connection settings; getRequiredConfigOption throws if absent,
    // so keep these reads in this order for stable error reporting.
    String protocol = getRequiredConfigOption(cfg, "protocol");
    String hostname = getRequiredConfigOption(cfg, "hostname");
    String port = getRequiredConfigOption(cfg, "port");

    // Index name is <optional prefix><name><4-digit schema version>.
    String prefix = Strings.nullToEmpty(cfg.getString("index", null, "prefix"));
    this.indexName = String.format("%s%s%04d", prefix, indexName, schema.getVersion());

    // By default Elasticsearch has a 1s delay before changes are available in
    // the index.  Setting refresh(true) on calls to the index makes the index
    // refresh immediately.
    //
    // Discovery should be disabled during test mode to prevent spurious
    // connection failures caused by the client starting up and being ready
    // before the test node.
    //
    // This setting should only be set to true during testing, and is not
    // documented.
    this.refresh = cfg.getBoolean("index", "elasticsearch", "test", false);

    JestClientFactory factory = new JestClientFactory();
    factory.setHttpClientConfig(new HttpClientConfig.Builder(buildUrl(protocol, hostname, port))
            .multiThreaded(true)
            .discoveryEnabled(!refresh)
            .discoveryFrequency(1L, TimeUnit.MINUTES)
            .build());
    client = (JestHttpClient) factory.getObject();
}

From source file:edu.cmu.lti.oaqa.bioasq.concept.retrieval.GoPubMedConceptRetrievalExecutor.java

/**
 * Retrieves concepts for the CAS's first abstract query from every known
 * ontology in parallel, logs per-ontology counts, and indexes the top
 * {@code limit} results ranked by score.
 *
 * @param jcas the CAS holding the abstract query; retrieved concepts are added to its indexes
 * @throws AnalysisEngineProcessException if interrupted while waiting for the searches
 */
@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
    AbstractQuery aquery = TypeUtil.getAbstractQueries(jcas).stream().findFirst().get();
    // Strip characters the remote service cannot handle.
    String queryString = bopQueryStringConstructor.construct(aquery).replaceAll("[^A-Za-z0-9_\\-\"]+", " ");
    LOG.info("Query string: {}", queryString);
    // Synchronized: populated concurrently by one task per ontology.
    List<ConceptSearchResult> concepts = Collections.synchronizedList(new ArrayList<>());
    ExecutorService es = Executors.newCachedThreadPool();
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                concepts.addAll(BioASQUtil.searchOntology(service, jcas, queryString, pages, hits, ontology));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }
    es.shutdown();
    try {
        if (!es.awaitTermination(timeout, TimeUnit.MINUTES)) {
            // Proceed with whatever partial results were collected.
            LOG.warn("Timeout occurs for one or some concept retrieval services.");
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        throw new AnalysisEngineProcessException(e);
    }
    Map<String, List<ConceptSearchResult>> onto2concepts = concepts.stream()
            .collect(groupingBy(ConceptSearchResult::getSearchId));
    for (Map.Entry<String, List<ConceptSearchResult>> entry : onto2concepts.entrySet()) {
        List<ConceptSearchResult> results = entry.getValue();
        LOG.info("Retrieved {} concepts from {}", results.size(), entry.getKey());
        if (LOG.isDebugEnabled()) {
            results.stream().limit(3).forEach(c -> LOG.debug(" - {}", TypeUtil.toString(c)));
        }
    }
    TypeUtil.rankedSearchResultsByScore(concepts, limit).forEach(ConceptSearchResult::addToIndexes);
}

From source file:me.j360.trace.autoconfiguration.ui.ZipkinUiAutoConfiguration.java

/** Serves the UI index page with a short (1 minute) browser cache lifetime. */
@RequestMapping(value = "/index.html", method = GET)
public ResponseEntity<Resource> serveIndex() {
    CacheControl oneMinute = CacheControl.maxAge(1, TimeUnit.MINUTES);
    return ResponseEntity.ok().cacheControl(oneMinute).body(indexHtml);
}

From source file:com.falcon.orca.handlers.MasterHandler.java

/**
 * Runs the master-mode console loop: starts the cluster manager, registers the
 * local node manager, then reads commands from stdin and forwards the matching
 * cluster command (start/stop/pause/resume/exit/clusterDetails) until EOF or
 * exit. The repeated build-command-and-send stanza is factored into
 * {@link #tellCluster(ActorRef, ClustermanagerCommandType)}.
 */
@Override
public void handle() {

    final ActorRef clusterManager = actorSystem.actorOf(ClusterManager.props(this.minimumNodes, hostname));

    // Register the local nodeManager
    // TODO: host and port should not be required in local nodeManager case
    final ActorRef localNodeManager = actorSystem.actorOf(NodeManager.props(hostname, 2552));
    NodeManagerCommand nodeManagerCommand = new NodeManagerCommand();
    nodeManagerCommand.setType(NodeManagerCommandType.REGISTER_TO_MASTER);
    localNodeManager.tell(nodeManagerCommand, clusterManager);

    // Read the input on console and take decision.
    BufferedReader br = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    Options options = createMasterOptions();
    printOnCmd("Welcome to ORCA type help to see what ORCA can do. You have started the node in master mode.");
    try {
        String command = br.readLine();
        while (command != null) {
            if (!StringUtils.isEmpty(command)) {
                try {
                    String[] treatedCommandParts = treatCommands(command);
                    commandLine = commandLineParser.parse(options, treatedCommandParts);
                    if (commandLine.hasOption("start")) {
                        // "start" is the only command that carries extra context (the run details).
                        RunDetails runDetails = createRunDetails(commandLine);
                        ClusterManagerCommand startCommand = new ClusterManagerCommand();
                        startCommand.setType(ClustermanagerCommandType.START_LOAD);
                        startCommand.putOnContext("runDetails", runDetails);
                        clusterManager.tell(startCommand, clusterManager);
                    } else if (commandLine.hasOption("stop")) {
                        tellCluster(clusterManager, ClustermanagerCommandType.STOP_LOAD);
                    } else if (commandLine.hasOption("exit")) {
                        tellCluster(clusterManager, ClustermanagerCommandType.EXIT);
                        actorSystem.shutdown();
                        // Give the actor system up to a minute to wind down before leaving the loop.
                        actorSystem.awaitTermination(new FiniteDuration(1, TimeUnit.MINUTES));
                        break;
                    } else if (commandLine.hasOption("pause")) {
                        tellCluster(clusterManager, ClustermanagerCommandType.PAUSE_LOAD);
                    } else if (commandLine.hasOption("resume")) {
                        tellCluster(clusterManager, ClustermanagerCommandType.RESUME_LOAD);
                    } else if (commandLine.hasOption("clusterDetails")) {
                        tellCluster(clusterManager, ClustermanagerCommandType.CLUSTER_DETAILS);
                    } else {
                        printOnCmd(printHelpMasterMode());
                    }
                } catch (ParseException pe) {
                    // Unparseable input: show usage and keep the loop alive.
                    printOnCmd(printHelpMasterMode());
                } catch (MalformedURLException me) {
                    me.printStackTrace();
                }
            } else {
                printOnCmd("", false);
            }
            command = br.readLine();
        }
    } catch (IOException e) {
        printOnCmd("Failed to read your command, try again.");
    }
}

/** Builds a context-free cluster command of the given type and sends it to the cluster manager. */
private static void tellCluster(ActorRef clusterManager, ClustermanagerCommandType type) {
    ClusterManagerCommand command = new ClusterManagerCommand();
    command.setType(type);
    clusterManager.tell(command, clusterManager);
}

From source file:nl.esciencecenter.octopus.webservice.job.OctopusManager.java

/**
 * Terminates any running Octopus processes and stops the job poller.
 *
 * The poller may be mid-fetch of job statuses, so it gets up to one minute to
 * finish gracefully; only then are its tasks interrupted, as the original
 * comment intended but the code never did.
 *
 * @throws Exception if ending Octopus or awaiting termination fails
 */
public void stop() throws Exception {
    // TODO should I call OctopusFactory.endAll() or the octopus.end()
    octopus.end();
    executor.shutdown();
    // JobsPoller can be in middle of fetching job statuses so give it 1 minute to finish before interrupting it
    if (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
        // Grace period elapsed: interrupt whatever is still running.
        executor.shutdownNow();
    }
}