List of usage examples for java.util.concurrent TimeUnit SECONDS
TimeUnit SECONDS
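Before the project excerpts, here is a minimal, self-contained sketch of the most common TimeUnit.SECONDS operations seen below: converting between units, and sleeping for a number of seconds. All names in this sketch are illustrative and not taken from the examples.

import java.util.concurrent.TimeUnit;

// Minimal sketch of common TimeUnit.SECONDS operations (illustrative names).
public class TimeUnitSecondsSketch {
    public static void main(String[] args) throws InterruptedException {
        // Convert between units without magic numbers.
        long millis = TimeUnit.SECONDS.toMillis(3);                              // 3000
        long seconds = TimeUnit.SECONDS.convert(90_000, TimeUnit.MILLISECONDS);  // 90
        System.out.println(millis + " ms, " + seconds + " s");

        // Sleep for a given number of seconds; may throw InterruptedException.
        TimeUnit.SECONDS.sleep(1);
    }
}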
From source file:com.aef.TicketGeneratorImpl.java
protected void run() throws Exception {
    while (isRunning()) {
        List<TicketOrder> tickets = ticketService.getOpenTickets();
        if (tickets != null && !tickets.isEmpty()) {
            for (TicketOrder ticketOrder : tickets) {
                ticketOrder.setSymbol(symbolToLekFormat(ticketOrder.getSymbol()));
            }
            // Add all the tickets at once as a single batch
            TicketBatch ticketBatch = new TicketBatch(tickets);
            addToTicketQueue(ticketBatch);
            generateCount++;
        }
        pollCount++;
        LOGGER.info(String.format("Sleeping for %d seconds....", sleepInterval));
        TimeUnit.SECONDS.sleep(sleepInterval);
    }
}
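The loop above lets InterruptedException propagate to its caller. A minimal sketch of the same polling pattern that handles interruption explicitly and restores the interrupt flag (all names here are illustrative, not from the original class):

import java.util.concurrent.TimeUnit;

// Illustrative polling loop: sleep between iterations and stop cleanly on interrupt.
class PollingLoop implements Runnable {
    private final long sleepIntervalSeconds;

    PollingLoop(long sleepIntervalSeconds) {
        this.sleepIntervalSeconds = sleepIntervalSeconds;
    }

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            doWork();
            try {
                TimeUnit.SECONDS.sleep(sleepIntervalSeconds);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the flag and exit the loop
            }
        }
    }

    private void doWork() {
        // placeholder for the actual per-iteration work
    }
}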
From source file:net.bluemix.todo.store.ToDoStoreFactory.java
/**
 * Gets an instance of {@link ToDoStore}.
 * @return A {@link ToDoStore}.
 */
public static ToDoStore getInstance() throws ToDoStoreException {
    if (instance == null) {
        cloudFactory = new CloudFactory();
        try {
            Cloud cloud = cloudFactory.getCloud();
            List<ServiceInfo> infos = cloud.getServiceInfos();
            MongoServiceInfo mongoInfo = null;
            CloudantServiceInfo cloudantInfo = null;
            for (ServiceInfo info : infos) {
                if (info.getId().equals("todo-mongo-db")) {
                    mongoInfo = (MongoServiceInfo) info;
                    break;
                }
                if (info.getId().equals("todo-couch-db")) {
                    cloudantInfo = (CloudantServiceInfo) info;
                    break;
                }
            }
            if (mongoInfo != null) {
                instance = new MongoStore(getCollection(mongoInfo));
            } else if (cloudantInfo != null) {
                instance = new CloudantStore(getWebTarget(cloudantInfo));
            } else {
                instance = new InMemoryStore();
            }
        } catch (CloudException e) {
            instance = new InMemoryStore();
        }
        exec.scheduleAtFixedRate(new Cleanup(), PERIOD, PERIOD, TimeUnit.SECONDS);
    }
    return instance;
}
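The factory above schedules a recurring Cleanup task with scheduleAtFixedRate, using a delay and period expressed in seconds. A minimal standalone sketch of that scheduling call; the executor, period, and task body are assumptions for illustration only:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicCleanupSketch {
    public static void main(String[] args) {
        ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
        long periodSeconds = 30; // illustrative value for both initial delay and period

        // Run the task every periodSeconds, starting after the same initial delay.
        exec.scheduleAtFixedRate(
                () -> System.out.println("cleanup pass"),
                periodSeconds, periodSeconds, TimeUnit.SECONDS);
    }
}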
From source file:com.devicehive.rpcclient.RpcClientActionTest.java
@Test
public void testNotificationSearchAction() throws Exception {
    NotificationSearchRequest searchRequest = new NotificationSearchRequest();
    searchRequest.setId(Long.MAX_VALUE); // nonexistent id
    searchRequest.setGuid(UUID.randomUUID().toString()); // random guid

    Request request = Request.newBuilder()
            .withPartitionKey(searchRequest.getGuid())
            .withBody(searchRequest)
            .build();

    CompletableFuture<Response> future = new CompletableFuture<>();
    client.call(request, future::complete);

    Response response = future.get(10, TimeUnit.SECONDS);
    NotificationSearchResponse responseBody = (NotificationSearchResponse) response.getBody();
    assertTrue(responseBody.getNotifications().isEmpty());
}
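The test waits at most ten seconds for the RPC response via future.get(10, TimeUnit.SECONDS). A minimal sketch of a bounded wait on a CompletableFuture, including the TimeoutException path; the future and its value are illustrative:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedWaitSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> "response");
        try {
            // Block for at most 10 seconds, then give up.
            String response = future.get(10, TimeUnit.SECONDS);
            System.out.println(response);
        } catch (TimeoutException e) {
            System.err.println("no response within 10 seconds");
        }
    }
}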
From source file:co.cask.cdap.gateway.handlers.metrics.MetricsDeleteTest.java
@Test
public void testContextDelete() throws Exception {
    // Insert some metrics
    MetricsCollector collector = collectionService.getCollector(MetricsScope.SYSTEM,
            "WCount.f.WordCounter.unique", "0");
    collector.increment("process.events.processed", 6);
    collector.increment("process.events.out", 5);
    collector = collectionService.getCollector(MetricsScope.SYSTEM, "WCount.f.WordCounter.counter", "0");
    collector.increment("process.events.processed", 4);
    collector.increment("process.events.out", 3);
    collector = collectionService.getCollector(MetricsScope.SYSTEM, "WCount.f.WCounter.counter", "0");
    collector.increment("process.events.processed", 2);
    collector.increment("process.events.out", 1);

    // Wait for collection to happen
    TimeUnit.SECONDS.sleep(2);

    String base = "/v2/metrics/system/apps/WCount/flows";

    // make sure data is there
    Assert.assertEquals(6, getMetricCount(base + "/WordCounter/flowlets/unique", "process.events.processed"));
    Assert.assertEquals(5, getMetricCount(base + "/WordCounter/flowlets/unique", "process.events.out"));
    Assert.assertEquals(4, getMetricCount(base + "/WordCounter/flowlets/counter", "process.events.processed"));
    Assert.assertEquals(3, getMetricCount(base + "/WordCounter/flowlets/counter", "process.events.out"));
    Assert.assertEquals(2, getMetricCount(base + "/WCounter/flowlets/counter", "process.events.processed"));
    Assert.assertEquals(1, getMetricCount(base + "/WCounter/flowlets/counter", "process.events.out"));

    // do the delete
    HttpResponse response = doDelete(base + "/WordCounter");
    Assert.assertEquals(HttpResponseStatus.OK.getCode(), response.getStatusLine().getStatusCode());

    // test correct metrics got deleted
    Assert.assertEquals(0, getMetricCount(base + "/WordCounter/flowlets/unique", "process.events.processed"));
    Assert.assertEquals(0, getMetricCount(base + "/WordCounter/flowlets/unique", "process.events.out"));
    Assert.assertEquals(0, getMetricCount(base + "/WordCounter/flowlets/counter", "process.events.processed"));
    Assert.assertEquals(0, getMetricCount(base + "/WordCounter/flowlets/counter", "process.events.out"));

    // test other things did not get deleted
    Assert.assertEquals(2, getMetricCount(base + "/WCounter/flowlets/counter", "process.events.processed"));
    Assert.assertEquals(1, getMetricCount(base + "/WCounter/flowlets/counter", "process.events.out"));
}
From source file:com.bt.aloha.media.convedia.conference.ScheduledExecutorServiceMaxConferenceDurationScheduler.java
public void terminateConferenceAfterMaxDuration(ConferenceInfo conferenceInfo, ConferenceBean conferenceBean) {
    cancelTerminateConference(conferenceInfo);
    log.debug(String.format("Scheduling conference %s to be terminated after %s min",
            conferenceInfo.getId(), conferenceInfo.getMaxDurationInMinutes()));
    ScheduledFuture<?> future = this.executorService.schedule(
            new TerminateConferenceTask(conferenceInfo.getId(), conferenceBean),
            conferenceInfo.getMaxDurationInMinutes() * NUMBER_OF_SECONDS_IN_ONE_MINUTE, TimeUnit.SECONDS);
    conferenceInfo.setFuture(future);
}
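The scheduler above converts a maximum duration in minutes into a one-shot delay in seconds and keeps the ScheduledFuture so the termination can be cancelled later. A minimal sketch of that pattern; the executor, delay, and task are illustrative assumptions:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class DelayedTerminationSketch {
    public static void main(String[] args) {
        ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
        long maxDurationMinutes = 2; // illustrative value

        // Schedule a one-shot task after maxDurationMinutes, expressed in seconds.
        ScheduledFuture<?> future = executorService.schedule(
                () -> System.out.println("terminating"),
                TimeUnit.MINUTES.toSeconds(maxDurationMinutes), TimeUnit.SECONDS);

        // Keeping the future allows the termination to be cancelled before it fires.
        future.cancel(false);
        executorService.shutdown();
    }
}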
From source file:models.ConnectionContext.java
/**
 * Public constructor.
 *
 * @param metricsFactory Instance of <code>MetricsFactory</code>.
 * @param connection Websocket connection to bind to.
 * @param processorsFactory Factory for producing the protocol's <code>MessagesProcessor</code>.
 */
public ConnectionContext(final MetricsFactory metricsFactory, final WebSocket.Out<JsonNode> connection,
        final MessageProcessorsFactory processorsFactory) {
    _metricsFactory = metricsFactory;
    _connection = connection;
    _instrument = Akka.system().scheduler().schedule(
            new FiniteDuration(0, TimeUnit.SECONDS), // Initial delay
            new FiniteDuration(1, TimeUnit.SECONDS), // Interval
            getSelf(),
            "instrument",
            ExecutionContexts.global(),
            getSelf());
    _messageProcessors = processorsFactory.create(this);
    _metrics = createMetrics();
}
From source file:org.qucosa.migration.routes.StagingRouteBuilder.java
@Override
public void configure() throws Exception {
    errorHandler(deadLetterChannel("direct:dead")
            .maximumRedeliveries(5)
            .redeliveryDelay(TimeUnit.SECONDS.toMillis(3))
            .maximumRedeliveryDelay(TimeUnit.SECONDS.toMillis(60))
            .backOffMultiplier(2)
            .asyncDelayedRedelivery()
            .retryAttemptedLogLevel(LoggingLevel.WARN));

    from("direct:dead").routeId("Failed").errorHandler(noErrorHandler())
            .log(LoggingLevel.ERROR, "${body}")
            .setBody(simple("${body} ${exception}"))
            .to("file://target/output");

    from("direct:staging:file").routeId("staging-file")
            .log("Staging resources listed in ${body}")
            .process(new FileReaderProcessor())
            .process(new CommentedOutFilter("#"))
            .log("Found ${body.size} elements")
            .split(body()).parallelProcessing()
            .to("direct:staging");

    from("direct:staging").routeId("staging")
            .log("Staging resource: ${body}")
            .convertBodyTo(Opus4ResourceID.class)
            .choice()
                .when(simple("${body.isDocumentId}")).to("direct:staging:document")
                .otherwise().to("direct:staging:tenant");

    from("direct:staging:tenant").routeId("stage-tenant")
            .log("Processing elements of tenant resource: ${body}")
            .convertBodyTo(Opus4ResourceID.class)
            .to("opus4:resources")
            .log("Found ${body.size} elements")
            .split(body()).parallelProcessing()
            .to("direct:staging:document");

    from("direct:staging:document").routeId("stage-document").threads()
            .convertBodyTo(Opus4ResourceID.class)
            .choice()
                .when(constant(config.getBoolean("sword.purge")))
                    .log("Purging Fedora object qucosa:${body.identifier}")
                    .process(new PurgeFedoraObject(config))
            .end()
            .choice()
                .when(constant(config.getBoolean("sword.slugheader")))
                    .setHeader("Slug", simple("qucosa:${body.identifier}"))
            .end()
            .to("opus4:documents")
            .setHeader("Qucosa-File-Url", constant(config.getString("qucosa.file.url")))
            .bean(DepositMetsGenerator.class)
            .to("direct:deposit");

    from("direct:deposit").routeId("deposit-route")
            .setHeader("X-No-Op", constant(config.getBoolean("sword.noop")))
            .setHeader("X-On-Behalf-Of", constant(config.getString("sword.ownerID", null)))
            .setHeader("Content-Type", constant("application/vnd.qucosa.mets+xml"))
            .setHeader("Collection", constant(config.getString("sword.collection")))
            .convertBodyTo(SwordDeposit.class)
            .to("sword:deposit")
            .throttle(5).asyncDelayed()
            .choice()
                .when(constant(config.getBoolean("transforming")))
                    .transform(method(StagingRouteBuilder.class, "extractPID"))
                    .to("direct:transform");
}
From source file:com.microsoft.valda.oms.OmsAppender.java
private static void processQueue() {
    while (true) {
        try {
            LoggingEvent event = loggingEventQueue.poll(1L, TimeUnit.SECONDS);
            if (event != null) {
                instance.processEvent(event);
            }
        } catch (InterruptedException e) {
            // No operations.
        }
    }
}
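The appender drains its queue with poll(1L, TimeUnit.SECONDS), so the worker thread wakes up at least once per second even when no events arrive. A minimal sketch of that timed-poll consumer; the queue and element type are illustrative:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class TimedPollConsumerSketch {
    private static final BlockingQueue<String> queue = new LinkedBlockingQueue<>();

    public static void main(String[] args) throws InterruptedException {
        queue.put("event-1");
        while (true) {
            // Wait up to one second for the next element; returns null on timeout.
            String event = queue.poll(1L, TimeUnit.SECONDS);
            if (event == null) {
                break; // nothing arrived within the timeout
            }
            System.out.println("processing " + event);
        }
    }
}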
From source file:de.codecentric.batch.metrics.InfluxdbMetricsExporter.java
public InfluxdbMetricsExporter(MetricRegistry metricRegistry, final MetricReader metricReader, String server,
        Integer port, String dbName, String user, String password, String environment) throws Exception {
    Influxdb influxdb = new Influxdb(server, port, dbName, user, password);
    influxdb.debugJson = true;
    MetricFilter filter = new MetricFilter() {
        @Override
        public boolean matches(String name, Metric metric) {
            org.springframework.boot.actuate.metrics.Metric<?> bootMetric = metricReader.findOne(name);
            if (bootMetric.getTimestamp().after(lastExport)) {
                return true;
            }
            return false;
        }
    };
    reporter = InfluxdbReporter.forRegistry(metricRegistry).prefixedWith(environment)
            .convertRatesTo(TimeUnit.SECONDS).convertDurationsTo(TimeUnit.MILLISECONDS).filter(filter)
            .build(influxdb);
}
From source file:org.aliuge.crawler.fetcher.Fetcher.java
public static synchronized void runProxyer() throws Exception {
    // Background thread that maintains the pool of proxy IPs
    Thread t = new Thread(new Runnable() {
        @SuppressWarnings("null")
        @Override
        public void run() {
            int count = 0;
            @SuppressWarnings("unused")
            boolean flag_fetchProxyIps = false;
            while (proxyerisRuning) {
                try {
                    TimeUnit.SECONDS.sleep(5);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                try {
                    // Drop proxy IPs that have failed too often
                    for (String ip : m.keySet()) {
                        int failures = m.get(ip);
                        if (failures > 10 && StringUtils.isNotBlank(ip)) {
                            m.remove(ip);
                            proxyIps.remove(ip);
                            slog.info("Removed proxy IP " + ip);
                        }
                    }
                    if (proxyIps.size() < 30) {
                        slog.info("Fewer than 30 proxy IPs left, fetching more");
                        //proxyIps.addAll(ProxyIp.fetchProxyIps(true));
                        proxyIps.addAll(ProxyIp.fetchProxyIps(false));
                        m.clear();
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
                // Persist the proxy IPs every 10 iterations
                if (count == 10) {
                    storeProxyIp();
                    count = 0;
                } else {
                    count++;
                }
            }
            // Persist the proxy IPs before the thread exits
            storeProxyIp();
        }
    }, "ProxyIp");
    t.start();
}