List of usage examples for com.google.common.base.Stopwatch.elapsed
@CheckReturnValue public long elapsed(TimeUnit desiredUnit)
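Before the examples from real projects below, here is a minimal, self-contained sketch of the call; the class name and the simulated work are illustrative only:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchElapsedDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(250); // stand-in for the work being timed
        // elapsed(TimeUnit) truncates to the requested unit, so this reports whole milliseconds (~250)
        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        System.out.println("work took " + millis + " ms");
    }
}

Note that elapsed(TimeUnit) truncates rather than rounds; newer Guava releases also provide a no-argument elapsed() that returns a java.time.Duration.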
From source file:org.glowroot.container.Threads.java
public static void preShutdownCheck(Collection<Thread> preExistingThreads) throws InterruptedException {
    // give the tests 5 seconds to shut down any threads they may have created,
    // e.g. give tomcat time to shut down when testing the tomcat plugin
    Stopwatch stopwatch = Stopwatch.createStarted();
    List<Thread> nonPreExistingThreads;
    List<Thread> rogueThreads;
    do {
        nonPreExistingThreads = getNonPreExistingThreads(preExistingThreads);
        rogueThreads = Lists.newArrayList();
        for (Thread thread : nonPreExistingThreads) {
            if (isRogueThread(thread)) {
                rogueThreads.add(thread);
            }
        }
        // check the total number of threads to make sure Glowroot is not creating too many
        //
        // currently, the seven threads are:
        //
        // Glowroot-Background-0
        // Glowroot-Background-1
        // H2 Log Writer GLOWROOT
        // H2 File Lock Watchdog <lock db file>
        // Glowroot-Http-Boss
        // Glowroot-Http-Worker-0
        // Generate Seed
        if (rogueThreads.isEmpty() && nonPreExistingThreads.size() <= 7) {
            // success
            return;
        }
        // wait a few milliseconds before trying again
        Thread.sleep(10);
    } while (stopwatch.elapsed(SECONDS) < 5);
    // failure
    if (!rogueThreads.isEmpty()) {
        throw new RogueThreadsException(rogueThreads);
    } else {
        throw new TooManyThreadsException(nonPreExistingThreads);
    }
}
From source file:org.apache.beam.runners.spark.translation.streaming.WatermarkSyncedDStream.java
@Override
public scala.Option<RDD<WindowedValue<T>>> compute(final Time validTime) {
    final long batchTime = validTime.milliseconds();
    LOG.trace("BEFORE waiting for watermark sync, "
            + "LastWatermarkedBatchTime: {}, current batch time: {}",
            GlobalWatermarkHolder.getLastWatermarkedBatchTime(), batchTime);
    final Stopwatch stopwatch = Stopwatch.createStarted();
    awaitWatermarkSyncWith(batchTime);
    stopwatch.stop();
    LOG.info("Waited {} millis for watermarks to sync up with the current batch ({})",
            stopwatch.elapsed(TimeUnit.MILLISECONDS), batchTime);
    LOG.info("Watermarks are now: {}", GlobalWatermarkHolder.get(batchDuration));
    LOG.trace("AFTER waiting for watermark sync, "
            + "LastWatermarkedBatchTime: {}, current batch time: {}",
            GlobalWatermarkHolder.getLastWatermarkedBatchTime(), batchTime);
    final RDD<WindowedValue<T>> rdd = generateRdd();
    isFirst = false;
    return scala.Option.apply(rdd);
}
From source file:benchmarkio.consumer.kafka.KafkaMessageConsumer.java
@Override
public Histogram call() throws Exception {
    // Note that this is a polling consumer and will be terminated
    // whenever the Consts.POLLING_CONSUMER_MAX_IDLE_TIME_MS passes and no new messages have arrived.
    while (true) {
        try {
            final Map<String, ConsumerRecords<String, String>> records =
                    consumer.poll(Consts.POLLING_CONSUMER_MAX_IDLE_TIME_MS);
            final Map<TopicPartition, Long> processedOffsets = new HashMap<TopicPartition, Long>();
            for (final Entry<String, ConsumerRecords<String, String>> recordMetadata : records.entrySet()) {
                final List<ConsumerRecord<String, String>> recordsPerTopic =
                        recordMetadata.getValue().records();
                for (int i = 0; i < recordsPerTopic.size(); i++) {
                    final ConsumerRecord<String, String> record = recordsPerTopic.get(i);
                    // process record
                    processedOffsets.put(record.topicAndPartition(), record.offset());
                    // Start
                    final Stopwatch stopwatch = Stopwatch.createStarted();
                    final Object message = record.value();
                    // End
                    stopwatch.stop();
                    histogram.recordValue(stopwatch.elapsed(Consts.TIME_UNIT_FOR_REPORTING));
                }
            }
        } catch (final Exception ex) {
            logger.error("Failed to consume messages: ", ex);
            break;
        }
    }
    return histogram;
}
From source file:com.spotify.ffwd.kafka.KafkaPluginSink.java
private AsyncFuture<Void> send(final Iterator<List<KeyedMessage<Integer, byte[]>>> batches) {
    final UUID id = UUID.randomUUID();
    return async.call(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            final List<Long> times = new ArrayList<>();
            log.info("{}: Start sending of batch", id);
            while (batches.hasNext()) {
                final Stopwatch watch = Stopwatch.createStarted();
                producer.send(batches.next());
                times.add(watch.elapsed(TimeUnit.MILLISECONDS));
            }
            log.info("{}: Done sending batch (timings in ms: {})", id, times);
            return null;
        }
    }, executorService);
}
From source file:brooklyn.util.time.Duration.java
public boolean isLongerThan(Stopwatch stopwatch) {
    return isLongerThan(Duration.millis(stopwatch.elapsed(TimeUnit.MILLISECONDS)));
}
From source file:com.linagora.scheduling.FutureTestListener.java
State getNextState(int timeout, TimeUnit unit) throws Exception {
    Stopwatch start = Stopwatch.createStarted();
    try {
        State state = states.poll(timeout, unit);
        if (state == null) {
            throw new TimeoutException();
        }
        return state;
    } finally {
        System.out.println("next state in : " + start.elapsed(TimeUnit.MILLISECONDS));
    }
}
From source file:com.palantir.atlasdb.keyvalue.cassandra.jmx.CassandraJmxCompactionManager.java
public void performTombstoneCompaction(long timeoutInSeconds, String keyspace, String tableName)
        throws InterruptedException, TimeoutException {
    Stopwatch stopWatch = Stopwatch.createStarted();
    if (!removeHintedHandoff(timeoutInSeconds)) {
        return;
    }
    log.info("All hinted handoff deletion tasks are completed.");
    long elapsedSeconds = stopWatch.elapsed(TimeUnit.SECONDS);
    long remainingTimeoutSeconds = timeoutInSeconds - elapsedSeconds;
    if (remainingTimeoutSeconds <= 0) {
        throw new TimeoutException(String.format(
                "Task execution timed out after %d seconds. Timeout seconds: %d.",
                elapsedSeconds, timeoutInSeconds));
    }
    // ALL HINTED HANDOFFS NEED TO BE DELETED BEFORE MOVING TO TOMBSTONE COMPACTION TASK
    if (!deleteTombstone(keyspace, tableName, remainingTimeoutSeconds)) {
        return;
    }
    log.info("All compaction tasks are completed.");
}
From source file:org.apache.drill.exec.physical.impl.xsort.BatchGroup.java
public void addBatch(VectorContainer newContainer) throws IOException {
    assert fs != null;
    assert path != null;
    if (outputStream == null) {
        outputStream = fs.create(path);
    }
    int recordCount = newContainer.getRecordCount();
    WritableBatch batch = WritableBatch.getBatchNoHVWrap(recordCount, newContainer, false);
    VectorAccessibleSerializable outputBatch = new VectorAccessibleSerializable(batch, allocator);
    Stopwatch watch = Stopwatch.createStarted();
    outputBatch.writeToStream(outputStream);
    newContainer.zeroVectors();
    logger.debug("Took {} us to spill {} records", watch.elapsed(TimeUnit.MICROSECONDS), recordCount);
    spilledBatches++;
}
From source file:brooklyn.util.time.Duration.java
public boolean isShorterThan(Stopwatch stopwatch) {
    return isShorterThan(Duration.millis(stopwatch.elapsed(TimeUnit.MILLISECONDS)));
}
From source file:org.apache.samza.util.EmbeddedTaggedRateLimiter.java
@Override
public Map<String, Integer> acquire(Map<String, Integer> tagToCreditsMap, long timeout, TimeUnit unit) {
    ensureTagsAreValid(tagToCreditsMap);
    long timeoutInNanos = NANOSECONDS.convert(timeout, unit);
    Stopwatch stopwatch = Stopwatch.createStarted();
    return tagToCreditsMap.entrySet().stream()
            .map(e -> {
                String tag = e.getKey();
                int requiredCredits = e.getValue();
                long remainingTimeoutInNanos = Math.max(0L, timeoutInNanos - stopwatch.elapsed(NANOSECONDS));
                com.google.common.util.concurrent.RateLimiter rateLimiter = tagToRateLimiterMap.get(tag);
                int availableCredits =
                        rateLimiter.tryAcquire(requiredCredits, remainingTimeoutInNanos, NANOSECONDS)
                                ? requiredCredits
                                : 0;
                return new ImmutablePair<>(tag, availableCredits);
            })
            .collect(Collectors.toMap(ImmutablePair::getKey, ImmutablePair::getValue));
}