List of usage examples for com.google.common.base.Stopwatch.createStarted()
@CheckReturnValue public static Stopwatch createStarted()
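Before the project-specific examples below, here is a minimal, self-contained sketch of the basic pattern (assuming only Guava on the classpath; the Thread.sleep call and the class name StopwatchExample are placeholders, not taken from any of the projects listed):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() returns a Stopwatch that is already running.
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(100); // placeholder for the work being measured

        stopwatch.stop();
        System.out.println("Elapsed millis: " + stopwatch.elapsed(TimeUnit.MILLISECONDS));
        System.out.println(stopwatch); // toString() picks a readable unit, e.g. "100.4 ms"
    }
}

A Stopwatch can also be logged directly, as several of the examples below do, because its toString() reports the elapsed time in a human-readable unit.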
From source file: org.rustidea.parser.RsParser.java

@NotNull
private ASTNode doParse(@NotNull IElementType root) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    PsiBuilder.Marker marker = getBuilder().mark();
    getModuleParser().file();
    marker.done(root);
    stopwatch.stop();
    final double size = getBuilder().getCurrentOffset() / 1000.0;
    LOG.info(String.format("Parsed %.1f kb file in %s.", size, stopwatch));
    return getBuilder().getTreeBuilt();
}
From source file: com.twitter.distributedlog.limiter.ChainedRequestLimiter.java

public void apply(Request request) throws OverCapacityException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        for (RequestLimiter<Request> limiter : limiters) {
            limiter.apply(request);
        }
    } finally {
        applyTime.registerSuccessfulEvent(stopwatch.elapsed(TimeUnit.MICROSECONDS));
    }
}
From source file: ncmount.impl.listener.PerformanceAwareNotificationListener.java

/**
 * Handle notifications and measure number of notifications/second
 * @param notification example notification
 */
@Override
public void onVrfRouteNotification(final VrfRouteNotification notification) {
    final long andDecrement = notifCounter.getAndDecrement();
    if (andDecrement == expectedNotificationCount) {
        this.stopWatch = Stopwatch.createStarted();
        LOG.info("First notification received at {}", stopWatch);
    }
    LOG.debug("Notification received, {} to go.", andDecrement);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Notification received: {}", notification);
    }
    totalPrefixesReceived += notification.getVrfPrefixes().getVrfPrefix().size();
    if (andDecrement == 1) {
        this.stopWatch.stop();
        LOG.info("Last notification received at {}", stopWatch);
        LOG.info("Elapsed ms for {} notifications: {}", expectedNotificationCount,
                stopWatch.elapsed(TimeUnit.MILLISECONDS));
        LOG.info("Performance (notifications/second): {}",
                (expectedNotificationCount * 1.0 / stopWatch.elapsed(TimeUnit.MILLISECONDS)) * 1000);
        LOG.info("Performance (prefixes/second): {}",
                (totalPrefixesReceived * 1.0 / stopWatch.elapsed(TimeUnit.MILLISECONDS)) * 1000);
    }
}
From source file: com.b2international.snowowl.datastore.serviceconfig.ServiceConfigJob.java

@Override
protected final IStatus run(final IProgressMonitor monitor) {
    final Stopwatch serviceStopwatch = Stopwatch.createStarted();
    LOGGER.debug(">>> " + getName());
    monitor.beginTask(getName(), IProgressMonitor.UNKNOWN);
    try {
        return initService() ? Status.OK_STATUS : Status.CANCEL_STATUS;
    } catch (final SnowowlServiceException e) {
        // TODO (apeteri): consider returning error status here
        throw new SnowowlRuntimeException("Error when initializing service.", e);
    } finally {
        monitor.done();
        LOGGER.debug(MessageFormat.format("<<< {0} [{1}]", getName(), serviceStopwatch));
    }
}
From source file: com.facebook.buck.distributed.ThriftCoordinatorClient.java

public ThriftCoordinatorClient start() throws IOException {
    transport = new TFramedTransport(new TSocket(remoteHost, remotePort));
    Stopwatch stopwatch = Stopwatch.createStarted();
    // Keep retrying the connection until it succeeds or the overall connect timeout elapses.
    while (true) {
        try {
            transport.open();
            break;
        } catch (TTransportException e) {
            if (stopwatch.elapsed(TimeUnit.SECONDS) > MAX_CONNECT_TIMEOUT_SECONDS) {
                throw new IOException(
                        String.format("Failed to connect. Coordinator is still not healthy after [%d] seconds.",
                                MAX_CONNECT_TIMEOUT_SECONDS));
            }
            LOG.debug("Coordinator server currently not available. Retrying in a bit...");
            try {
                Thread.sleep(TimeUnit.SECONDS.toMillis(RETRY_TIMEOUT_SECONDS));
            } catch (InterruptedException innerException) {
                throw new RuntimeException(innerException);
            }
        }
    }
    TProtocol protocol = new TBinaryProtocol(transport);
    client = new CoordinatorService.Client(protocol);
    return this;
}
From source file: org.grouplens.lenskit.eval.EvalTarget.java

@Override
public void execute() throws BuildException {
    try {
        logger.info("beginning execution of {}", getName());
        Stopwatch watch = Stopwatch.createStarted();
        super.execute();
        watch.stop();
        logger.info("{} finished in {}", getName(), watch);
        if (lastTaskFuture != null) {
            if (!lastTaskFuture.isDone()) {
                logger.error("{}: future for task {} did not complete", getName(), lastTask);
                returnValue.setException(new RuntimeException("task future didn't complete"));
            } else {
                while (!returnValue.isDone()) {
                    try {
                        returnValue.set(lastTaskFuture.get());
                    } catch (ExecutionException ex) {
                        returnValue.setException(ex.getCause());
                    } catch (InterruptedException e) {
                        logger.warn("{}: task future get() was interrupted", getName());
                        returnValue.setException(e);
                        throw new BuildException("Build task interrupted", e);
                    }
                }
            }
        } else {
            returnValue.set(null);
        }
    } catch (RuntimeException ex) {
        returnValue.setException(ex);
        throw ex;
    }
}
From source file: com.google.devtools.build.android.AndroidDataSerializer.java

/**
 * Writes all of the collected DataKey -> DataValue.
 *
 * The binary format will be: <pre>
 * {@link Header}
 * {@link com.google.devtools.build.android.proto.SerializeFormat.DataKey} keys...
 * {@link com.google.devtools.build.android.proto.SerializeFormat.DataValue} entries...
 * </pre>
 *
 * The key and values will be written in comparable order, allowing for the optimization of not
 * converting the DataValue from binary, only writing it into a merged serialized binary.
 */
public void flushTo(Path out) throws IOException {
    Stopwatch timer = Stopwatch.createStarted();
    // Ensure the parent directory exists, if any.
    if (out.getParent() != null) {
        Files.createDirectories(out.getParent());
    }
    try (OutputStream outStream = new BufferedOutputStream(
            Files.newOutputStream(out, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE))) {
        // Set the header for the deserialization process.
        SerializeFormat.Header.Builder headerBuilder = Header.newBuilder().setEntryCount(entries.size());
        // Create table of source paths to allow references in the serialization format via an index.
        ByteArrayOutputStream sourceTableOutputStream = new ByteArrayOutputStream(2048);
        DataSourceTable sourceTable =
                DataSourceTable.createAndWrite(entries, sourceTableOutputStream, headerBuilder);
        headerBuilder.build().writeDelimitedTo(outStream);
        writeKeyValuesTo(entries, outStream, sourceTable, sourceTableOutputStream.toByteArray());
    }
    logger.fine(String.format("Serialized merged in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
}
From source file: com.smithsmodding.smithscore.SmithsCore.java

@Mod.EventHandler
public void preInit(FMLPreInitializationEvent event) {
    Stopwatch watch = Stopwatch.createStarted();
    if (isInDevenvironment()) {
        getLogger().warn(CoreReferences.LogMarkers.PREINIT, "");
        getLogger().warn(CoreReferences.LogMarkers.PREINIT,
                "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++");
        getLogger().warn(CoreReferences.LogMarkers.PREINIT,
                "SmithsCore starting in Dev mode. Current active Features:");
        getLogger().warn(CoreReferences.LogMarkers.PREINIT, " > Additional log output.");
        getLogger().warn(CoreReferences.LogMarkers.PREINIT, " > Debug rendering of structures.");
        getLogger().warn(CoreReferences.LogMarkers.PREINIT, " > Debug overlay rendering of UI components.");
        getLogger().warn(CoreReferences.LogMarkers.PREINIT,
                "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++");
        getLogger().warn(CoreReferences.LogMarkers.PREINIT, "");
    } else {
        getLogger().info(CoreReferences.LogMarkers.PREINIT, "SmithsCore starting in Normal mode.");
    }
    proxy.preInit();
    watch.stop();
    Long milliseconds = watch.elapsed(TimeUnit.MILLISECONDS);
    getLogger().info(CoreReferences.LogMarkers.PREINIT,
            "SmithsCore Pre-Init completed after: " + milliseconds + " ms.");
}
From source file: org.factcast.store.pgsql.internal.catchup.PgCatchUpFetchPage.java

public LinkedList<Fact> fetchIdFacts(@NonNull AtomicLong serial) {
    Stopwatch sw = Stopwatch.createStarted();
    final LinkedList<Fact> list = new LinkedList<>(jdbc.query(PgConstants.SELECT_ID_FROM_CATCHUP,
            createSetter(serial, pageSize), new PgIdFactExtractor(serial)));
    sw.stop();
    log.debug("{} fetched next page of Ids for cid={}, limit={}, ser>{} in {}ms", req, clientId, pageSize,
            serial.get(), sw.elapsed(TimeUnit.MILLISECONDS));
    return list;
}
From source file: org.apache.distributedlog.limiter.ChainedRequestLimiter.java

public void apply(Request request) throws OverCapacityException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        for (RequestLimiter<Request> limiter : limiters) {
            limiter.apply(request);
        }
    } finally {
        applyTime.registerSuccessfulEvent(stopwatch.elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
    }
}