Example usage for com.google.common.base Stopwatch elapsed

Introduction

On this page you can find example usage of com.google.common.base Stopwatch elapsed.

Prototype

@CheckReturnValue
public long elapsed(TimeUnit desiredUnit) 

Document

Returns the current elapsed time shown on this stopwatch, expressed in the desired time unit, with any fraction rounded down.
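Before the usage examples, here is a minimal, self-contained sketch of the pattern they all follow: start a stopwatch, do some work, and read the elapsed time in a chosen unit. The class name and the sleep calls are only illustrative placeholders for real work.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchElapsedExample {

    public static void main(String[] args) throws InterruptedException {
        // Create and start the stopwatch in one call.
        Stopwatch stopwatch = Stopwatch.createStarted();

        // Placeholder for the work being timed.
        TimeUnit.MILLISECONDS.sleep(250);

        // elapsed(TimeUnit) converts the elapsed time to the requested unit,
        // rounding any fraction down (so ~250 ms reads as 0 seconds).
        long elapsedMillis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        long elapsedSeconds = stopwatch.elapsed(TimeUnit.SECONDS);
        System.out.println("Took " + elapsedMillis + " ms (" + elapsedSeconds + " s)");

        // reset() clears the elapsed time; start() begins timing a new phase,
        // the pattern used in several of the examples below.
        stopwatch.reset().start();
        TimeUnit.MILLISECONDS.sleep(100);
        System.out.println("Second phase took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}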

Usage

From source file:com.google.api.control.Client.java

/**
 * Process a check request.
 *
 * The {@code req} is first passed to the {@code CheckAggregator}. If there is a valid cached
 * response, that is returned, otherwise a response is obtained from the transport.
 *
 * @param req a {@link CheckRequest}
 * @return a {@link CheckResponse} or {@code null} if none was cached and there was a transport
 *         failure
 */
public @Nullable CheckResponse check(CheckRequest req) {
    Preconditions.checkState(running, "Cannot check if it's not running");
    statistics.totalChecks.incrementAndGet();
    Stopwatch w = Stopwatch.createStarted(ticker);
    CheckResponse resp = checkAggregator.check(req);
    statistics.totalCheckCacheLookupTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
    if (resp != null) {
        statistics.checkHits.incrementAndGet();
        if (log.isLoggable(Level.FINER)) {
            log.log(Level.FINER, String.format("using cached check response for %s: %s", req, resp));
        }
        return resp;
    }

    // Application code should not fail (or be blocked) because check requests do not succeed.
    // Instead, it should fail open, so we simply log the error here and return null to indicate
    // that no response was obtained.
    try {
        w.reset().start();
        resp = transport.services().check(serviceName, req).execute();
        statistics.totalCheckTransportTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
        checkAggregator.addResponse(req, resp);
        return resp;
    } catch (IOException e) {
        log.log(Level.SEVERE, String.format("direct send of a check request %s failed because of %s", req, e));
        return null;
    }
}

From source file:com.google.api.control.Client.java

/**
 * Process a report request.
 *
 * The {@code req} is first passed to the {@code ReportAggregator}. It will either be aggregated
 * with prior requests or sent immediately.
 *
 * @param req a {@link ReportRequest}
 */
public void report(ReportRequest req) {
    Preconditions.checkState(running, "Cannot report if it's not running");
    statistics.totalReports.incrementAndGet();
    statistics.reportedOperations.addAndGet(req.getOperationsCount());
    Stopwatch w = Stopwatch.createStarted(ticker);
    boolean reported = reportAggregator.report(req);
    statistics.totalReportCacheUpdateTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
    if (!reported) {
        try {
            statistics.directReports.incrementAndGet();
            w.reset().start();
            transport.services().report(serviceName, req).execute();
            statistics.totalTransportedReportTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
        } catch (IOException e) {
            log.log(Level.SEVERE,
                    String.format("direct send of a report request %s failed because of %s", req, e));
        }
    }

    if (isRunningSchedulerDirectly()) {
        try {
            scheduler.run(false /* don't block */);
        } catch (InterruptedException e) {
            log.log(Level.SEVERE, String.format("direct run of scheduler failed because of %s", e));
        }
    }
    logStatistics();
}

From source file:org.apache.drill.exec.store.schedule.OldAssignmentCreator.java

OldAssignmentCreator(List<DrillbitEndpoint> incomingEndpoints, List<T> units) {
    logger.debug("Assigning {} units to {} endpoints", units.size(), incomingEndpoints.size());
    // Use the started factory so elapsed() below reports the actual assignment time.
    Stopwatch watch = Stopwatch.createStarted();

    Preconditions.checkArgument(incomingEndpoints.size() <= units.size(),
            String.format("Incoming endpoints %d " + "is greater than number of row groups %d",
                    incomingEndpoints.size(), units.size()));
    this.mappings = ArrayListMultimap.create();
    this.endpoints = Lists.newLinkedList(incomingEndpoints);

    ArrayList<T> rowGroupList = new ArrayList<>(units);
    for (double cutoff : ASSIGNMENT_CUTOFFS) {
        scanAndAssign(rowGroupList, cutoff, false, false);
    }
    scanAndAssign(rowGroupList, 0.0, true, false);
    scanAndAssign(rowGroupList, 0.0, true, true);

    logger.debug("Took {} ms to apply assignments", watch.elapsed(TimeUnit.MILLISECONDS));
    Preconditions.checkState(rowGroupList.isEmpty(),
            "All readEntries should be assigned by now, but some are still unassigned");
    Preconditions.checkState(!units.isEmpty());

}

From source file:org.openqa.selenium.javascript.ClosureTestStatement.java

@Override
public void evaluate() throws Throwable {
    URL testUrl = filePathToUrlFn.apply(testPath);
    LOG.info("Running: " + testUrl);

    Stopwatch stopwatch = Stopwatch.createStarted();

    WebDriver driver = driverSupplier.get();

    // Attempt to make the window as big as possible.
    try {
        driver.manage().window().maximize();
    } catch (RuntimeException ignored) {
        // We tried.
    }

    JavascriptExecutor executor = (JavascriptExecutor) driver;
    // Avoid Safari JS leak between tests.
    executor.executeScript("if (window && window.top) window.top.G_testRunner = null");

    try {
        driver.get(testUrl.toString());
    } catch (WebDriverException e) {
        fail("Test failed to load: " + e.getMessage());
    }

    while (!getBoolean(executor, Query.IS_FINISHED)) {
        long elapsedTime = stopwatch.elapsed(TimeUnit.SECONDS);
        if (timeoutSeconds > 0 && elapsedTime > timeoutSeconds) {
            throw new JavaScriptAssertionError("Tests timed out after " + elapsedTime + " s");
        }
        TimeUnit.MILLISECONDS.sleep(100);
    }

    if (!getBoolean(executor, Query.IS_SUCCESS)) {
        String report = getString(executor, Query.GET_REPORT);
        throw new JavaScriptAssertionError(report);
    }
}

From source file:org.apache.bookkeeper.bookie.IndexInMemPageMgr.java

/**
 * Grab ledger entry page whose first entry is <code>pageEntry</code>.
 * If the page didn't exist before, we allocate a memory page.
 * Otherwise, we grab a clean page and read it from disk.
 *
 * @param ledger Ledger Id
 * @param pageEntry Start entry of this entry page.
 */
private LedgerEntryPage grabLedgerEntryPage(long ledger, long pageEntry) throws IOException {
    LedgerEntryPage lep = grabCleanPage(ledger, pageEntry);
    try {
        // We should get the up-to-date page from the persistence manager
        // before we put it into the table; otherwise we would put
        // an empty page in it.
        Stopwatch readPageStopwatch = Stopwatch.createStarted();
        boolean isNewPage = indexPersistenceManager.updatePage(lep);
        if (!isNewPage) {
            ledgerCacheReadPageStats.registerSuccessfulEvent(readPageStopwatch.elapsed(TimeUnit.MICROSECONDS),
                    TimeUnit.MICROSECONDS);
        }
    } catch (IOException ie) {
        // if we grab a clean page, but failed to update the page
        // we should put this page in the free page list so that it
        // can be reassigned to the next grabPage request
        lep.releasePageNoCallback();
        pageMapAndList.addToListOfFreePages(lep);
        throw ie;
    }
    LedgerEntryPage oldLep;
    if (lep != (oldLep = pageMapAndList.putPage(lep))) {
        // if we grab a clean page, but failed to put it in the cache
        // we should put this page in the free page list so that it
        // can be reassigned to the next grabPage request
        lep.releasePageNoCallback();
        pageMapAndList.addToListOfFreePages(lep);
        // Increment the use count of the old lep because this is unexpected
        oldLep.usePage();
        lep = oldLep;
    }
    return lep;
}

From source file:fr.ens.transcriptome.aozan.fastqscreen.FastqScreen.java

/**
 * Execute fastqscreen in single-end or paired-end mode.
 * @param fastqRead1 fastq read 1 file input for the mapper
 * @param fastqRead2 fastq read 2 file input for the mapper
 * @param fastqSample instance describing the fastq sample
 * @param genomes list of reference genomes used by the mapper
 * @param genomeSample reference genome corresponding to the sample
 * @param isPairedMode true if this is a paired-end run and the paired mode
 *          option is enabled, false otherwise
 * @throws AozanException
 */
public FastqScreenResult execute(final File fastqRead1, final File fastqRead2, final FastqSample fastqSample,
        final List<String> genomes, final String genomeSample, final boolean isPairedMode)
        throws AozanException {

    // Timer
    final Stopwatch timer = Stopwatch.createStarted();

    final FastqScreenPseudoMapReduce pmr = new FastqScreenPseudoMapReduce(this.tmpDir, isPairedMode,
            this.mapperName, this.mapperArgument);

    try {

        if (isPairedMode) {
            pmr.doMap(fastqRead1, fastqRead2, genomes, genomeSample, this.confThreads);
        } else {
            pmr.doMap(fastqRead1, genomes, genomeSample, this.confThreads);
        }

        LOGGER.fine("FASTQSCREEN : step map for " + fastqSample.getKeyFastqSample() + " in mode "
                + (isPairedMode ? "paired" : "single") + " on genome(s) " + genomes + " in "
                + toTimeHumanReadable(timer.elapsed(TimeUnit.MILLISECONDS)));

        timer.reset();
        timer.start();

        pmr.doReduce(new File(this.tmpDir + "/outputDoReduce.txt"));

        LOGGER.fine("FASTQSCREEN : step reduce for " + fastqSample.getKeyFastqSample() + " in mode "
                + (isPairedMode ? "paired" : "single") + " in "
                + toTimeHumanReadable(timer.elapsed(TimeUnit.MILLISECONDS)));

        // Remove the temporary output file used in the map-reduce step
        final File f = new File(this.tmpDir + "/outputDoReduce.txt");
        if (!f.delete()) {
            LOGGER.warning("Fastqscreen : fail to delete file " + f.getAbsolutePath());
        }

    } catch (final IOException e) {
        throw new AozanException(e);

    } finally {
        timer.stop();
    }

    return pmr.getFastqScreenResult();
}

From source file:org.agatom.springatom.data.oid.creators.DefaultSOidCreator.java

@Override
public SOid fromString(final String from) throws Exception {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(String.format("fromString(from=%s)", from));
    }
    final SOid oid;
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        final String[] split = from.split(":");
        Assert.isTrue(split.length == 3,
                String.format("OID[%s] is invalid, it should be in format A:B:C", from));

        final String oidPrefix = split[TYPE_PREFIX_INDEX];
        final Class<?> oidClass = Class.forName(split[CLASS_NAME_INDEX]);
        final String oidId = split[ID_INDEX];

        oid = this.getOidObject(oidPrefix, oidClass, oidId);
    } catch (Exception exp) {
        LOGGER.error(String.format("fromString(from=%s) failed...", from), Throwables.getRootCause(exp));
        throw exp;
    }

    stopwatch.stop();

    if (LOGGER.isTraceEnabled()) {
        final long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        LOGGER.trace(String.format("fromString(from=%s) to SOid(oid=%s) took %d ms", from, oid, elapsed));
    }

    return oid;
}

From source file:net.conquiris.search.AbstractSearcher.java

public final <T> ItemResult<T> getFirst(final HitMapper<T> mapper, final Query query,
        final @Nullable Filter filter, final @Nullable Sort sort, final @Nullable Highlight highlight) {
    return perform(new Op<ItemResult<T>>() {
        public ItemResult<T> perform(IndexSearcher searcher) throws Exception {
            Stopwatch w = Stopwatch.createStarted();
            Query rewritten = searcher.rewrite(query);
            TopDocs docs = getTopDocs(searcher, query, filter, sort, 1);
            if (docs.totalHits > 0) {
                ScoreDoc sd = docs.scoreDocs[0];
                HighlightedQuery highlighted = MoreObjects.firstNonNull(highlight, Highlight.no())
                        .highlight(rewritten);
                float score = sd.score;
                T item = map(searcher, sd, highlighted, mapper);
                return ItemResult.found(docs.totalHits, score, w.elapsed(TimeUnit.MILLISECONDS), item);
            } else {
                return ItemResult.notFound(w.elapsed(TimeUnit.MILLISECONDS));
            }
        }
    });
}

From source file:benchmarkio.benchmark.report.LoggingReport.java

@Override
public void aggregateAndPrintResults(final CoordinatorType coordinatorType,
        final CompletionService<Histogram> executorCompletionService, final int numTasks,
        final long totalNumberOfMessages, final Stopwatch stopwatch) {

    // Used to accumulate results from all histograms.
    final Histogram totalHistogram = Histograms.create();

    for (int i = 0; i < numTasks; i++) {
        try {
            final Future<Histogram> histogramFuture = executorCompletionService.take();
            totalHistogram.add(histogramFuture.get());
        } catch (final InterruptedException e) {
            log.error("Failed to retrieve data, got inturrupt signal", e);
            Thread.currentThread().interrupt();

            break;
        } catch (final ExecutionException e) {
            log.error("Failed to retrieve data", e);
        }
    }

    stopwatch.stop();
    final long durationInSeconds = stopwatch.elapsed(TimeUnit.SECONDS);
    final long durationInMs = stopwatch.elapsed(TimeUnit.MILLISECONDS);
    // Using durationInMs, since we would lose precision when using durationInSeconds.
    final long throughputPerSecond = 1000 * totalNumberOfMessages / durationInMs;

    final long min = totalHistogram.getMinValue();
    final double percentile25 = totalHistogram.getPercentileAtOrBelowValue(25);
    final double percentile50 = totalHistogram.getPercentileAtOrBelowValue(50);
    final double percentile75 = totalHistogram.getPercentileAtOrBelowValue(75);
    final double percentile99 = totalHistogram.getPercentileAtOrBelowValue(99);
    final long max = totalHistogram.getMaxValue();
    final double mean = totalHistogram.getMean();
    final double stdDev = totalHistogram.getStdDeviation();

    log.info("=======================================");
    if (coordinatorType == CoordinatorType.PRODUCER) {
        log.info("PRODUCER STATS");
    } else {
        log.info("CONSUMER STATS");
    }
    log.info("=======================================");
    log.info("All units are {} unless stated otherwise", Consts.TIME_UNIT_FOR_REPORTING);
    log.info("DURATION (SECOND):      {}", durationInSeconds);
    log.info("THROUGHPUT / SECOND:    {}", throughputPerSecond);
    log.info("MIN:                    {}", min);
    log.info("25th percentile:        {}", percentile25);
    log.info("50th percentile:        {}", percentile50);
    log.info("75th percentile:        {}", percentile75);
    log.info("99th percentile:        {}", percentile99);
    log.info("MAX:                    {}", max);
    log.info("MEAN:                   {}", mean);
    log.info("STD DEVIATION:          {}", stdDev);
    log.info("\n\n\n");
}

From source file:com.xiaomi.linden.service.CoreLindenServiceImpl.java

@Override
public Future<Response> handleClusterIndexRequest(String content) {
    final Stopwatch sw = Stopwatch.createStarted();
    Future<Response> responseFuture;
    try {
        responseFuture = Future.value(lindenCluster.index(content));
        metricsManager.time(sw.elapsed(TimeUnit.NANOSECONDS), "index");
        return responseFuture;
    } catch (Exception e) {
        String errorStackInfo = Throwables.getStackTraceAsString(e);
        LOGGER.error("Handle json cluster failed, content : {} - error : {}", content, errorStackInfo);
        responseFuture = ResponseUtils.buildFailedFutureResponse(errorStackInfo);
        metricsManager.time(sw.elapsed(TimeUnit.NANOSECONDS), "failureIndex");
        return responseFuture;
    }
}