Example usage for com.google.common.base Stopwatch stop

Introduction

This page collects example usages of com.google.common.base.Stopwatch.stop() from open-source projects.

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch. Future reads will return the fixed duration that had elapsed up to this point. Calling stop() on a stopwatch that is already stopped throws an IllegalStateException.
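
A minimal, self-contained sketch of the start/stop cycle (the sleep calls and printed labels are illustrative only, not taken from the examples below):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchStopExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch sw = Stopwatch.createStarted();
        Thread.sleep(100); // stand-in for the work being timed
        sw.stop(); // freezes the elapsed time; later reads return a fixed duration
        System.out.println("took " + sw.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // start() resumes the same instance, so elapsed time accumulates;
        // stop() returns the Stopwatch itself, so it can be chained.
        sw.start();
        Thread.sleep(50);
        System.out.println("total " + sw.stop().elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}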

Usage

From source file:monasca.persister.repository.vertica.VerticaMetricRepo.java

private void executeBatches(String id) {

    Stopwatch sw = Stopwatch.createStarted();

    metricsBatch.execute();

    stagedDefinitionsBatch.execute();

    stagedDimensionsBatch.execute();

    stagedDefinitionDimensionsBatch.execute();

    sw.stop();

    logger.debug("[{}]: executing batches took {}: ", id, sw);

}

From source file:cosmos.mapred.MediawikiQueries.java

public long docIdFetch(Store id, Map<Column, Long> counts, long totalResults) throws Exception {
    Stopwatch sw = new Stopwatch();

    // This is dumb, I didn't pad the docids...
    String prev = "!";
    long resultCount = 0L;
    sw.start();

    final CloseableIterable<MultimapRecord> results = this.sorts.fetch(id,
            Index.define(Defaults.DOCID_FIELD_NAME));

    for (MultimapRecord r : results) {
        sw.stop();

        resultCount++;

        String current = r.docId();
        if (prev.compareTo(current) > 0) {
            System.out.println("WOAH, got " + current + " docid which was greater than the previous " + prev);
            results.close();
            System.exit(1);
        }

        prev = current;

        sw.start();
    }

    sw.stop();

    System.out.println(
            Thread.currentThread().getName() + ": docIdFetch - Took " + sw.toString() + " to fetch results");
    logTiming(totalResults, sw.elapsed(TimeUnit.MILLISECONDS), "docIdFetch");

    results.close();

    return resultCount;
}
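
Note the stop/start pattern in the loop above: the stopwatch is stopped at the top of each iteration and restarted at the bottom, so it accumulates only the time the iterator spends fetching the next record, excluding the client-side ordering check.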

From source file:org.grouplens.lenskit.eval.traintest.TrainTestJob.java

@SuppressWarnings("PMD.AvoidCatchingThrowable")
private void runEvaluation() throws IOException, RecommenderBuildException {
    EventBus bus = task.getProject().getEventBus();
    bus.post(JobEvents.started(this));
    Closer closer = Closer.create();
    try {
        outputs = task.getOutputs().getPrefixed(algorithmInfo, dataSet);
        TableWriter userResults = outputs.getUserWriter();
        List<Object> outputRow = Lists.newArrayList();

        logger.info("Building {} on {}", algorithmInfo, dataSet);
        StopWatch buildTimer = new StopWatch();
        buildTimer.start();
        buildRecommender();
        buildTimer.stop();
        logger.info("Built {} in {}", algorithmInfo.getName(), buildTimer);

        logger.info("Measuring {} on {}", algorithmInfo.getName(), dataSet.getName());

        StopWatch testTimer = new StopWatch();
        testTimer.start();
        List<Object> userRow = Lists.newArrayList();

        List<MetricWithAccumulator<?>> accumulators = Lists.newArrayList();

        for (Metric<?> eval : outputs.getMetrics()) {
            accumulators.add(makeMetricAccumulator(eval));
        }

        LongSet testUsers = dataSet.getTestData().getUserDAO().getUserIds();
        final NumberFormat pctFormat = NumberFormat.getPercentInstance();
        pctFormat.setMaximumFractionDigits(2);
        pctFormat.setMinimumFractionDigits(2);
        final int nusers = testUsers.size();
        logger.info("Testing {} on {} ({} users)", algorithmInfo, dataSet, nusers);
        int ndone = 0;
        for (LongIterator iter = testUsers.iterator(); iter.hasNext();) {
            if (Thread.interrupted()) {
                throw new InterruptedException("eval job interrupted");
            }
            long uid = iter.nextLong();
            userRow.add(uid);
            userRow.add(null); // placeholder for the per-user time
            assert userRow.size() == 2;

            Stopwatch userTimer = Stopwatch.createStarted();
            TestUser test = getUserResults(uid);

            userRow.add(test.getTrainHistory().size());
            userRow.add(test.getTestHistory().size());

            for (MetricWithAccumulator<?> accum : accumulators) {
                List<Object> ures = accum.measureUser(test);
                if (ures != null) {
                    userRow.addAll(ures);
                }
            }
            userTimer.stop();
            userRow.set(1, userTimer.elapsed(TimeUnit.MILLISECONDS) * 0.001);
            if (userResults != null) {
                try {
                    userResults.writeRow(userRow);
                } catch (IOException e) {
                    throw new RuntimeException("error writing user row", e);
                }
            }
            userRow.clear();

            ndone += 1;
            if (ndone % 100 == 0) {
                testTimer.split();
                double time = testTimer.getSplitTime();
                double tpu = time / ndone;
                double tleft = (nusers - ndone) * tpu;
                logger.info("tested {} of {} users ({}), ETA {}", ndone, nusers,
                        pctFormat.format(((double) ndone) / nusers),
                        DurationFormatUtils.formatDurationHMS((long) tleft));
            }
        }
        testTimer.stop();
        logger.info("Tested {} in {}", algorithmInfo.getName(), testTimer);

        writeMetricValues(buildTimer, testTimer, outputRow, accumulators);
        bus.post(JobEvents.finished(this));
    } catch (Throwable th) {
        bus.post(JobEvents.failed(this, th));
        throw closer.rethrow(th, RecommenderBuildException.class);
    } finally {
        try {
            cleanup();
        } finally {
            outputs = null;
            closer.close();
        }
    }
}

From source file:org.hashtrees.manager.HashTreesManager.java

public void synch(final ServerName sn, final long treeId, boolean doAuthenticate, SyncType syncType)
        throws IOException, SynchNotAllowedException {
    boolean synchAllowed = doAuthenticate ? authenticator.canSynch(localServer, sn) : true;
    Pair<ServerName, Long> hostNameAndTreeId = Pair.create(sn, treeId);
    if (synchAllowed) {
        boolean synced = false;
        SyncDiffResult result = null;
        notifier.preSync(treeId, sn);
        try {
            LOG.info("Syncing {}.", hostNameAndTreeId);
            Stopwatch watch = Stopwatch.createStarted();
            HashTreesRemoteClient remoteSyncClient = getHashTreeSyncClient(sn);
            result = hashTrees.synch(treeId, remoteSyncClient, syncType);
            LOG.info("Synch result for {} - {}", hostNameAndTreeId, result);
            watch.stop();
            LOG.info("Time taken for syncing ({}) (in ms) : {}", hostNameAndTreeId,
                    watch.elapsed(TimeUnit.MILLISECONDS));
            LOG.info("Syncing {} complete.", hostNameAndTreeId);
            synced = true;
        } catch (TException e) {
            LOG.error("Unable to synch remote hash tree server {} : {}", hostNameAndTreeId, e);
        } finally {
            notifier.postSync(treeId, sn, result, synced);
        }
    } else {
        LOG.error("Synch is not allowed between {} and {}", localServer, sn);
        throw new SynchNotAllowedException(localServer, sn);
    }
}

From source file:demos.BatchInsert.java

public void run() {
    try {
        logger.info("Preparing to insert metric data points");

        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("demo");
        PreparedStatement insert = session
                .prepare("insert into metric_data (metric_id, time, value) values (?, ?, ?)");
        Random random = new Random();
        DateTime time = DateTime.now().minusYears(1);
        final CountDownLatch latch = new CountDownLatch(NUM_INSERTS / BATCH_SIZE);

        FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {
            @Override
            public void onSuccess(ResultSet result) {
                latch.countDown();
            }

            @Override
            public void onFailure(Throwable t) {
                logger.warn("There was an error inserting data", t);
                latch.countDown();
            }
        };

        Stopwatch stopwatch = new Stopwatch().start();
        BatchStatement batch = new BatchStatement();
        for (int i = 0; i < NUM_INSERTS; ++i) {
            String metricId = "metric-" + Math.abs(random.nextInt() % NUM_METRICS);
            double value = random.nextDouble();
            batch.add(insert.bind(metricId, time.toDate(), value));
            time = time.plusSeconds(10);
            if (batch.size() == BATCH_SIZE) {
                ResultSetFuture future = session.executeAsync(batch);
                Futures.addCallback(future, callback);
                batch = new BatchStatement();
            }
        }
        latch.await();
        stopwatch.stop();

        logger.info("Finished inserting {} data points in {} ms", NUM_INSERTS,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (InterruptedException e) {
        logger.info("There was an interrupt while waiting for inserts to complete");
    }
}
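
Note: the new Stopwatch() constructor used in several of these examples was deprecated in later Guava releases in favor of static factory methods. A minimal equivalent, assuming a Guava version that has the factories:

    Stopwatch started = Stopwatch.createStarted();     // replaces new Stopwatch().start()
    Stopwatch unstarted = Stopwatch.createUnstarted(); // replaces new Stopwatch()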

From source file:org.locationtech.geogig.model.impl.LegacyTreeBuilder.java

private void checkPendingWrites() {
    final int pendingWritesThreshold = 10 * 1000;
    final boolean topLevelTree = this.depth == 0;// am I an actual (addressable) tree or bucket
                                                 // tree of a higher level one?
    final boolean forceWrite = pendingWritesCache.size() >= pendingWritesThreshold;
    if (!pendingWritesCache.isEmpty() && (topLevelTree || forceWrite)) {
        LOGGER.debug("calling db.putAll for {} buckets because {}...", pendingWritesCache.size(),
                (topLevelTree ? "writing top level tree"
                        : "there are " + pendingWritesCache.size() + " pending bucket writes"));
        Stopwatch sw2 = Stopwatch.createStarted();
        obStore.putAll(pendingWritesCache.values().iterator());
        pendingWritesCache.clear();
        LOGGER.debug("done in {}", sw2.stop());
    }
}
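
Because stop() returns the Stopwatch itself, the example above chains the call directly into the log statement; Stopwatch.toString() then renders the frozen elapsed time in a human-readable form.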

From source file:ai.grakn.engine.SystemKeyspace.java

/**
 * Load the system ontology into a newly created system keyspace. Because the ontology
 * only consists of types, the inserts are idempotent and it is safe to load it
 * multiple times.
 */
public void loadSystemOntology() {
    Stopwatch timer = Stopwatch.createStarted();
    try (GraknGraph graph = factory.getGraph(SYSTEM_GRAPH_NAME, GraknTxType.WRITE)) {
        if (graph.getOntologyConcept(KEYSPACE_ENTITY) != null) {
            checkVersion(graph);
            return;
        }
        LOG.info("No other version found, loading ontology for version {}", GraknVersion.VERSION);
        loadSystemOntology(graph);
        graph.getResourceType(SYSTEM_VERSION).putResource(GraknVersion.VERSION);
        graph.admin().commitNoLogs();
        LOG.info("Loaded system ontology to system keyspace. Took: {}", timer.stop());
    } catch (Exception e) {
        LOG.error("Error while loading system ontology in {}. The error was: {}", timer.stop(), e.getMessage(),
                e);
        throw e;
    }
}

From source file:org.grouplens.lenskit.cli.Predict.java

@Override
public void execute() throws IOException, RecommenderBuildException {
    LenskitRecommenderEngine engine = loadEngine();

    long user = options.getLong("user");
    List<Long> items = options.get("items");

    LenskitRecommender rec = engine.createRecommender();
    RatingPredictor pred = rec.getRatingPredictor();
    if (pred == null) {
        logger.error("recommender has no rating predictor");
        throw new UnsupportedOperationException("no rating predictor");
    }

    logger.info("predicting {} items", items.size());
    Symbol pchan = getPrintChannel();
    Stopwatch timer = Stopwatch.createStarted();
    SparseVector preds = pred.predict(user, items);
    Long2ObjectMap channel = null;
    if (pchan != null) {
        for (TypedSymbol sym : preds.getChannelSymbols()) {
            if (sym.getRawSymbol().equals(pchan)) {
                channel = preds.getChannel(sym);
            }
        }
    }
    for (VectorEntry e : preds.fast()) {
        System.out.format("  %d: %.3f", e.getKey(), e.getValue());
        if (channel != null) {
            System.out.format(" (%s)", channel.get(e.getKey()));
        }
        System.out.println();
    }
    timer.stop();
    logger.info("predicted for {} items in {}", items.size(), timer);
}

From source file:benchmarkio.benchmark.report.LoggingReport.java

@Override
public void aggregateAndPrintResults(final CoordinatorType coordinatorType,
        final CompletionService<Histogram> executorCompletionService, final int numTasks,
        final long totalNumberOfMessages, final Stopwatch stopwatch) {

    // Used to accumulate results from all histograms.
    final Histogram totalHistogram = Histograms.create();

    for (int i = 0; i < numTasks; i++) {
        try {
            final Future<Histogram> histogramFuture = executorCompletionService.take();
            totalHistogram.add(histogramFuture.get());
        } catch (final InterruptedException e) {
            log.error("Failed to retrieve data, got inturrupt signal", e);
            Thread.currentThread().interrupt();

            break;
        } catch (final ExecutionException e) {
            log.error("Failed to retrieve data", e);
        }
    }

    stopwatch.stop();
    final long durationInSeconds = stopwatch.elapsed(TimeUnit.SECONDS);
    final long durationInMs = stopwatch.elapsed(TimeUnit.MILLISECONDS);
    // Using durationInMs, since precision would be lost with durationInSeconds.
    final long throughputPerSecond = 1000 * totalNumberOfMessages / durationInMs;

    final long min = totalHistogram.getMinValue();
    final double percentile25 = totalHistogram.getPercentileAtOrBelowValue(25);
    final double percentile50 = totalHistogram.getPercentileAtOrBelowValue(50);
    final double percentile75 = totalHistogram.getPercentileAtOrBelowValue(75);
    final double percentile99 = totalHistogram.getPercentileAtOrBelowValue(99);
    final long max = totalHistogram.getMaxValue();
    final double mean = totalHistogram.getMean();
    final double stdDev = totalHistogram.getStdDeviation();

    log.info("=======================================");
    if (coordinatorType == CoordinatorType.PRODUCER) {
        log.info("PRODUCER STATS");
    } else {
        log.info("CONSUMER STATS");
    }
    log.info("=======================================");
    log.info("All units are {} unless stated otherwise", Consts.TIME_UNIT_FOR_REPORTING);
    log.info("DURATION (SECOND):      {}", durationInSeconds);
    log.info("THROUGHPUT / SECOND:    {}", throughputPerSecond);
    log.info("MIN:                    {}", min);
    log.info("25th percentile:        {}", percentile25);
    log.info("50th percentile:        {}", percentile50);
    log.info("75th percentile:        {}", percentile75);
    log.info("99th percentile:        {}", percentile99);
    log.info("MAX:                    {}", max);
    log.info("MEAN:                   {}", mean);
    log.info("STD DEVIATION:          {}", stdDev);
    log.info("\n\n\n");
}
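
Here the Stopwatch arrives as a method parameter already running: the caller starts it when the benchmark begins, and the report stops it only after all worker histograms have been collected, so the reported duration includes the time spent waiting on the futures.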

From source file:org.apache.bookkeeper.replication.Auditor.java

public void start() {
    LOG.info("I'm starting as Auditor Bookie. ID: {}", bookieIdentifier);
    // on startup watching available bookie and based on the
    // available bookies determining the bookie failures.
    synchronized (this) {
        if (executor.isShutdown()) {
            return;
        }

        long interval = conf.getAuditorPeriodicCheckInterval();

        if (interval > 0) {
            LOG.info("Auditor periodic ledger checking enabled" + " 'auditorPeriodicCheckInterval' {} seconds",
                    interval);
            executor.scheduleAtFixedRate(new Runnable() {
                public void run() {
                    try {
                        if (!ledgerUnderreplicationManager.isLedgerReplicationEnabled()) {
                            LOG.info("Ledger replication disabled, skipping");
                            return;
                        }

                        Stopwatch stopwatch = new Stopwatch().start();
                        checkAllLedgers();
                        checkAllLedgersTime.registerSuccessfulEvent(stopwatch.stop().elapsedMillis(),
                                TimeUnit.MILLISECONDS);
                    } catch (KeeperException ke) {
                        LOG.error("Exception while running periodic check", ke);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        LOG.error("Interrupted while running periodic check", ie);
                    } catch (BKAuditException bkae) {
                        LOG.error("Exception while running periodic check", bkae);
                    } catch (BKException bke) {
                        LOG.error("Exception running periodic check", bke);
                    } catch (IOException ioe) {
                        LOG.error("I/O exception running periodic check", ioe);
                    } catch (ReplicationException.UnavailableException ue) {
                        LOG.error("Underreplication manager unavailable " + "running periodic check", ue);
                    }
                }
            }, interval, interval, TimeUnit.SECONDS);
        } else {
            LOG.info("Periodic checking disabled");
        }
        try {
            notifyBookieChanges();
            knownBookies = getAvailableBookies();
        } catch (BKException bke) {
            LOG.error("Couldn't get bookie list, exiting", bke);
            submitShutdownTask();
        }

        long bookieCheckInterval = conf.getAuditorPeriodicBookieCheckInterval();
        if (bookieCheckInterval == 0) {
            LOG.info("Auditor periodic bookie checking disabled, running once check now anyhow");
            executor.submit(BOOKIE_CHECK);
        } else {
            LOG.info("Auditor periodic bookie checking enabled"
                    + " 'auditorPeriodicBookieCheckInterval' {} seconds", bookieCheckInterval);
            executor.scheduleAtFixedRate(BOOKIE_CHECK, 0, bookieCheckInterval, TimeUnit.SECONDS);
        }
    }
}
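
The stopwatch.stop().elapsedMillis() chain in the periodic-check task uses the elapsedMillis() shorthand from older Guava releases; in later releases the same read is written stopwatch.elapsed(TimeUnit.MILLISECONDS).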