Example usage for com.google.common.base Stopwatch stop

Introduction

This page collects example usages of com.google.common.base.Stopwatch.stop() from open-source projects.

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch; subsequent elapsed-time reads return the fixed duration that had elapsed up to this point. The method returns this Stopwatch instance and throws IllegalStateException if the stopwatch is already stopped.
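
Before the project examples, here is a minimal, self-contained sketch of the pattern they all follow: create a started stopwatch, run the work to be timed, call stop(), then read the elapsed time. The class name and the sleep are illustrative only and not taken from any of the sources below.

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class StopwatchStopDemo {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() returns a stopwatch that is already running.
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(250); // stand-in for the work being timed

        // stop() freezes the elapsed duration and returns the same instance,
        // so the call can be chained or logged directly.
        stopwatch.stop();

        System.out.println("took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println("took " + stopwatch); // toString() is human-readable, e.g. "250.1 ms"
    }
}

Note that several of the examples below were written against older Guava releases and use the now-removed constructor new Stopwatch() and methods such as elapsedMillis() and elapsedTime(TimeUnit), which have since been replaced by Stopwatch.createStarted()/createUnstarted() and elapsed(TimeUnit).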

Usage

From source file:uk.ac.ebi.atlas.search.diffanalytics.DiffAnalyticsDao.java

public List<DiffAnalytics> fetchTopExpressions(Optional<Collection<IndexedAssayGroup>> indexedContrasts,
        Optional<? extends Collection<String>> geneIds, String species) {
    Optional<ImmutableSet<IndexedAssayGroup>> uniqueIndexedContrasts = uniqueIndexedContrasts(indexedContrasts);

    log("fetchTopExpressions", uniqueIndexedContrasts, geneIds);

    Stopwatch stopwatch = Stopwatch.createStarted();

    DatabaseQuery<Object> indexedContrastQuery = buildSelect(uniqueIndexedContrasts, geneIds, species);

    jdbcTemplate.setMaxRows(RESULT_SIZE);

    List<DiffAnalytics> results;

    try {
        results = jdbcTemplate.query(indexedContrastQuery.getQuery(), dbeRowMapper,
                indexedContrastQuery.getParameters().toArray());

        stopwatch.stop();

        LOGGER.debug(String.format("fetchTopExpressions returned %s expressions in %.2f seconds",
                results.size(), stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000D));

    } catch (Exception e) {
        LOGGER.error(e.getMessage(), e);
        throw e;
    }

    return results;

}

From source file:org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats.java

private OnDemandRaftState getOnDemandRaftState() {
    String name = getShardName();
    OnDemandRaftState state = onDemandRaftStateCache.getIfPresent(name);
    if (state == null) {
        statRetrievalError = null;
        statRetrievalTime = null;

        if (shard != null) {
            Timeout timeout = new Timeout(10, TimeUnit.SECONDS);
            try {
                Stopwatch timer = Stopwatch.createStarted();

                state = (OnDemandRaftState) Await.result(
                        Patterns.ask(shard.getSelf(), GetOnDemandRaftState.INSTANCE, timeout),
                        timeout.duration());

                statRetrievalTime = timer.stop().toString();
                onDemandRaftStateCache.put(name, state);
            } catch (Exception e) {
                statRetrievalError = e.toString();
            }
        }

        state = state != null ? state : OnDemandRaftState.builder().build();
    }

    return state;
}

From source file:org.locationtech.geogig.remotes.pack.PackImpl.java

private RefDiff applyToPreOrder(PackProcessor target, RefRequest req, Deduplicator deduplicator,
        ProgressListener progress) {

    progress.setDescription("Saving missing revision objects changes for " + req.name);
    ObjectReporter objectReport = new ObjectReporter(progress);

    // back up current progress indicator
    final Function<ProgressListener, String> defaultProgressIndicator;
    defaultProgressIndicator = progress.progressIndicator();
    // set our custom progress indicator
    progress.setProgressIndicator((p) -> objectReport.toString());

    final List<RevCommit> commits = missingCommits.get(req);
    checkNotNull(commits);

    final ObjectDatabase sourceStore = source.objectDatabase();

    List<ObjectId[]> diffRootTreeIds = collectMissingRootTreeIdPairs(commits, sourceStore);

    final ContentIdsProducer producer = ContentIdsProducer.forCommits(sourceStore, diffRootTreeIds,
            deduplicator, objectReport);

    final ExecutorService producerThread = Executors.newSingleThreadExecutor();
    try {
        producerThread.submit(producer);
        Iterator<ObjectId> missingContentIds = producer.iterator();

        Iterator<RevObject> allObjects;
        {
            Iterator<RevObject> missingContents;
            Iterator<RevCommit> commitsIterator;
            missingContents = sourceStore.getAll(() -> missingContentIds);
            commitsIterator = Iterators.filter(commits.iterator(), (c) -> {
                objectReport.addCommit();
                return true;
            });

            allObjects = Iterators.concat(missingContents, commitsIterator);
        }
        final Stopwatch sw = Stopwatch.createStarted();

        target.putAll(allObjects, objectReport);
        progress.complete();
        if (objectReport.total.get() > 0) {
            progress.started();
            String description = String.format("Objects inserted: %,d, repeated: %,d, time: %s",
                    objectReport.inserted(), objectReport.found(), sw.stop());
            progress.setDescription(description);
        }
    } finally {
        producerThread.shutdownNow();
        // restore previous progress indicator
        progress.setProgressIndicator(defaultProgressIndicator);
    }

    Ref oldRef = req.have.isPresent() ? new Ref(req.name, req.have.get()) : null;
    Ref newRef = new Ref(req.name, req.want);
    RefDiff changedRef = new RefDiff(oldRef, newRef);

    return changedRef;
}

From source file:pro.foundev.examples.spark_streaming.java.interactive.smartconsumer.DeduplicatingRabbitMQConsumer.java

public void run() {
    ConnectionFactory factory = new ConnectionFactory();
    factory.setHost("localhost");
    try {
        Connection connection = factory.newConnection();
        Channel channel = connection.createChannel();
        QueueingConsumer consumer = new QueueingConsumer(channel);
        channel.basicConsume(EXCHANGE_NAME, true, consumer);
        Set<String> messages = new HashSet<>();
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        while (true) {
            QueueingConsumer.Delivery delivery = consumer.nextDelivery();
            String message = new String(delivery.getBody());
            messages.add(message);

            if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > 1000L) {
                System.out.println("it should be safe to submit messages now");
                for (String m : messages) {
                    //notifying user interface
                    System.out.println(m);
                }
                stopwatch.stop();
                stopwatch.reset();
                // restart the window: reset() leaves the stopwatch stopped at zero,
                // so without this the one-second flush condition above would never fire again
                stopwatch.start();
                messages.clear();
            }
            if (messages.size() > 100000000) {
                System.out.println("save off to file system and clear before we lose everything");
                messages.clear();
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file:joshelser.LimitAndSumColumnFamilyIterator.java

@Override
public void next() throws IOException {
    Stopwatch nextSw = Stopwatch.createStarted();

    // Make sure we invalidate our last record
    nextRecordNotFound();

    // Catch the case where we have no data and there's nothing to next()
    if (!getSource().hasTop()) {
        return;
    }

    getSource().next();

    aggregate();

    nextSw.stop();
    log.info("Next duration: " + nextSw.elapsed(TimeUnit.MILLISECONDS));
}

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java

public void testSnapshotScan() throws IOException {
    Stopwatch snapshotRestoreTimer = new Stopwatch();
    Stopwatch scanOpenTimer = new Stopwatch();
    Stopwatch scanTimer = new Stopwatch();

    Path restoreDir = new Path(this.restoreDir);

    snapshotRestoreTimer.start();
    restoreDir.getFileSystem(conf).delete(restoreDir, true);
    snapshotRestoreTimer.stop();

    Scan scan = getScan();
    scanOpenTimer.start();
    TableSnapshotScanner scanner = new TableSnapshotScanner(conf, restoreDir, snapshotName, scan);
    scanOpenTimer.stop();

    long numRows = 0;
    long numCells = 0;
    scanTimer.start();
    while (true) {
        Result result = scanner.next();
        if (result == null) {
            break;
        }
        numRows++;

        numCells += result.rawCells().length;
    }
    scanTimer.stop();
    scanner.close();

    ScanMetrics metrics = scanner.getScanMetrics();
    long totalBytes = metrics.countOfBytesInResults.get();
    double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HBase scan snapshot: ");
    System.out.println("total time to restore snapshot: " + snapshotRestoreTimer.elapsedMillis() + " ms");
    System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

    System.out.println("Scan metrics:\n" + metrics.getMetricsMap());

    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
    System.out.println("total rows  : " + numRows);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
    System.out.println("total cells : " + numCells);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");

}

From source file:benchmarkio.consumer.rabbitmq.RabbitMQMessageConsumer.java

@Override
public Histogram call() throws Exception {
    final String queueName = channel.queueDeclare().getQueue();
    channel.queueBind(queueName, topic, "#");

    final QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume(queueName, true, consumer);

    logger.info("topic: {}, queueName: {}", topic, queueName);

    int messageCount = 0;

    // Note that this is a polling consumer and will be terminated
    // whenever the Consts.POLLING_CONSUMER_MAX_IDLE_TIME_MS passes and no new messages have arrived.
    while (true) {
        // Start
        final Stopwatch stopwatch = Stopwatch.createStarted();

        final QueueingConsumer.Delivery delivery = consumer
                .nextDelivery(Consts.POLLING_CONSUMER_MAX_IDLE_TIME_MS);

        if (delivery == null) {
            logger.info("Consumer was terminated through timeout");

            break;
        }

        final byte[] message = delivery.getBody();

        messageCount++;

        final String routingKey = delivery.getEnvelope().getRoutingKey();

        // End
        stopwatch.stop();
        histogram.recordValue(stopwatch.elapsed(Consts.TIME_UNIT_FOR_REPORTING));
    }

    logger.info("In total consumed {} messages", messageCount);

    return histogram;
}

From source file:com.madgag.agit.FileListFragment.java

@Override
public Loader<List<FilePath>> onCreateLoader(int id, Bundle args) {
    return new AsyncLoader<List<FilePath>>(getActivity()) {
        public List<FilePath> loadInBackground() {

            try {
                Bundle args = getArguments();
                Repository repo = new FileRepository(args.getString(GITDIR));
                RevCommit commit = new RevWalk(repo).parseCommit(repo.resolve(args.getString(REVISION)));

                Stopwatch stopwatch = new Stopwatch().start();

                final List<FilePath> paths = newArrayList();
                TreeWalk treeWalk = new TreeWalk(repo);
                treeWalk.setRecursive(true);
                treeWalk.addTree(commit.getTree());

                while (treeWalk.next()) {
                    paths.add(new FilePath(treeWalk.getRawPath()));
                }
                Log.d(TAG, "Found " + paths.size() + " files " + stopwatch.stop());

                new Thread(new Runnable() {
                    @Override
                    public void run() { // knocks around 15-30% off time-to-display the list
                        Stopwatch stopwatch = new Stopwatch().start();
                        for (FilePath filePath : paths) {
                            filePath.getPath();
                        }
                        Log.d(TAG,
                                "Converted " + paths.size() + " path byte buffs to string " + stopwatch.stop());
                    }
                }).start();
                return paths;
            } catch (Exception e) {
                Log.w(TAG, "Bang", e);
                throw new RuntimeException(e);
            }
        }
    };
}

From source file:qa.qcri.nadeef.console.Console.java

private static void load(String cmdLine) throws IOException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    String[] splits = cmdLine.split("\\s");
    if (splits.length != 2) {
        console.println("Invalid load command. Run load <Nadeef config file>.");
        return;
    }
    String fileName = splits[1];
    File file = CommonTools.getFile(fileName);

    // shutdown existing executors
    for (CleanExecutor executor : executors) {
        executor.shutdown();
    }
    executors.clear();

    FileReader reader = null;
    try {
        reader = new FileReader(file);
        DBConfig dbConfig = NadeefConfiguration.getDbConfig();
        cleanPlans = CleanPlan.create(reader, dbConfig);
        for (CleanPlan cleanPlan : cleanPlans) {
            executors.add(new CleanExecutor(cleanPlan, dbConfig));
        }
    } catch (Exception ex) {
        tracer.err("Loading CleanPlan failed.", ex);
        return;
    } finally {
        if (reader != null)
            reader.close();
    }

    console.println(
            cleanPlans.size() + " rules loaded in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms.");
    stopwatch.stop();
}

From source file:org.lenskit.cli.commands.Predict.java

@Override
@SuppressWarnings({ "rawtypes", "unchecked" })
public void execute(Namespace opts) throws IOException, RecommenderBuildException {
    Context ctx = new Context(opts);
    LenskitRecommenderEngine engine = ctx.loader.loadEngine();

    long user = ctx.options.getLong("user");
    List<Long> items = ctx.options.get("items");

    try (LenskitRecommender rec = engine.createRecommender()) {
        RatingPredictor pred = rec.getRatingPredictor();
        ItemNameDAO names = rec.get(ItemNameDAO.class);
        if (pred == null) {
            logger.error("recommender has no rating predictor");
            throw new UnsupportedOperationException("no rating predictor");
        }

        logger.info("predicting {} items", items.size());
        Stopwatch timer = Stopwatch.createStarted();
        Map<Long, Double> preds = pred.predict(user, items);
        System.out.format("predictions for user %d:%n", user);
        for (Map.Entry<Long, Double> e : preds.entrySet()) {
            System.out.format("  %d", e.getKey());
            if (names != null) {
                System.out.format(" (%s)", names.getItemName(e.getKey()));
            }
            System.out.format(": %.3f", e.getValue());
            System.out.println();
        }
        timer.stop();
        logger.info("predicted for {} items in {}", items.size(), timer);
    }
}