Example usage for com.google.common.base Stopwatch stop

List of usage examples for com.google.common.base Stopwatch stop

Introduction

On this page you can find example usage for com.google.common.base.Stopwatch.stop().

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch.
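
A minimal sketch of the typical lifecycle, assuming only that Guava is on the classpath: once stop() is called, elapsed() keeps returning the duration measured up to that point, and stopping a stopwatch that is not running throws an IllegalStateException.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchStopExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();

        // Simulate the work being measured.
        Thread.sleep(100);

        stopwatch.stop();

        // After stop(), elapsed() returns the same fixed duration on every call.
        System.out.println("took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        // toString() picks a readable unit automatically, e.g. "100.4 ms".
        System.out.println("took " + stopwatch);
    }
}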

Usage

From source file:benchmark.hbase.report.LoggingReport.java

@Override
public void aggregateAndPrintResults(BenchmarkType benchMarkType,
        CompletionService<Histogram> executorCompletionService, int numOfThreads, long numOfRecords,
        Stopwatch executorStartTime) {

    // Used to accumulate results from all histograms.
    final Histogram totalHistogram = Histograms.create();

    for (int i = 0; i < numOfThreads; i++) {
        try {
            final Future<Histogram> histogramFuture = executorCompletionService.take();
            Histogram histogram = histogramFuture.get();
            totalHistogram.add(histogram);
        } catch (final InterruptedException e) {
            log.error("Failed to retrieve data, got inturrupt signal", e);
            Thread.currentThread().interrupt();

            break;
        } catch (final ExecutionException e) {
            log.error("Failed to retrieve data", e);
        }
    }

    executorStartTime.stop();
    final long durationInSeconds = executorStartTime.elapsedTime(TimeUnit.SECONDS);
    final long durationInMs = executorStartTime.elapsedTime(TimeUnit.MILLISECONDS);
    // Use durationInMs here, since durationInSeconds would lose precision.
    final long throughputPerSecond = 1000 * numOfRecords / durationInMs;

    final long min = totalHistogram.getMinValue();
    final double percentile25 = totalHistogram.getValueAtPercentile(25);
    final double percentile50 = totalHistogram.getValueAtPercentile(50);
    final double percentile75 = totalHistogram.getValueAtPercentile(75);
    final double percentile95 = totalHistogram.getValueAtPercentile(95);
    final double percentile99 = totalHistogram.getValueAtPercentile(99);
    final long max = totalHistogram.getMaxValue();
    final double mean = totalHistogram.getMean();
    final double stdDev = totalHistogram.getStdDeviation();
    final long totalMessagesCount = totalHistogram.getTotalCount();

    logInfo("=======================================");
    if (benchMarkType == BenchmarkType.READ_ONLY) {
        logInfo("READ ONLY BENCHMARK STATS");
    } else if (benchMarkType == BenchmarkType.WRITE_ONLY) {
        logInfo("WRITE ONLY BENCHMARK STATS");
    } else if (benchMarkType == BenchmarkType.READ_AND_WRITE) {
        logInfo("READ AND WRITE BENCHMARK STATS");
    } else {
        logInfo("UNKNOWN BENCHMARK STATS");
    }
    logInfo("=======================================");
    logInfo("DURATION (SECOND):      {}", durationInSeconds);
    logInfo("THROUGHPUT / SECOND:    {}", throughputPerSecond);
    logInfo("MIN:                    {}", min);
    logInfo("25th percentile:        {}", percentile25);
    logInfo("50th percentile:        {}", percentile50);
    logInfo("75th percentile:        {}", percentile75);
    logInfo("95th percentile:        {}", percentile95);
    logInfo("99th percentile:        {}", percentile99);
    logInfo("MAX:                    {}", max);
    logInfo("MEAN:                   {}", mean);
    logInfo("STD DEVIATION:          {}", stdDev);
    logInfo("CONCURRANCY:            {}", numOfThreads);
    logInfo("TotalRecords:           {}", totalMessagesCount);
    logInfo("\n\n\n");

}
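
elapsedTime(TimeUnit) as used above comes from older Guava releases; later versions replaced it with elapsed(TimeUnit). Below is a sketch of the same duration and throughput calculation against the current API (ThroughputCalculator and its parameters are illustrative, not part of the original class):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

// Hypothetical helper showing the same timing logic on the current Stopwatch API.
final class ThroughputCalculator {
    static long recordsPerSecond(Stopwatch executorStartTime, long numOfRecords) {
        if (executorStartTime.isRunning()) {
            executorStartTime.stop();
        }
        long durationInMs = executorStartTime.elapsed(TimeUnit.MILLISECONDS);
        // Guard against a zero-millisecond duration to avoid division by zero.
        return durationInMs == 0 ? 0 : 1000 * numOfRecords / durationInMs;
    }
}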

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java

public void testScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
    Stopwatch scanOpenTimer = new Stopwatch();
    Stopwatch scanTimer = new Stopwatch();

    Scan scan = getScan();

    String jobName = "testScanMapReduce";

    Job job = new Job(conf);
    job.setJobName(jobName);

    job.setJarByClass(getClass());

    TableMapReduceUtil.initTableMapperJob(this.tablename, scan, MyMapper.class, NullWritable.class,
            NullWritable.class, job);

    job.setNumReduceTasks(0);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    job.setOutputFormatClass(NullOutputFormat.class);

    scanTimer.start();
    job.waitForCompletion(true);
    scanTimer.stop();

    Counters counters = job.getCounters();
    long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
    long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();

    long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue();
    double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HBase scan mapreduce: ");
    System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
    System.out.println("total rows  : " + numRows);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
    System.out.println("total cells : " + numCells);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");
}
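
Two things are worth noting in this snippet: scanOpenTimer is created but never started, so the "total time to open scanner" line always reports 0 ms here, and both the Stopwatch() constructor and elapsedMillis()/elapsedTime() belong to the older Guava API. A rough sketch of the phase timing with the factory methods that replaced the constructor (PhaseTimingSketch is a hypothetical helper; the MapReduce job setup is omitted):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

// Illustrative only: times a single phase using the factory methods
// that replaced the deprecated Stopwatch() constructor.
final class PhaseTimingSketch {
    static void timePhase(Runnable phase) {
        Stopwatch scanTimer = Stopwatch.createUnstarted();
        scanTimer.start();
        phase.run();
        scanTimer.stop();
        System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}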

From source file:org.apache.jackrabbit.oak.run.CompactCommand.java

@Override
public void execute(String... args) throws Exception {
    OptionParser parser = new OptionParser();
    OptionSpec<String> directoryArg = parser.nonOptions("Path to segment store (required)")
            .ofType(String.class);
    OptionSpec<Void> forceFlag = parser.accepts("force",
            "Force compaction and ignore non matching segment version");
    OptionSpec<?> segmentTar = parser.accepts("segment-tar", "Use oak-segment-tar instead of oak-segment");
    OptionSet options = parser.parse(args);

    String path = directoryArg.value(options);
    if (path == null) {
        System.err.println("Compact a file store. Usage: compact [path] <options>");
        parser.printHelpOn(System.err);
        System.exit(-1);
    }

    File directory = new File(path);
    boolean force = options.has(forceFlag);

    boolean success = false;
    Set<String> beforeLs = newHashSet();
    Set<String> afterLs = newHashSet();
    Stopwatch watch = Stopwatch.createStarted();

    System.out.println("Compacting " + directory);
    System.out.println("    before ");
    beforeLs.addAll(list(directory));
    long sizeBefore = FileUtils.sizeOfDirectory(directory);
    System.out
            .println("    size " + IOUtils.humanReadableByteCount(sizeBefore) + " (" + sizeBefore + " bytes)");
    System.out.println("    -> compacting");

    try {
        if (options.has(segmentTar)) {
            SegmentTarUtils.compact(directory, force);
        } else {
            SegmentUtils.compact(directory, force);
        }
        success = true;
    } catch (Throwable e) {
        System.out.println("Compaction failure stack trace:");
        e.printStackTrace(System.out);
    } finally {
        watch.stop();
        if (success) {
            System.out.println("    after ");
            afterLs.addAll(list(directory));
            long sizeAfter = FileUtils.sizeOfDirectory(directory);
            System.out.println(
                    "    size " + IOUtils.humanReadableByteCount(sizeAfter) + " (" + sizeAfter + " bytes)");
            System.out.println("    removed files " + difference(beforeLs, afterLs));
            System.out.println("    added files " + difference(afterLs, beforeLs));
            System.out.println("Compaction succeeded in " + watch.toString() + " ("
                    + watch.elapsed(TimeUnit.SECONDS) + "s).");
        } else {
            System.out.println("Compaction failed in " + watch.toString() + " ("
                    + watch.elapsed(TimeUnit.SECONDS) + "s).");
            System.exit(1);
        }
    }
}

From source file:cosmos.impl.CosmosImpl.java

@Override
public void addResult(Store id, Record<?> queryResult) throws Exception {
    checkNotNull(queryResult);

    Stopwatch sw = new Stopwatch().start();
    try {
        addResults(id, Single.<Record<?>>create(queryResult));
    } finally {
        sw.stop();
        id.tracer().addTiming("Cosmos:addResult", sw.elapsed(TimeUnit.MILLISECONDS));
    }
}
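
The try/finally shape guarantees the stopwatch is stopped and the timing recorded even when addResults throws. A generic sketch of that pattern, with the tracer call replaced by a plain LongConsumer since the Cosmos types are not shown here:

import java.util.concurrent.TimeUnit;
import java.util.function.LongConsumer;

import com.google.common.base.Stopwatch;

// Hypothetical helper: run a task and always report how long it took, even on failure.
final class TimedCall {
    static void runTimed(Runnable task, LongConsumer reportMillis) {
        Stopwatch sw = Stopwatch.createStarted();
        try {
            task.run();
        } finally {
            sw.stop();
            reportMillis.accept(sw.elapsed(TimeUnit.MILLISECONDS));
        }
    }
}

Stopping inside finally means a failed call still produces a timing entry, which is usually what a tracing system expects.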

From source file:org.terasology.rendering.primitives.ChunkTessellator.java

public ChunkMesh generateMesh(ChunkView chunkView, int meshHeight, int verticalOffset) {
    PerformanceMonitor.startActivity("GenerateMesh");
    ChunkMesh mesh = new ChunkMesh(bufferPool);

    final Stopwatch watch = Stopwatch.createStarted();

    for (int x = 0; x < ChunkConstants.SIZE_X; x++) {
        for (int z = 0; z < ChunkConstants.SIZE_Z; z++) {
            for (int y = verticalOffset; y < verticalOffset + meshHeight; y++) {
                Biome biome = chunkView.getBiome(x, y, z);

                Block block = chunkView.getBlock(x, y, z);
                if (block != null && !block.isInvisible()) {
                    generateBlockVertices(chunkView, mesh, x, y, z, biome);
                }
            }
        }
    }
    watch.stop();

    mesh.setTimeToGenerateBlockVertices((int) watch.elapsed(TimeUnit.MILLISECONDS));

    watch.reset().start();
    generateOptimizedBuffers(chunkView, mesh);
    watch.stop();
    mesh.setTimeToGenerateOptimizedBuffers((int) watch.elapsed(TimeUnit.MILLISECONDS));
    statVertexArrayUpdateCount++;

    PerformanceMonitor.endActivity();
    return mesh;
}
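
reset() clears the accumulated time, so a single stopwatch can measure two consecutive phases, as above. A compact sketch of that reuse pattern (TwoPhaseTiming and the phase runnables are placeholders):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

// Sketch: reuse one stopwatch for two consecutive phases by resetting in between.
final class TwoPhaseTiming {
    static void run(Runnable phaseOne, Runnable phaseTwo) {
        Stopwatch watch = Stopwatch.createStarted();
        phaseOne.run();
        watch.stop();
        long phaseOneMs = watch.elapsed(TimeUnit.MILLISECONDS);

        watch.reset().start();   // reset() zeroes the elapsed time; start() begins phase two
        phaseTwo.run();
        watch.stop();
        long phaseTwoMs = watch.elapsed(TimeUnit.MILLISECONDS);

        System.out.println("phase one: " + phaseOneMs + " ms, phase two: " + phaseTwoMs + " ms");
    }
}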

From source file:com.palantir.atlasdb.keyvalue.cassandra.CassandraJMXCompactionManager.java

/**
 * A thread pool with #(nodes) threads will be created to wait for compaction to complete.
 *
 * @param timeout - timeout for compaction. After timeout, the compaction task will not be canceled.
 * @param keyspace - keyspace for the tables to be compacted
 * @param tableName - tables to be compacted
 * @throws TimeoutException - TimeoutException is thrown when compaction cannot finish in a given time period.
 */
public void forceTableCompaction(long timeout, final String keyspace, final String tableName)
        throws TimeoutException {
    if (compactionClients.isEmpty()) {
        log.error(
                "No compaction client found in CassandraJMXCompactionManager, cannot perform compaction on {}.{}.",
                keyspace, tableName);
        log.error("Follow steps as follows to use Cassandra JMX compaction feature:");
        log.error("1. Enable JMX options in `cassandra-env.sh`, prefer to use SSL authentication option.");
        log.error("2. Add \"jmx\"=true in ATLAS_KVS_PREFERENCES.");
        return;
    }

    // gc_grace_period is turned off. Now delete all hintedHandoffs
    // hintedHandoffs need to be deleted on all nodes before running Cassandra compaction
    for (CassandraJMXCompaction compaction : compactionClients) {
        compaction.deleteLocalHints();
    }

    // execute the compaction to remove the tombstones
    // Create a fresh thread pool on every call because shutdown() must be invoked
    // so that awaitTermination() returns as soon as the tasks complete, rather than waiting out the full timeout.
    ExecutorService exec = Executors.newFixedThreadPool(compactionClients.size(),
            new ThreadFactoryBuilder().setNameFormat("CassandraCompactionThreadPool-%d").build());
    Stopwatch timer = Stopwatch.createStarted();
    for (final CassandraJMXCompaction compaction : compactionClients) {
        exec.submit(new Runnable() {
            @Override
            public void run() {
                compaction.forceTableFlush(keyspace, tableName);
                compaction.forceTableCompaction(keyspace, tableName);
            }
        });
    }
    // shutdown makes exec.awaitTermination() return if job completes before timeout
    exec.shutdown();
    try {
        boolean isExecuted = exec.awaitTermination(timeout, TimeUnit.SECONDS);
        if (isExecuted) {
            log.info("All compaction tasks for {}.{} consumed {}", keyspace, Arrays.asList(tableName),
                    timer.stop());
        } else {
            exec.shutdownNow();
            throw new TimeoutException(String.format("Compaction timeout for %s:%s in %d seconds", keyspace,
                    Arrays.asList(tableName), timeout));
        }
    } catch (InterruptedException e) {
        log.error("Waiting for compaction is interupted for {}.{}. Error: {}", keyspace,
                Arrays.asList(tableName), e.getMessage());
    }
}
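
Passing timer.stop() directly as a log argument works because stop() returns the Stopwatch itself, and its toString() renders a human-readable duration. A minimal sketch, assuming an SLF4J-style logger:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Stopwatch;

// Sketch: stop() returns this, so the stopped stopwatch can be logged in one expression.
final class StopAndLog {
    private static final Logger log = LoggerFactory.getLogger(StopAndLog.class);

    static void timedSection(Runnable work) {
        Stopwatch timer = Stopwatch.createStarted();
        work.run();
        // toString() picks a readable unit, e.g. "1.234 s" or "512.0 ms".
        log.info("section consumed {}", timer.stop());
    }
}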

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("hdfs/{cluster}/")
@Produces(MediaType.APPLICATION_JSON)
public List<HdfsStats> getHdfsStats(@PathParam("cluster") String cluster,
        // run Id is timestamp in seconds
        @QueryParam("timestamp") long runid, @QueryParam("path") String pathPrefix,
        @QueryParam("limit") int limit) throws IOException {
    if (limit == 0) {
        limit = HdfsConstants.RECORDS_RETURNED_LIMIT;
    }

    boolean noRunId = false;
    if (runid == 0L) {
        // default it to 2 hours back
        long lastHour = System.currentTimeMillis() - 2 * 3600000L;
        // convert milliseconds to seconds
        runid = lastHour / 1000L;
        noRunId = true;
    }

    LOG.info(String.format("Fetching hdfs stats for cluster=%s, path=%s limit=%d, runId=%d", cluster,
            pathPrefix, limit, runid));
    Stopwatch timer = new Stopwatch().start();
    serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING));
    List<HdfsStats> hdfsStats = getHdfsStatsService().getAllDirs(cluster, pathPrefix, limit, runid);
    timer.stop();
    /**
     * If we find no hdfs stats for the default timestamp and no runId was
     * passed, the caller expects a default response. The default runId was
     * set to 2 hours back (as above), but collection may have failed around
     * that time, so we look back at progressively older runIds.
     */
    if (hdfsStats == null || hdfsStats.size() == 0L) {
        if (noRunId) {
            // consider reading the daily aggregation table instead of hourly
            // or consider reading older data since runId was a default timestamp
            int retryCount = 0;
            while (retryCount < HdfsConstants.ageMult.length) {
                runid = HdfsStatsService.getOlderRunId(retryCount, runid);
                hdfsStats = getHdfsStatsService().getAllDirs(cluster, pathPrefix, limit, runid);
                if ((hdfsStats != null) && (hdfsStats.size() != 0L)) {
                    break;
                }
                retryCount++;
            }
        }
    }

    // export latency metrics
    HravenResponseMetrics.HDFS_STATS_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS));
    return hdfsStats;
}

From source file:net.thangbui.cql_exporter.SchemaExporter.java

public void run() throws Exception {
    Stopwatch stopwatch = Stopwatch.createStarted();

    KeyspaceMetadata keyspace = validate();

    System.out.println("All good!");
    System.out.println("Start exporting...");

    long freeMemory = Runtime.getRuntime().freeMemory();
    FETCH_SIZE = Math.min(NO_OF_ENTRY_BOUND, (int) (freeMemory / 1000));
    if (Main.VERBOSE) {
        System.out.println("Free memory: " + freeMemory / 1024 / 1024 + " mb");
        System.out.println("Fetch size is set to: " + FETCH_SIZE);
    }

    if (Strings.isNullOrEmpty(tableName)) {
        extractKeyspace(keyspace);
    } else {
        extractOnlyOneTable(keyspace);
    }

    stopwatch.stop();
    System.out.printf("Export completed after %s s!" + Main.LINE_SEPARATOR,
            (float) stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000);
    System.out.println("Exited.");
}

From source file:org.apache.hadoop.hbase.zookeeper.MetaTableLocator.java

/**
 * Wait until the meta region is available and is not in transition.
 * @param zkw
 * @param replicaId
 * @param timeout
 * @return ServerName or null if we timed out.
 * @throws InterruptedException
 */
public ServerName blockUntilAvailable(final ZooKeeperWatcher zkw, int replicaId, final long timeout)
        throws InterruptedException {
    if (timeout < 0)
        throw new IllegalArgumentException();
    if (zkw == null)
        throw new IllegalArgumentException();
    Stopwatch sw = new Stopwatch().start();
    ServerName sn = null;
    try {
        while (true) {
            sn = getMetaRegionLocation(zkw, replicaId);
            if (sn != null || sw.elapsedMillis() > timeout - HConstants.SOCKET_RETRY_WAIT_MS) {
                break;
            }
            Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
        }
    } finally {
        sw.stop();
    }
    return sn;
}
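
A sketch of the same wait loop against the current Stopwatch API, with elapsed(TimeUnit.MILLISECONDS) in place of the removed elapsedMillis(); the Supplier, retry interval, and timeout handling stand in for the HBase-specific pieces:

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

import com.google.common.base.Stopwatch;

// Sketch: poll a supplier until it yields a value or the timeout budget is spent.
final class BlockUntilAvailable {
    static <T> T poll(Supplier<T> source, long timeoutMs, long retryWaitMs) throws InterruptedException {
        Stopwatch sw = Stopwatch.createStarted();
        try {
            while (true) {
                T value = source.get();
                if (value != null || sw.elapsed(TimeUnit.MILLISECONDS) > timeoutMs - retryWaitMs) {
                    return value;
                }
                Thread.sleep(retryWaitMs);
            }
        } finally {
            sw.stop();
        }
    }
}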

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java

public void testSnapshotScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
    Stopwatch scanOpenTimer = new Stopwatch();
    Stopwatch scanTimer = new Stopwatch();

    Scan scan = getScan();

    String jobName = "testSnapshotScanMapReduce";

    Job job = new Job(conf);
    job.setJobName(jobName);

    job.setJarByClass(getClass());

    TableMapReduceUtil.initTableSnapshotMapperJob(this.snapshotName, scan, MyMapper.class, NullWritable.class,
            NullWritable.class, job, true, new Path(restoreDir));

    job.setNumReduceTasks(0);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    job.setOutputFormatClass(NullOutputFormat.class);

    scanTimer.start();
    job.waitForCompletion(true);
    scanTimer.stop();

    Counters counters = job.getCounters();
    long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
    long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();

    long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue();
    double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HBase scan mapreduce: ");
    System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
    System.out.println("total rows  : " + numRows);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
    System.out.println("total cells : " + numCells);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");
}