Example usage for com.google.common.base.Stopwatch Stopwatch()

Introduction

On this page you can find usage examples for the no-argument constructor Stopwatch() of com.google.common.base.Stopwatch.

Prototype

Stopwatch() 
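
Below is a minimal, self-contained sketch of how this constructor is typically used (a hypothetical example, not taken from any of the projects listed under Usage). Note that recent Guava releases deprecate the public constructor in favor of Stopwatch.createStarted() and Stopwatch.createUnstarted(); the examples on this page use the older API.

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        // Create and start timing (older Guava API; newer code would use Stopwatch.createStarted()).
        Stopwatch stopwatch = new Stopwatch().start();

        Thread.sleep(250); // simulated work

        stopwatch.stop();
        System.out.println("Elapsed: " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        stopwatch.reset(); // back to zero and unstarted, ready for reuse
    }
}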

Usage

From source file:com.github.benmanes.multiway.EliminationProfile.java

void scheduleStatusTask() {
    Executors.newSingleThreadScheduledExecutor().scheduleWithFixedDelay(new Runnable() {
        final Stopwatch stopwatch = new Stopwatch().start();

        @Override
        public void run() {
            long count = calls.longValue();
            long rate = count / stopwatch.elapsed(TimeUnit.SECONDS);
            System.out.printf("%s - %,d [%,d / sec]\n", stopwatch, count, rate);
        }
    }, DISPLAY_DELAY_SEC, DISPLAY_DELAY_SEC, TimeUnit.SECONDS);
}

From source file:org.apache.kylin.storage.hbase.cube.v2.filter.MassInValueProviderImpl.java

public MassInValueProviderImpl(Functions.FilterTableType filterTableType, String filterResourceIdentifier,
        DimensionEncoding encoding) {

    if (filterTableType == Functions.FilterTableType.HDFS) {

        logger.info("Start to load HDFS filter table from " + filterResourceIdentifier);
        Stopwatch stopwatch = new Stopwatch().start();

        FileSystem fileSystem = null;
        try {
            synchronized (hdfs_caches) {

                // directly create hbase configuration here due to no KYLIN_CONF definition.
                fileSystem = FileSystem.get(HBaseConfiguration.create());

                long modificationTime = fileSystem.getFileStatus(new Path(filterResourceIdentifier))
                        .getModificationTime();
                Pair<Long, Set<ByteArray>> cached = hdfs_caches.getIfPresent(filterResourceIdentifier);
                if (cached != null && cached.getFirst().equals(modificationTime)) {
                    ret = cached.getSecond();
                    logger.info("Load HDFS from cache using " + stopwatch.elapsedMillis() + " millis");
                    return;
                }

                InputStream inputStream = fileSystem.open(new Path(filterResourceIdentifier));
                List<String> lines;
                try {
                    lines = IOUtils.readLines(inputStream);
                } finally {
                    IOUtils.closeQuietly(inputStream);
                }

                logger.info("Load HDFS finished after " + stopwatch.elapsedMillis() + " millis");

                for (String line : lines) {
                    if (StringUtils.isEmpty(line)) {
                        continue;
                    }

                    try {
                        ByteArray byteArray = ByteArray.allocate(encoding.getLengthOfEncoding());
                        encoding.encode(line.getBytes(), line.getBytes().length, byteArray.array(), 0);
                        ret.add(byteArray);
                    } catch (Exception e) {
                        logger.warn("Error when encoding the filter line " + line);
                    }
                }

                hdfs_caches.put(filterResourceIdentifier, Pair.newPair(modificationTime, ret));

                logger.info("Mass In values constructed after " + stopwatch.elapsedMillis()
                        + " millis, containing " + ret.size() + " entries");
            }

        } catch (IOException e) {
            throw new RuntimeException("error when loading the mass in values", e);
        }
    } else {
        throw new RuntimeException("HBASE_TABLE FilterTableType Not supported yet");
    }
}

From source file:org.caleydo.data.importer.tcga.regular.TCGATask.java

@Override
protected JsonElement compute() {
    Stopwatch w = new Stopwatch().start();
    log.info(id + " start downloading");

    String run = Settings.format(analysisRun);
    String runSpecificOutputPath = settings.getDataDirectory(run);

    TCGADataSets project = new TCGADataSetGenerator(tumorType,
            settings.createFirehoseProvider(tumorType, analysisRun, dataRun), settings).invoke();

    if (project.isEmpty()) {
        log.warning(id + " no datasets were created, skipping");
        return null;
    }

    if (settings.isDownloadOnly()) {
        log.fine(id + " no project generation just downloading data");
        return null;
    }

    log.info(id + " loading project");

    Collection<ATableBasedDataDomain> dataDomains = loadProject(project);
    if (dataDomains.isEmpty()) {
        log.severe(id + " no datadomains were loaded, skipping");
        return null;
    }

    log.fine(id + " start post processing");
    for (TCGADataSet set : project) {
        new TCGAPostprocessingTask(set).invoke();
    }

    if (project.getMutsigParser() != null) {
        log.info(id + " start loading mutsig scores");
        loadExternalScores(project.getMutsigParser(), dataDomains);
    }

    final String projectOutputPath = runSpecificOutputPath + run + "_" + tumorType + ".cal";

    ProjectMetaData metaData = ProjectMetaData.createDefault();
    metaData.setName("TCGA " + tumorType.getName() + " Package");
    DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, Locale.ENGLISH);
    metaData.set("Analysis Run", df.format(analysisRun));
    metaData.set("Data Run", df.format(dataRun));
    metaData.set("Tumor", tumorType.getLabel());
    metaData.set("Report URL", settings.getReportUrl(analysisRun, tumorType));

    log.info(id + " saving project");
    if (!saveProject(dataDomains, projectOutputPath, metaData)) {
        log.severe(id + " saving error, skipping");
        return null;
    }

    saveProjectSpecificReport(dataDomains, tumorType, runSpecificOutputPath, run);

    project = null;

    String projectRemoteOutputURL = settings.getTcgaServerURL() + run + "/" + run + "_" + tumorType + ".cal";

    JsonObject report = generateTumorReportLine(dataDomains, tumorType, analysisRun, projectRemoteOutputURL);

    log.fine(id + " cleanup up datadomains: " + dataDomains);
    cleanUp(dataDomains);

    log.info(id + " done in " + w);
    return report;
}

From source file:co.cask.cdap.app.runtime.spark.SparkTransactionClient.java

/**
 * Returns the {@link Transaction} for the given stage.
 *
 * @param stageId the stage id to query for {@link Transaction}.
 * @param timeout the maximum time to wait
 * @param timeUnit the time unit of the timeout argument
 * @return the {@link Transaction} to be used for the given stage.
 *
 * @throws TimeoutException if the wait timed out
 * @throws InterruptedException if the current thread was interrupted while waiting
 * @throws TransactionFailureException if failed to get transaction for the given stage. Calling this method again
 *                                     with the same stage id will result in the same exception
 */
Transaction getTransaction(int stageId, long timeout, TimeUnit timeUnit)
        throws TimeoutException, InterruptedException, TransactionFailureException {
    long timeoutMillis = Math.max(0L, timeUnit.toMillis(timeout) - txPollIntervalMillis);
    Stopwatch stopwatch = new Stopwatch().start();
    Transaction transaction = getTransaction(stageId);

    while (transaction == null && stopwatch.elapsedMillis() < timeoutMillis) {
        TimeUnit.MILLISECONDS.sleep(txPollIntervalMillis);
        transaction = getTransaction(stageId);
    }
    if (transaction == null) {
        throw new TimeoutException(
                "Cannot get transaction for stage " + stageId + " after " + timeout + " " + timeUnit);
    }
    return transaction;
}

From source file:org.apache.drill.exec.store.schedule.OldAssignmentCreator.java

OldAssignmentCreator(List<DrillbitEndpoint> incomingEndpoints, List<T> units) {
    logger.debug("Assigning {} units to {} endpoints", units.size(), incomingEndpoints.size());
    Stopwatch watch = new Stopwatch().start(); // start timing; an unstarted stopwatch reports 0 elapsed

    Preconditions.checkArgument(incomingEndpoints.size() <= units.size(),
            String.format("Incoming endpoints %d " + "is greater than number of row groups %d",
                    incomingEndpoints.size(), units.size()));
    this.mappings = ArrayListMultimap.create();
    this.endpoints = Lists.newLinkedList(incomingEndpoints);

    ArrayList<T> rowGroupList = new ArrayList<>(units);
    for (double cutoff : ASSIGNMENT_CUTOFFS) {
        scanAndAssign(rowGroupList, cutoff, false, false);
    }
    scanAndAssign(rowGroupList, 0.0, true, false);
    scanAndAssign(rowGroupList, 0.0, true, true);

    logger.debug("Took {} ms to apply assignments", watch.elapsed(TimeUnit.MILLISECONDS));
    Preconditions.checkState(rowGroupList.isEmpty(),
            "All readEntries should be assigned by now, but some are still unassigned");
    Preconditions.checkState(!units.isEmpty());

}

From source file:demos.BatchInsert.java

public void run() {
    try {
        logger.info("Preparing to insert metric data points");

        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("demo");
        PreparedStatement insert = session
                .prepare("insert into metric_data (metric_id, time, value) values (?, ?, ?)");
        Random random = new Random();
        DateTime time = DateTime.now().minusYears(1);
        final CountDownLatch latch = new CountDownLatch(NUM_INSERTS / BATCH_SIZE);

        FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {
            @Override
            public void onSuccess(ResultSet result) {
                latch.countDown();
            }

            @Override
            public void onFailure(Throwable t) {
                logger.warn("There was an error inserting data", t);
                latch.countDown();
            }
        };

        Stopwatch stopwatch = new Stopwatch().start();
        BatchStatement batch = new BatchStatement();
        for (int i = 0; i < NUM_INSERTS; ++i) {
            String metricId = "metric-" + Math.abs(random.nextInt() % NUM_METRICS);
            double value = random.nextDouble();
            batch.add(insert.bind(metricId, time.toDate(), value));
            time = time.plusSeconds(10);
            if (batch.size() == BATCH_SIZE) {
                ResultSetFuture future = session.executeAsync(batch);
                Futures.addCallback(future, callback);
                batch = new BatchStatement();
            }
        }
        latch.await();
        stopwatch.stop();

        logger.info("Finished inserting {} data points in {} ms", NUM_INSERTS,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (InterruptedException e) {
        logger.info("There was an interrupt while waiting for inserts to complete");
    }
}

From source file:com.couchbase.roadrunner.workloads.Workload.java

public Workload(final Bucket bucket, final String name, final int ramp, final DocumentFactory documentFactory) {
    this.bucket = bucket;
    this.workloadName = name;
    this.measures = new HashMap<String, List<Stopwatch>>();
    this.measuredOps = 0;
    this.totalOps = 0;
    this.ramp = ramp;
    this.elapsed = new Stopwatch();
    this.documentFactory = documentFactory;
}

From source file:pro.foundev.strategies.BenchmarkStrategy.java

private void exec(Runnable runnable, String name, int runs, BenchmarkReport report) {
    logger.info("Starting run of " + name);
    DescriptiveStatistics stats = new DescriptiveStatistics();

    Stopwatch timer = new Stopwatch();
    for (int i = 0; i < runs; i++) {
        timer.start();
        runnable.run();
        timer.stop();
        logger.info("Time to execute load run #" + i + " it took " + timer);
        stats.addValue(timer.elapsed(TimeUnit.MILLISECONDS));
        timer.reset();
    }
    logger.info("Finished run of " + name);
    report.addLine(name, stats.getMin(), stats.getMax(), stats.getPercentile(50), stats.getPercentile(90),
            stats.getMean());
}

From source file:com.couchbase.roadrunner.workloads.GetSetWorkload.java

private void setWorkloadWithMeasurement(String key) throws Exception {
    Stopwatch watch = new Stopwatch().start();
    setWorkload(key);
    watch.stop();
    addMeasure("set", watch);
}

From source file:org.rhq.metrics.simulator.MeasurementCollector.java

@Override
public void run() {
    final Timer.Context context = metrics.batchInsertTime.time();
    final Stopwatch stopwatch = new Stopwatch().start();
    metricsServer.addNumericData(generateData(), new RawDataInsertedCallback() {
        @Override
        public void onFinish() {
            stopwatch.stop();
            log.info("Finished inserting raw data in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
            context.stop();
        }

        @Override
        public void onSuccess(MeasurementDataNumeric result) {
            metrics.rawInserts.mark();
        }

        @Override
        public void onFailure(Throwable t) {
            log.warn("Failed to insert raw data", t);
        }
    });
}