Example usage for com.google.common.base Stopwatch Stopwatch

Introduction

This page collects example usages of the no-argument constructor of com.google.common.base.Stopwatch.

Prototype

Stopwatch()
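
The no-argument constructor creates an unstarted Stopwatch that measures elapsed time using the platform's best available time source (System.nanoTime()). Newer Guava releases deprecate and eventually remove this public constructor in favor of the Stopwatch.createUnstarted() and Stopwatch.createStarted() factory methods. Below is a minimal sketch of typical usage, assuming a Guava version (roughly 14.x) in which both the public constructor and elapsed(TimeUnit) are available; the Thread.sleep call simply stands in for the code being measured.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = new Stopwatch(); // newer Guava: Stopwatch.createUnstarted()
        stopwatch.start();
        Thread.sleep(50);                      // placeholder for the work being timed
        stopwatch.stop();
        // elapsed(TimeUnit) reads the measurement without changing the stopwatch state
        System.out.println("took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println("took " + stopwatch); // toString() formats the elapsed time, e.g. "50.21 ms"
    }
}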

Usage

From source file:uk.ac.open.kmi.iserve.sal.manager.impl.ServiceManagerIndexRdf.java

private void indexService(Service service) {
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    List<Operation> operations = service.getOperations();
    Set<URI> svcOps = new HashSet<URI>();
    for (Operation operation : operations) {
        svcOps.add(operation.getUri());
        indexOperation(operation);
    }
    // Set the svcOp map
    this.svcOpMap.put(service.getUri(), svcOps);
    // Index the modelReferences
    indexModelReferences(service);
    stopwatch.stop();
    log.info("Service - {} - indexed. Time taken {}", service.getUri(), stopwatch);
}
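
Note that the log call above passes the Stopwatch instance itself as a logging argument: Stopwatch.toString() returns the elapsed time in a human-readable form (for example "38.12 ms"), so no explicit call to an elapsed* method is needed just for logging.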

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java

public void testSnapshotScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
    Stopwatch scanOpenTimer = new Stopwatch();
    Stopwatch scanTimer = new Stopwatch();

    Scan scan = getScan();

    String jobName = "testSnapshotScanMapReduce";

    Job job = new Job(conf);
    job.setJobName(jobName);

    job.setJarByClass(getClass());

    TableMapReduceUtil.initTableSnapshotMapperJob(this.snapshotName, scan, MyMapper.class, NullWritable.class,
            NullWritable.class, job, true, new Path(restoreDir));

    job.setNumReduceTasks(0);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    job.setOutputFormatClass(NullOutputFormat.class);

    scanTimer.start();
    job.waitForCompletion(true);
    scanTimer.stop();

    Counters counters = job.getCounters();
    long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
    long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();

    long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue();
    double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HBase scan mapreduce: ");
    System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
    System.out.println("total rows  : " + numRows);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
    System.out.println("total cells : " + numCells);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");
}

From source file:org.eclipse.recommenders.codesearch.rcp.index.extdoc.LocalExamplesProvider.java

private void startMeasurement() {
    watch = new Stopwatch();
    watch.start();
}

From source file:org.apache.hadoop.hbase.catalog.CatalogTracker.java

/**
 * Waits indefinitely for availability of <code>hbase:meta</code>.  Used during
 * cluster startup.  Does not verify meta, just that something has been
 * set up in zk.
 * @see #waitForMeta(long)
 * @throws InterruptedException if interrupted while waiting
 */
public void waitForMeta() throws InterruptedException {
    Stopwatch stopwatch = new Stopwatch().start();
    while (!this.stopped) {
        try {
            if (waitForMeta(100) != null)
                break;
            long sleepTime = stopwatch.elapsedMillis();
            // +1 in case sleepTime=0
            if ((sleepTime + 1) % 10000 == 0) {
                LOG.warn("Have been waiting for meta to be assigned for " + sleepTime + "ms");
            }
        } catch (NotAllMetaRegionsOnlineException e) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("hbase:meta still not available, sleeping and retrying." + " Reason: "
                        + e.getMessage());
            }
        }
    }
}

From source file:org.apache.bookkeeper.replication.Auditor.java

public void start() {
    LOG.info("I'm starting as Auditor Bookie. ID: {}", bookieIdentifier);
    // on startup watching available bookie and based on the
    // available bookies determining the bookie failures.
    synchronized (this) {
        if (executor.isShutdown()) {
            return;
        }

        long interval = conf.getAuditorPeriodicCheckInterval();

        if (interval > 0) {
            LOG.info("Auditor periodic ledger checking enabled" + " 'auditorPeriodicCheckInterval' {} seconds",
                    interval);
            executor.scheduleAtFixedRate(new Runnable() {
                public void run() {
                    try {
                        if (!ledgerUnderreplicationManager.isLedgerReplicationEnabled()) {
                            LOG.info("Ledger replication disabled, skipping");
                            return;
                        }

                        Stopwatch stopwatch = new Stopwatch().start();
                        checkAllLedgers();
                        checkAllLedgersTime.registerSuccessfulEvent(stopwatch.stop().elapsedMillis(),
                                TimeUnit.MILLISECONDS);
                    } catch (KeeperException ke) {
                        LOG.error("Exception while running periodic check", ke);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        LOG.error("Interrupted while running periodic check", ie);
                    } catch (BKAuditException bkae) {
                        LOG.error("Exception while running periodic check", bkae);
                    } catch (BKException bke) {
                        LOG.error("Exception running periodic check", bke);
                    } catch (IOException ioe) {
                        LOG.error("I/O exception running periodic check", ioe);
                    } catch (ReplicationException.UnavailableException ue) {
                        LOG.error("Underreplication manager unavailable " + "running periodic check", ue);
                    }
                }
            }, interval, interval, TimeUnit.SECONDS);
        } else {
            LOG.info("Periodic checking disabled");
        }
        try {
            notifyBookieChanges();
            knownBookies = getAvailableBookies();
        } catch (BKException bke) {
            LOG.error("Couldn't get bookie list, exiting", bke);
            submitShutdownTask();
        }

        long bookieCheckInterval = conf.getAuditorPeriodicBookieCheckInterval();
        if (bookieCheckInterval == 0) {
            LOG.info("Auditor periodic bookie checking disabled, running once check now anyhow");
            executor.submit(BOOKIE_CHECK);
        } else {
            LOG.info("Auditor periodic bookie checking enabled"
                    + " 'auditorPeriodicBookieCheckInterval' {} seconds", bookieCheckInterval);
            executor.scheduleAtFixedRate(BOOKIE_CHECK, 0, bookieCheckInterval, TimeUnit.SECONDS);
        }
    }
}

From source file:processing.BM25Calculator.java

private static List<Map<Integer, Double>> startBM25CreationForTagPrediction(BookmarkReader reader,
        int sampleSize, boolean userBased, boolean resBased, int beta) {
    timeString = "";
    int size = reader.getUserLines().size();
    int trainSize = size - sampleSize;
    Stopwatch timer = new Stopwatch();
    timer.start();
    BM25Calculator calculator = new BM25Calculator(reader, trainSize, true, userBased, resBased, beta);
    timer.stop();
    long trainingTime = timer.elapsed(TimeUnit.MILLISECONDS);

    List<Map<Integer, Double>> results = new ArrayList<Map<Integer, Double>>();
    timer = new Stopwatch();
    timer.start();
    for (int i = trainSize; i < size; i++) {
        UserData data = reader.getUserLines().get(i);
        Map<Integer, Double> map = null;
        map = calculator.getRankedTagList(data.getUserID(), data.getWikiID(), true);
        results.add(map);
        //System.out.println(data.getTags() + "|" + map.keySet());
    }
    timer.stop();
    long testTime = timer.elapsed(TimeUnit.MILLISECONDS);
    timeString += ("Full training time: " + trainingTime + "\n");
    timeString += ("Full test time: " + testTime + "\n");
    timeString += ("Average test time: " + testTime / (double) sampleSize) + "\n";
    timeString += ("Total time: " + (trainingTime + testTime) + "\n");

    return results;
}

From source file:org.apache.hadoop.mapred.ReduceTask.java

@Override
@SuppressWarnings("unchecked")
public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
        throws IOException, InterruptedException, ClassNotFoundException {
    job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());

    if (isMapOrReduce()) {
        copyPhase = getProgress().addPhase("copy");
        sortPhase = getProgress().addPhase("sort");
        reducePhase = getProgress().addPhase("reduce");
    }
    // start thread that will handle communication with parent
    TaskReporter reporter = startReporter(umbilical);

    boolean useNewApi = job.getUseNewReducer();
    initialize(job, getJobID(), reporter, useNewApi);

    // check if it is a cleanupJobTask
    if (jobCleanup) {
        runJobCleanupTask(umbilical, reporter);
        return;
    }
    if (jobSetup) {
        runJobSetupTask(umbilical, reporter);
        return;
    }
    if (taskCleanup) {
        runTaskCleanupTask(umbilical, reporter);
        return;
    }

    // Initialize the codec
    codec = initCodec();
    RawKeyValueIterator rIter = null;
    ShuffleConsumerPlugin shuffleConsumerPlugin = null;

    Class combinerClass = conf.getCombinerClass();
    CombineOutputCollector combineCollector = (null != combinerClass)
            ? new CombineOutputCollector(reduceCombineOutputCounter, reporter, conf)
            : null;

    Class<? extends ShuffleConsumerPlugin> clazz = job.getClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN, Shuffle.class,
            ShuffleConsumerPlugin.class);

    shuffleConsumerPlugin = ReflectionUtils.newInstance(clazz, job);
    LOG.info("Using ShuffleConsumerPlugin: " + shuffleConsumerPlugin);
    LOG.info("Palladio debug: Starting shuffle");
    long cputimestartofshuffle = getCpuTime(); // getting cpu time for palladio debug
    long usedshuffle1 = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
    LOG.info("Palladio debug: Total memory available before call shuffle=" + usedshuffle1);

    Stopwatch sw2 = new Stopwatch().start(); // for Palladio debug shuffle
    ShuffleConsumerPlugin.Context shuffleContext = new ShuffleConsumerPlugin.Context(getTaskID(), job,
            FileSystem.getLocal(job), umbilical, super.lDirAlloc, reporter, codec, combinerClass,
            combineCollector, spilledRecordsCounter, reduceCombineInputCounter, shuffledMapsCounter,
            reduceShuffleBytes, failedShuffleCounter, mergedMapOutputsCounter, taskStatus, copyPhase, sortPhase,
            this, mapOutputFile, localMapFiles);
    shuffleConsumerPlugin.init(shuffleContext);

    rIter = shuffleConsumerPlugin.run();

    // free up the data structures
    mapOutputFilesOnDisk.clear();

    sortPhase.complete(); // sort is complete
    sw2.stop();
    LOG.info("Palladio debug: Elapsed time for shuffle==" + sw2.elapsedMillis());
    long cputimeendofshuffle = getCpuTime(); // getting cpu time for palladio debug
    long usedshuffle2 = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
    LOG.info("Palladio debug: Total memory available after call shuffle=" + usedshuffle2);
    long cputimeforshuffle = cputimeendofshuffle - cputimestartofshuffle;
    LOG.info("Palladio debug: CPU time taken for shuffle==" + cputimeforshuffle);

    setPhase(TaskStatus.Phase.REDUCE);
    statusUpdate(umbilical);
    Class keyClass = job.getMapOutputKeyClass();
    Class valueClass = job.getMapOutputValueClass();
    RawComparator comparator = job.getOutputValueGroupingComparator();

    if (useNewApi) {

        Stopwatch sw1 = new Stopwatch().start(); // for Palladio debug
        runNewReducer(job, umbilical, reporter, rIter, comparator, keyClass, valueClass);
        sw1.stop();
        LOG.info("Palladio debug: Elapsed time for reducer=" + sw1.elapsedMillis());
    } else {
        runOldReducer(job, umbilical, reporter, rIter, comparator, keyClass, valueClass);
    }

    shuffleConsumerPlugin.close();
    done(umbilical, reporter);
}

From source file:com.sourcecode.FileInputFormat.java

/** 
 * Generate the list of files and make them into FileSplits.
 * @param job the job context
 * @throws IOException
 */
public List<InputSplit> getSplits(JobContext job) throws IOException {
    Stopwatch sw = new Stopwatch().start();
    long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
    long maxSize = getMaxSplitSize(job);

    // generate splits
    List<InputSplit> splits = new ArrayList<InputSplit>();
    List<FileStatus> files = listStatus(job);
    for (FileStatus file : files) {
        Path path = file.getPath();
        long length = file.getLen();
        if (length != 0) {
            BlockLocation[] blkLocations;
            if (file instanceof LocatedFileStatus) {
                blkLocations = ((LocatedFileStatus) file).getBlockLocations();
            } else {
                FileSystem fs = path.getFileSystem(job.getConfiguration());
                blkLocations = fs.getFileBlockLocations(file, 0, length);
            }
            if (isSplitable(job, path)) {
                long blockSize = file.getBlockSize();
                long splitSize = computeSplitSize(blockSize, minSize, maxSize);

                long bytesRemaining = length;
                while (((double) bytesRemaining) / splitSize > SPLIT_SLOP) {
                    int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);
                    splits.add(makeSplit(path, length - bytesRemaining, splitSize,
                            blkLocations[blkIndex].getHosts(), blkLocations[blkIndex].getCachedHosts()));
                    bytesRemaining -= splitSize;
                }

                if (bytesRemaining != 0) {
                    int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);
                    splits.add(makeSplit(path, length - bytesRemaining, bytesRemaining,
                            blkLocations[blkIndex].getHosts(), blkLocations[blkIndex].getCachedHosts()));
                }
            } else { // not splitable
                splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
                        blkLocations[0].getCachedHosts()));
            }
        } else {
            //Create empty hosts array for zero length files
            splits.add(makeSplit(path, 0, length, new String[0]));
        }
    }
    // Save the number of input files for metrics/loadgen
    job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
    sw.stop();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Total # of splits generated by getSplits: " + splits.size() + ", TimeTaken: "
                + sw.elapsedMillis());
    }
    return splits;
}

From source file:org.caleydo.core.data.collection.table.NumericalTable.java

/**
 *
 */
private void performImputation(KNNImputeDescription desc) {

    Stopwatch w = new Stopwatch().start();
    ImmutableList.Builder<Gene> b = ImmutableList.builder();
    final int rows = getNrRows();
    final int cols = columns.size();

    // create data
    if (desc.getDimension().isRecord()) {
        for (int i = 0; i < rows; ++i) {
            float[] data = new float[cols];
            int nans = 0;
            int j = 0;
            for (AColumn<?, ?> column : columns) {
                @SuppressWarnings("unchecked")
                NumericalColumn<?, Float> nColumn = (NumericalColumn<?, Float>) column;
                Float raw = nColumn.getRaw(i);
                if (raw == null || raw.isNaN())
                    nans++;
                data[j++] = raw == null ? Float.NaN : raw.floatValue();
            }
            b.add(new Gene(i, nans, data));
        }
    } else {
        int i = 0;
        for (AColumn<?, ?> column : columns) {
            float[] data = new float[rows];
            int nans = 0;
            @SuppressWarnings("unchecked")
            NumericalColumn<?, Float> nColumn = (NumericalColumn<?, Float>) column;

            for (int j = 0; j < rows; j++) {
                Float raw = nColumn.getRaw(i);
                if (raw == null || raw.isNaN())
                    nans++;
                data[j++] = raw == null ? Float.NaN : raw.floatValue();
            }
            b.add(new Gene(i++, nans, data));
        }
    }

    System.out.println("NumericalTable.performImputation() data creation:\t" + w);
    w.reset().start();
    KNNImpute task = new KNNImpute(desc, b.build());
    ForkJoinPool pool = new ForkJoinPool();
    com.google.common.collect.Table<Integer, Integer, Float> impute = pool.invoke(task);
    pool.shutdown();
    System.out.println("NumericalTable.performImputation() computation:\t" + w);
    w.reset().start();

    // update data
    final boolean isColumnFirstDimension = desc.getDimension().isDimension();
    // in either case iterate over the columns first and update a columns at once
    for (Map.Entry<Integer, Map<Integer, Float>> entry : (isColumnFirstDimension ? impute.rowMap()
            : impute.columnMap()).entrySet()) {
        AColumn<?, ?> aColumn = columns.get(entry.getKey().intValue());
        @SuppressWarnings("unchecked")
        NumericalColumn<?, Float> nColumn = (NumericalColumn<?, Float>) aColumn;
        // apply updates
        for (Map.Entry<Integer, Float> entry2 : entry.getValue().entrySet()) {
            nColumn.setRaw(entry2.getKey(), entry2.getValue());
        }
    }
    System.out.println("NumericalTable.performImputation() update:\t" + w);
}

From source file:matching.naive.BranchAndBoundMatching.java

/**
 * Tiny tests / benchmarks.
 */
public static void _main(final String[] args) {

    /*
    final double[][] matrix =
    {
        { 0, 1, 2, 3 },
        { 1, 0, 10, 20 },
        { 2, 10, 0, 10 },
        { 3, 20, 10, 0 },
    };
    */

    final double[][] matrix = {
            //     A  B  C  D
            /*A*/{ 0, 1, 5, 10 }, /*B*/{ 1, 0, 1, 1 }, /*C*/{ 5, 1, 0, 1 }, /*D*/{ 10, 1, 1, 0 } };
    // TODO ? normaliser la matrice entre 0 et 1 ?
    /*
    final double[][] matrix =
    {
        { 0, 1, 1, 1, 1, 1 },
        { 1, 0, 1, 1, 1, 1 },
        { 1, 1, 0, 1, 1, 1 },
        { 1, 1, 1, 0, 1, 1 },
        { 1, 1, 1, 1, 0, 1 },
        { 1, 1, 1, 1, 1, 0 },
    };
    */

    /*
    final double[][] matrix =
    {
        { 0, 10, 1, 1, 1, 1, 1, 1 },
        { 10, 0, 1, 1, 1, 1, 1, 1 },
        { 1, 1, 0, 1, 1, 1, 1, 1 },
        { 1, 1, 1, 0, 1, 1, 1, 1 },
        { 1, 1, 1, 1, 0, 1, 1, 1 },
        { 1, 1, 1, 1, 1, 0, 1, 1 },
        { 1, 1, 1, 1, 1, 1, 0, 1 },
        { 1, 1, 1, 1, 1, 1, 1, 0 },
    };
    */

    final BranchAndBoundMatching matching = new BranchAndBoundMatching(matrix);
    final List<String> labels = Lists.newArrayList("A", "B", "C", "D", "E", "F");

    final Function<Position, String> mapping = new Function<Position, String>() {

        @Override
        public String apply(final Position position) {
            return labels.get(position.getRowIndex()) + labels.get(position.getColumnIndex());
        }

    };

    final Stopwatch stopwatch = new Stopwatch();

    stopwatch.start();
    //final Match firstMatch = matching.match(Extremum.MIN);
    //System.out.println(firstMatch);
    //System.out.println(firstMatch.apply(mapping));
    final Iterable<Match> matches = matching.matchAll();
    stopwatch.stop();
    for (final Match match : matches)
        System.out.println(match.apply(mapping));
    System.out.println(stopwatch.elapsedTime(TimeUnit.NANOSECONDS) + " " + TimeUnit.NANOSECONDS.toString());

}
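
Several of the examples above read the timer with elapsedMillis() or elapsedTime(TimeUnit). Those methods only exist in older Guava releases; current releases expose elapsed(TimeUnit) (and, more recently, elapsed() returning a java.time.Duration) instead, and the public Stopwatch() constructor has been replaced by factory methods. Below is a minimal sketch of the same timing pattern, assuming a recent Guava version; doWork() is just a placeholder for the code under measurement.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class ModernStopwatchUsage {

    public static void main(String[] args) {
        Stopwatch stopwatch = Stopwatch.createStarted(); // replaces new Stopwatch().start()
        doWork();
        stopwatch.stop();
        // elapsed(TimeUnit) replaces elapsedMillis()/elapsedTime(TimeUnit)
        System.out.println("elapsed: " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }

    private static void doWork() {
        // placeholder for the code being timed
        for (int i = 0; i < 1_000_000; i++) {
            Math.sqrt(i);
        }
    }
}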