Example usage for com.google.common.base Stopwatch Stopwatch

Introduction

This page collects example usages of the no-argument com.google.common.base.Stopwatch constructor, Stopwatch(), taken from open-source projects.

Prototype

Stopwatch() 
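
A stopwatch created with this no-argument constructor starts out stopped; it must be started explicitly and can then be stopped, read, and reset. Note that later Guava releases deprecate this constructor in favor of the static factories Stopwatch.createUnstarted() and Stopwatch.createStarted(). Below is a minimal sketch of the typical lifecycle (not taken from any of the projects listed further down), assuming a Guava version that still exposes the public constructor:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch sw = new Stopwatch(); // created in the stopped state
        sw.start();                     // begin timing
        Thread.sleep(50);               // the work being measured
        sw.stop();                      // stop timing; the elapsed value is frozen
        System.out.println("Took " + sw.elapsed(TimeUnit.MILLISECONDS) + " ms (" + sw + ")");
        sw.reset();                     // back to zero and stopped, ready for reuse
    }
}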

Usage

From source file:cosmos.mapred.MediawikiQueries.java

public void groupBy(Store id, Column colToFetch, Map<Column, Long> columnCounts, long totalResults)
        throws Exception {
    Stopwatch sw = new Stopwatch();

    sw.start();
    final CloseableIterable<Entry<RecordValue<?>, Long>> results = this.sorts.groupResults(id, colToFetch);
    TreeMap<RecordValue<?>, Long> counts = Maps.newTreeMap();

    for (Entry<RecordValue<?>, Long> entry : results) {
        counts.put(entry.getKey(), entry.getValue());
    }

    results.close();
    sw.stop();

    System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - Took " + sw.toString()
            + " to group results");
    logTiming(totalResults, sw.elapsed(TimeUnit.MILLISECONDS), "groupBy:" + colToFetch);

    //    System.out.println(counts);

    final CloseableIterable<MultimapRecord> verifyResults = this.sorts.fetch(id, Index.define(colToFetch));
    TreeMap<RecordValue<?>, Long> records = Maps.newTreeMap();
    for (MultimapRecord r : verifyResults) {
        if (r.containsKey(colToFetch)) {
            for (RecordValue<?> val : r.get(colToFetch)) {
                if (records.containsKey(val)) {
                    records.put(val, records.get(val) + 1);
                } else {
                    records.put(val, 1L);
                }
            }
        }
    }

    verifyResults.close();

    if (counts.size() != records.size()) {
        System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - Expected "
                + records.size() + " groups but found " + counts.size());
        System.exit(1);
    }

    Set<RecordValue<?>> countKeys = counts.keySet(), recordKeys = records.keySet();
    for (RecordValue<?> k : countKeys) {
        if (!recordKeys.contains(k)) {
            System.out.println(Thread.currentThread().getName() + ": " + colToFetch
                    + " - Expected to have count for " + k);
            System.exit(1);
        }

        Long actual = counts.get(k), expected = records.get(k);

        if (!actual.equals(expected)) {
            System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - Expected " + expected
                    + " value(s) but found " + actual + " value(s) for " + k.value());
            System.exit(1);
        }
    }
}

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("flow/{cluster}/{user}/{appId}")
@Produces(MediaType.APPLICATION_JSON)
public List<Flow> getJobFlowById(@PathParam("cluster") String cluster, @PathParam("user") String user,
        @PathParam("appId") String appId, @QueryParam("limit") int limit,
        @QueryParam("startTime") long startTime, @QueryParam("endTime") long endTime,
        @QueryParam("include") List<String> include, @QueryParam("includeConf") List<String> includeConfig,
        @QueryParam("includeConfRegex") List<String> includeConfigRegex,
        @QueryParam("includeJobField") List<String> includeJobFields) throws IOException {

    Stopwatch timer = new Stopwatch().start();
    Predicate<String> configFilter = null;
    if (includeConfig != null && !includeConfig.isEmpty()) {
        configFilter = new SerializationContext.FieldNameFilter(includeConfig);
    } else if (includeConfigRegex != null && !includeConfigRegex.isEmpty()) {
        configFilter = new SerializationContext.RegexConfigurationFilter(includeConfigRegex);
    }
    Predicate<String> jobFilter = null;
    if (includeJobFields != null && !includeJobFields.isEmpty()) {
        jobFilter = new SerializationContext.FieldNameFilter(includeJobFields);
    }

    Predicate<String> flowFilter = null;
    if (include != null && !include.isEmpty()) {
        flowFilter = new SerializationContext.FieldNameFilter(include);
    }

    serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING, configFilter,
            flowFilter, jobFilter, null));

    List<Flow> flows = getFlowList(cluster, user, appId, null, startTime, endTime, limit);
    timer.stop();

    StringBuilder builderIncludeConfigs = new StringBuilder();
    for (String s : includeConfig) {
        builderIncludeConfigs.append(s);
    }

    StringBuilder builderIncludeConfigRegex = new StringBuilder();
    for (String s : includeConfigRegex) {
        builderIncludeConfigRegex.append(s);
    }

    if (flows != null) {
        LOG.info("For flow/{cluster}/{user}/{appId} with input query: " + "flow/" + cluster + SLASH + user
                + SLASH + appId + "?limit=" + limit + "&startTime=" + startTime + "&endTime=" + endTime
                + "&includeConf=" + builderIncludeConfigs + "&includeConfRegex=" + builderIncludeConfigRegex
                + StringUtil.buildParam("includeJobField", includeJobFields) + "&"
                + StringUtil.buildParam("include", include) + " fetched " + flows.size() + " flows in "
                + timer);
    } else {
        LOG.info("For flow/{cluster}/{user}/{appId} with input query: " + "flow/" + cluster + SLASH + user
                + SLASH + appId + "?limit=" + limit + "&includeConf=" + builderIncludeConfigs
                + "&includeConfRegex=" + builderIncludeConfigRegex
                + StringUtil.buildParam("includeJobField", includeJobFields) + "&"
                + StringUtil.buildParam("include", include) + " No flows fetched, spent " + timer);
    }

    // export latency metrics
    HravenResponseMetrics.FLOW_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS));
    return flows;

}

From source file:org.rhq.server.metrics.MetricsServer.java

public void addNumericData(final Set<MeasurementDataNumeric> dataSet, final RawDataInsertedCallback callback) {
    if (log.isDebugEnabled()) {
        log.debug("Inserting " + dataSet.size() + " raw metrics");
    }
    final Stopwatch stopwatch = new Stopwatch().start();
    final AtomicInteger remainingInserts = new AtomicInteger(dataSet.size());
    // TODO add support for splitting cache index partition
    final int partition = 0;

    for (final MeasurementDataNumeric data : dataSet) {
        DateTime collectionTimeSlice = dateTimeService.getTimeSlice(new DateTime(data.getTimestamp()),
                configuration.getRawTimeSliceDuration());
        Days days = Days.daysBetween(collectionTimeSlice, dateTimeService.now());

        if (days.isGreaterThan(rawDataAgeLimit)) {
            callback.onSuccess(data);
            continue;
        }

        StorageResultSetFuture rawFuture = dao.insertRawData(data);
        StorageResultSetFuture indexFuture = dao.updateIndex(IndexBucket.RAW, collectionTimeSlice.getMillis(),
                data.getScheduleId());
        ListenableFuture<List<ResultSet>> insertsFuture = Futures.successfulAsList(rawFuture, indexFuture);
        Futures.addCallback(insertsFuture, new FutureCallback<List<ResultSet>>() {
            @Override
            public void onSuccess(List<ResultSet> result) {
                callback.onSuccess(data);
                if (remainingInserts.decrementAndGet() == 0) {
                    stopwatch.stop();
                    if (log.isDebugEnabled()) {
                        log.debug("Finished inserting " + dataSet.size() + " raw metrics in "
                                + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
                    }
                    callback.onFinish();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                if (log.isDebugEnabled()) {
                    log.debug("An error occurred while inserting raw data", ThrowableUtil.getRootCause(t));
                } else {
                    log.warn("An error occurred while inserting raw data: " + ThrowableUtil.getRootMessage(t));
                }
                callback.onFailure(t);
            }
        }, tasks);
    }
}

From source file:processing.BLLCalculator.java

private static List<Map<Integer, Double>> startActCreation(BookmarkReader reader, int sampleSize,
        boolean sorting, boolean userBased, boolean resBased, double dVal, int beta, CalculationType cType,
        Double lambda) {//from w  ww .ja v a2  s. co  m
    int size = reader.getBookmarks().size();
    int trainSize = size - sampleSize;

    Stopwatch timer = new Stopwatch();
    timer.start();
    BLLCalculator calculator = new BLLCalculator(reader, trainSize, dVal, beta, userBased, resBased, cType,
            lambda);
    timer.stop();
    long trainingTime = timer.elapsed(TimeUnit.MILLISECONDS);
    List<Map<Integer, Double>> results = new ArrayList<Map<Integer, Double>>();
    if (trainSize == size) {
        trainSize = 0;
    }

    timer.reset();
    timer.start();
    for (int i = trainSize; i < size; i++) { // the test-set
        Bookmark data = reader.getBookmarks().get(i);
        Map<Integer, Double> map = calculator.getRankedTagList(data.getUserID(), data.getResourceID(), sorting,
                cType);
        results.add(map);
    }
    timer.stop();
    long testTime = timer.elapsed(TimeUnit.MILLISECONDS);

    timeString = PerformanceMeasurement.addTimeMeasurement(timeString, true, trainingTime, testTime,
            sampleSize);
    return results;
}

From source file:uk.ac.open.kmi.iserve.discovery.disco.impl.SparqlLogicConceptMatcher.java

/**
 * Obtain all match results for the set of origins that are within a range of match types
 * TODO: This method is buggy. To be fixed
 *
 * @param origins
 * @param minType
 * @param maxType
 * @return
 */
private Table<URI, URI, MatchResult> obtainMatchResults(Set<URI> origins, MatchType minType,
        MatchType maxType) {

    log.debug("Obtain match results for {}, with {} <= Match Result <= {}", origins, minType, maxType);

    Table<URI, URI, MatchResult> result = HashBasedTable.create();
    // Exit fast if no data is provided or no matches can be found
    if (origins == null || origins.isEmpty() || minType.compareTo(maxType) > 0)
        return result;

    // Create the query
    String queryStr = new StringBuffer().append(generateQueryHeader())
            .append(generateRangeMatchWhereClause(origins, minType, maxType)).append(generateQueryFooter())
            .toString();

    log.debug("SPARQL Query generated: \n {}", queryStr);

    // Query the engine
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.sparqlEndpoint.toASCIIString(), query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);
    try {
        Stopwatch stopwatch = new Stopwatch().start();
        ResultSet qResults = qexec.execSelect();
        stopwatch.stop();
        log.debug("Obtained matches for {} concepts within range {} - {} in {}", origins.size(), minType,
                maxType, stopwatch);

        Resource origin;
        Resource destination;
        URI originUri;
        URI matchUri;
        int index = 0;
        // Iterate over the results obtained starting with the matches for class0 onwards
        while (qResults.hasNext()) {
            QuerySolution soln = qResults.nextSolution();
            origin = soln.getResource(ORIGIN_VAR);
            destination = soln.getResource(MATCH_VAR + index);

            if (origin != null && origin.isURIResource() && destination != null
                    && destination.isURIResource()) {
                originUri = new URI(origin.getURI());
                matchUri = new URI(destination.getURI());
                MatchType type = getMatchType(soln);
                result.put(originUri, matchUri, new AtomicMatchResult(originUri, matchUri, type, this));
                log.debug("Concept {} was matched to {} with type {}", originUri, matchUri, type);
            } else {
                log.warn("Skipping result as some URI is null: Origin - {}, Destination - {}", origin,
                        destination);
                break;
            }
        }
    } catch (URISyntaxException e) {
        log.error("Error obtaining match result. Expected a correct URI", e);
    } finally {
        qexec.close();
    }
    return result;

}

From source file:com.twitter.hraven.datasource.JobHistoryService.java

/**
 * Returns a list of {@link Flow} instances generated from the given results.
 * For the moment, this assumes that the given scanner provides results
 * ordered first by flow ID.
 * 
 * @param scan
 *          the Scan instance setup for retrieval
 * @return
 */
private List<Flow> createFromResults(Scan scan, boolean populateTasks, int maxCount) throws IOException {
    List<Flow> flows = new ArrayList<Flow>();
    ResultScanner scanner = null;
    try {
        Stopwatch timer = new Stopwatch().start();
        Stopwatch timerJob = new Stopwatch();
        int rowCount = 0;
        long colCount = 0;
        long resultSize = 0;
        int jobCount = 0;
        scanner = historyTable.getScanner(scan);
        Flow currentFlow = null;
        for (Result result : scanner) {
            if (result != null && !result.isEmpty()) {
                rowCount++;
                colCount += result.size();
                resultSize += result.getWritableSize();
                JobKey currentKey = jobKeyConv.fromBytes(result.getRow());
                // empty runId is special cased -- we need to treat each job as its own flow
                if (currentFlow == null || !currentFlow.contains(currentKey) || currentKey.getRunId() == 0) {
                    // return if we've already hit the limit
                    if (flows.size() >= maxCount) {
                        break;
                    }
                    currentFlow = new Flow(new FlowKey(currentKey));
                    flows.add(currentFlow);
                }
                timerJob.start();
                JobDetails job = new JobDetails(currentKey);
                job.populate(result);
                currentFlow.addJob(job);
                jobCount++;
                timerJob.stop();
            }
        }
        timer.stop();
        LOG.info("Fetched from hbase " + rowCount + " rows, " + colCount + " columns, " + flows.size()
                + " flows and " + jobCount + " jobs taking up " + resultSize + " bytes ( "
                + (double) resultSize / (1024.0 * 1024.0) + " atomic double: "
                + new AtomicDouble(resultSize / (1024.0 * 1024.0)) + ") MB, in total time of " + timer
                + " with  " + timerJob + " spent inJobDetails & Flow population");

        // export the size of data fetched from hbase as a metric
        HravenResponseMetrics.FLOW_HBASE_RESULT_SIZE_VALUE.set((double) (resultSize / (1024.0 * 1024.0)));
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }

    if (populateTasks) {
        populateTasks(flows);
    }

    return flows;
}

From source file:org.caleydo.core.util.clusterer.algorithm.tree.TreeClusterer.java

@Override
protected PerspectiveInitializationData cluster() {
    int r = 0;

    Stopwatch w = new Stopwatch().start();
    r = determineSimilarities();
    System.out.println("determine similarties: " + w);
    w.stop().reset();
    if (r < 0) {
        progress(100);
        return null;
    }

    TreeClusterConfiguration tConfig = (TreeClusterConfiguration) config.getClusterAlgorithmConfiguration();

    Node[] result;

    w.start();
    switch (tConfig.getTreeClustererAlgo()) {
    case COMPLETE_LINKAGE:
        result = pmlcluster();
        System.out.println("pmlcluster: " + w);
        break;
    case AVERAGE_LINKAGE:

        result = palcluster();
        System.out.println("palcluster: " + w);
        break;
    case SINGLE_LINKAGE:
        result = pslcluster();
        System.out.println("pslcluster: " + w);
        break;
    default:
        throw new IllegalStateException("Unkonwn cluster type: " + tConfig.getTreeClustererAlgo());
    }
    if (result == null)
        return null;
    w.reset().start();
    PerspectiveInitializationData p = convert(result);
    System.out.println("convert: " + w);
    return p;
}

From source file:cosmos.impl.CosmosImpl.java

@Override
public CloseableIterable<Column> columns(Store id) throws TableNotFoundException, UnexpectedStateException {
    checkNotNull(id);

    final String description = "Cosmos:columns";
    Stopwatch sw = new Stopwatch().start();

    try {
        State s = PersistedStores.getState(id);

        if (!State.LOADING.equals(s) && !State.LOADED.equals(s)) {
            // Stopwatch stopped by finally
            throw unexpectedState(id, new State[] { State.LOADING, State.LOADED }, s);
        }

        return PersistedStores.columns(id, description, sw);
    } catch (TableNotFoundException e) {
        sw.stop();
        id.tracer().addTiming(description, sw.elapsed(TimeUnit.MILLISECONDS));
        throw e;
    } catch (UnexpectedStateException e) {
        sw.stop();
        id.tracer().addTiming(description, sw.elapsed(TimeUnit.MILLISECONDS));
        throw e;
    }
}

From source file:org.apache.bookkeeper.replication.Auditor.java

@SuppressWarnings("unchecked")
private void auditBookies() throws BKAuditException, KeeperException, InterruptedException, BKException {
    try {
        waitIfLedgerReplicationDisabled();
    } catch (UnavailableException ue) {
        LOG.error("Underreplication unavailable, skipping audit." + "Will retry after a period");
        return;
    }

    Stopwatch stopwatch = new Stopwatch().start();
    // put exit cases here
    Map<String, Set<Long>> ledgerDetails = generateBookie2LedgersIndex();
    try {
        if (!ledgerUnderreplicationManager.isLedgerReplicationEnabled()) {
            // has been disabled while we were generating the index
            // discard this run, and schedule a new one
            executor.submit(BOOKIE_CHECK);
            return;
        }
    } catch (UnavailableException ue) {
        LOG.error("Underreplication unavailable, skipping audit." + "Will retry after a period");
        return;
    }

    List<String> availableBookies = getAvailableBookies();
    // find lost bookies
    Set<String> knownBookies = ledgerDetails.keySet();
    Collection<String> lostBookies = CollectionUtils.subtract(knownBookies, availableBookies);

    bookieToLedgersMapCreationTime.registerSuccessfulEvent(stopwatch.elapsedMillis(), TimeUnit.MILLISECONDS);
    if (lostBookies.size() > 0) {
        handleLostBookies(lostBookies, ledgerDetails);
        uRLPublishTimeForLostBookies.registerSuccessfulEvent(stopwatch.stop().elapsedMillis(),
                TimeUnit.MILLISECONDS);
    }

}

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("flowStats/{cluster}/{user}/{appId}")
@Produces(MediaType.APPLICATION_JSON)
public PaginatedResult<Flow> getJobFlowStats(@PathParam("cluster") String cluster,
        @PathParam("user") String user, @PathParam("appId") String appId, @QueryParam("version") String version,
        @QueryParam("startRow") String startRowParam, @QueryParam("startTime") long startTime,
        @QueryParam("endTime") long endTime, @QueryParam("limit") @DefaultValue("100") int limit,
        @QueryParam("include") List<String> include, @QueryParam("includeJobs") boolean includeJobs,
        @QueryParam("includeJobField") List<String> includeJobFields) throws IOException {
    LOG.info("Fetching flowStats for flowStats/{cluster}/{user}/{appId} with input query: " + "flowStats/"
            + cluster + SLASH + " user " + user + appId + "?version=" + version + "&limit=" + limit
            + "&startRow=" + startRowParam + "&startTime=" + startTime + "&endTime=" + endTime + "&includeJobs="
            + includeJobs);

    Stopwatch timer = new Stopwatch().start();
    byte[] startRow = null;
    if (startRowParam != null) {
        startRow = Base64.decode(startRowParam);
    }

    Predicate<String> flowFilter = null;
    if (include != null && !include.isEmpty()) {
        flowFilter = new SerializationContext.FieldNameFilter(include);
    }

    if (includeJobs) {
        Predicate<String> jobFilter = null;
        if (includeJobFields != null && !includeJobFields.isEmpty()) {
            jobFilter = new SerializationContext.FieldNameFilter(includeJobFields);
        }

        serializationContext.set(
                new SerializationContext(SerializationContext.DetailLevel.FLOW_SUMMARY_STATS_WITH_JOB_STATS,
                        null, flowFilter, jobFilter, null));
    } else {
        serializationContext.set(new SerializationContext(
                SerializationContext.DetailLevel.FLOW_SUMMARY_STATS_ONLY, null, flowFilter, null, null));
    }

    if (endTime == 0) {
        endTime = Long.MAX_VALUE;
    }

    if ((limit == 0) || (limit == Integer.MAX_VALUE)) {
        limit = Integer.MAX_VALUE - 1;
    }

    List<Flow> flows = getJobHistoryService().getFlowTimeSeriesStats(cluster, user, appId, version, startTime,
            endTime, limit + 1, startRow);
    PaginatedResult<Flow> flowStatsPage = new PaginatedResult<Flow>(limit);
    // add request parameters
    flowStatsPage.addRequestParameter("user", user);
    flowStatsPage.addRequestParameter("appId", appId);
    if (StringUtils.isNotBlank(version)) {
        flowStatsPage.addRequestParameter("version", version);
    } else {
        flowStatsPage.addRequestParameter("version", "all");
    }

    flowStatsPage.addRequestParameter("startTime", Long.toString(startTime));
    flowStatsPage.addRequestParameter("endTime", Long.toString(endTime));
    flowStatsPage.addRequestParameter("limit", Integer.toString(limit));

    if (startRow != null) {
        flowStatsPage.addRequestParameter("startRow", startRowParam);
    }

    if (includeJobs) {
        flowStatsPage.addRequestParameter("includeJobs", "true");
    } else {
        flowStatsPage.addRequestParameter("includeJobs", "false");
    }

    if (flows.size() > limit) {
        // copy over the last excluding the last element
        // the last element is the start row for next page
        flowStatsPage.setValues(flows.subList(0, limit));
        flowStatsPage.setNextStartRow(new FlowKeyConverter().toBytes(flows.get(limit).getFlowKey()));
    } else {
        flowStatsPage.setNextStartRow(null);
        flowStatsPage.setValues(flows);
    }
    timer.stop();

    LOG.info("For flowStats/{cluster}/{user}/{appId} with input query: " + "flowStats/" + cluster + SLASH // + user /{appId} cluster + " user " + user
            + appId + "?version=" + version + "&limit=" + limit + "&startRow=" + startRow + "&startTime="
            + startTime + "&endTime=" + endTime + "&includeJobs=" + includeJobs + "&"
            + StringUtil.buildParam("includeJobField", includeJobFields) + " fetched " + flows.size() + " in "
            + timer);

    // export latency metrics
    HravenResponseMetrics.FLOW_STATS_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS));
    return flowStatsPage;
}