Example usage for com.google.common.base.Stopwatch.stop()

List of usage examples for com.google.common.base.Stopwatch.stop()

Introduction

On this page you can find example usages of com.google.common.base.Stopwatch.stop(), collected from several open-source projects.

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch. Future reads will return the fixed duration that had elapsed up to this point. The call returns this Stopwatch instance and throws IllegalStateException if the stopwatch is already stopped.
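
As a quick orientation before the full examples below, here is a minimal, self-contained sketch of the usual start/stop/elapsed cycle; the class name and the Thread.sleep call are placeholders for real work and are not taken from any of the examples on this page.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchStopSketch {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(100); // stand-in for the work being measured
        stopwatch.stop();  // elapsed time is now frozen at roughly 100 ms

        System.out.println("Took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println("Took " + stopwatch); // toString() formats the elapsed time, e.g. "101.2 ms"
    }
}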

Usage

From source file:cosmos.mapred.MediawikiQueries.java

public long columnFetch(Store id, Column colToFetch, Map<Column, Long> counts, long totalResults)
        throws Exception {
    Stopwatch sw = new Stopwatch();
    String prev = null;
    String lastDocId = null;
    long resultCount = 0l;

    sw.start();
    final CloseableIterable<MultimapRecord> results = this.sorts.fetch(id, Index.define(colToFetch));
    Iterator<MultimapRecord> resultsIter = results.iterator();

    for (; resultsIter.hasNext();) {
        MultimapRecord r = resultsIter.next();

        sw.stop();
        resultCount++;

        Collection<RecordValue<?>> values = r.get(colToFetch);

        TreeSet<RecordValue<?>> sortedValues = Sets.newTreeSet(values);

        if (null == prev) {
            prev = sortedValues.first().value().toString();
        } else {
            boolean plausible = false;
            Iterator<RecordValue<?>> iter = sortedValues.iterator();
            for (; !plausible && iter.hasNext();) {
                String val = iter.next().value().toString();
                if (prev.compareTo(val) <= 0) {
                    plausible = true;
                }
            }

            if (!plausible) {
                System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - " + lastDocId
                        + " shouldn't have come before " + r.docId());
                System.out.println(prev + " compared to " + sortedValues);
                results.close();
                System.exit(1);
            }
        }

        lastDocId = r.docId();

        sw.start();
    }

    sw.stop();

    System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - Took " + sw.toString()
            + " to fetch results");
    logTiming(totalResults, sw.elapsed(TimeUnit.MILLISECONDS), "fetch:" + colToFetch);

    results.close();

    long expected = counts.containsKey(colToFetch) ? counts.get(colToFetch) : -1;

    if (resultCount != expected) {
        System.out.println(Thread.currentThread().getName() + " " + colToFetch + ": Expected to get " + expected
                + " records but got " + resultCount);
        System.exit(1);
    }

    return resultCount;
}
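
The example above stops and restarts the same Stopwatch inside the loop so that only the fetch calls, and not the per-record validation, accumulate elapsed time. Below is a stripped-down sketch of that pause/resume pattern; the list contents and the length() call are placeholders for real fetching and validation, not part of the original source.

import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class PauseResumeSketch {
    public static void main(String[] args) {
        Iterator<String> results = Arrays.asList("a", "b", "c").iterator();

        Stopwatch sw = Stopwatch.createStarted();
        while (results.hasNext()) {
            String record = results.next(); // timed: pulling the next result
            sw.stop();                      // pause while doing untimed work
            record.length();                // stand-in for per-record validation
            sw.start();                     // resume before the next fetch
        }
        sw.stop();

        System.out.println("Fetching took " + sw.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}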

From source file:com.palantir.common.base.PrefetchingBatchingVisitable.java

@Override
public <K extends Exception> boolean batchAccept(final int batchSize, AbortingVisitor<? super List<T>, K> v)
        throws K {
    final Queue<List<T>> queue = Queues.newArrayDeque();
    final Lock lock = new ReentrantLock();
    final Condition itemAvailable = lock.newCondition();
    final Condition spaceAvailable = lock.newCondition();
    final AtomicBoolean futureIsDone = new AtomicBoolean(false);
    final AtomicReference<Throwable> exception = new AtomicReference<Throwable>();
    final Stopwatch fetchTime = Stopwatch.createUnstarted();
    final Stopwatch fetchBlockedTime = Stopwatch.createUnstarted();
    final Stopwatch visitTime = Stopwatch.createUnstarted();
    final Stopwatch visitBlockedTime = Stopwatch.createUnstarted();

    Future<?> future = exec.submit(new Runnable() {
        @Override
        public void run() {
            try {
                fetchTime.start();
                delegate.batchAccept(batchSize, new AbortingVisitor<List<T>, InterruptedException>() {
                    @Override
                    public boolean visit(List<T> item) throws InterruptedException {
                        fetchTime.stop();
                        fetchBlockedTime.start();
                        lock.lock();
                        try {
                            while (queue.size() >= capacity) {
                                spaceAvailable.await();
                            }
                            fetchBlockedTime.stop();
                            queue.add(item);
                            itemAvailable.signalAll();
                        } finally {
                            lock.unlock();
                        }
                        fetchTime.start();
                        return true;
                    }
                });
                fetchTime.stop();
            } catch (InterruptedException e) {
                // shutting down
            } catch (Throwable t) {
                exception.set(t);
            } finally {
                if (fetchTime.isRunning()) {
                    fetchTime.stop();
                }
                if (fetchBlockedTime.isRunning()) {
                    fetchBlockedTime.stop();
                }
                lock.lock();
                try {
                    futureIsDone.set(true);
                    itemAvailable.signalAll();
                } finally {
                    lock.unlock();
                }
            }
        }
    });

    try {
        while (true) {
            List<T> batch;
            visitBlockedTime.start();
            lock.lock();
            try {
                while (queue.isEmpty()) {
                    if (futureIsDone.get()) {
                        if (exception.get() != null) {
                            throw Throwables.rewrapAndThrowUncheckedException(exception.get());
                        }
                        return true;
                    }
                    itemAvailable.await();
                }
                batch = queue.poll();
                spaceAvailable.signalAll();
            } finally {
                lock.unlock();
            }
            visitBlockedTime.stop();
            visitTime.start();
            boolean proceed = v.visit(batch);
            visitTime.stop();
            if (!proceed) {
                return false;
            }
        }
    } catch (InterruptedException e) {
        throw Throwables.rewrapAndThrowUncheckedException(e);
    } finally {
        log.debug("{} timings: fetch {}, fetchBlocked {}, visit {}, visitBlocked {}", name, fetchTime,
                fetchBlockedTime, visitTime, visitBlockedTime);
        future.cancel(true);
    }
}

From source file:com.google.api.ads.adwords.awalerting.processor.AlertRulesProcessor.java

/**
 * Process the ReportData list with the alert rules, each report with all rules per thread.
 *
 * @param reports the list of ReportData to run each alert action against
 */
public void processReports(List<ReportData> reports) throws AlertProcessingException {
    // Create one thread for each report, and apply all alert rules in sequence
    Stopwatch stopwatch = Stopwatch.createStarted();

    CountDownLatch latch = new CountDownLatch(reports.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    for (ReportData report : reports) {
        RunnableAlertRulesProcessor rulesProcessor = new RunnableAlertRulesProcessor(report, rules,
                alertMessage);
        executeRunnableAlertRulesProcessor(executorService, rulesProcessor, latch);
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AlertProcessingException("AlertRulesProcessor encounters InterruptedException.", e);
    }

    executorService.shutdown();
    stopwatch.stop();

    LOGGER.info("*** Processed {} rules and add alert messages on {} reports in {} seconds.", rules.size(),
            reports.size(), stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000);
}

From source file:qa.qcri.nadeef.core.util.sql.PostgresSQLDialect.java

/**
 * {@inheritDoc}
 */
@Override
public int bulkLoad(DBConfig dbConfig, String tableName, Path file, boolean skipHeader) {
    Tracer tracer = Tracer.getTracer(PostgresSQLDialect.class);
    tracer.info("Bulk load CSV file " + file.toString());
    try (Connection conn = DBConnectionPool.createConnection(dbConfig, true);
            FileReader reader = new FileReader(file.toFile())) {
        Stopwatch watch = Stopwatch.createStarted();
        Schema schema = DBMetaDataTool.getSchema(dbConfig, tableName);
        StringBuilder builder = new StringBuilder();
        for (Column column : schema.getColumns()) {
            if (column.getColumnName().equalsIgnoreCase("TID"))
                continue;
            builder.append(column.getColumnName()).append(",");
        }
        builder.deleteCharAt(builder.length() - 1);

        CopyManager copyManager = new CopyManager((BaseConnection) conn);
        String sql = String.format("COPY %s (%s) FROM STDIN WITH (FORMAT 'csv', DELIMITER ',', HEADER %s)",
                tableName, builder.toString(), skipHeader ? "true" : "false");
        copyManager.copyIn(sql, reader);
        watch.stop();
        tracer.info("Bulk load finished in " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    } catch (Exception ex) {
        tracer.err("Loading csv file " + file.getFileName() + " failed.", ex);
        return 1;
    }
    return 0;
}

From source file:org.obiba.opal.shell.commands.ImportCommand.java

@Override
public int execute() {
    int errorCode;

    Stopwatch stopwatch = Stopwatch.createStarted();

    List<FileObject> filesToImport = getFilesToImport();
    errorCode = executeImports(filesToImport);

    if (!options.isSource() & !options.isTables() & filesToImport.isEmpty()) {
        // Should this be considered success or an error? Will treat as an error for now.
        getShell().printf("No file, source or tables provided. Import canceled.\n");
        errorCode = CRITICAL_ERROR;
    } else if (errorCode != SUCCESS) {
        getShell().printf("Import failed.\n");
        log.info("Import failed in {}", stopwatch.stop());
    } else {
        getShell().printf("Import done.\n");
        log.info("Import succeed in {}", stopwatch.stop());
    }
    return errorCode;
}
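
Note that stop() returns the Stopwatch itself (see the prototype above), so passing stopwatch.stop() directly to the logger, as this example does, logs the stopwatch's formatted elapsed time via its toString().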

From source file:com.vmware.photon.controller.apife.backends.VmSqlBackend.java

@Override
@Transactional
public void tombstone(VmEntity vm) throws ExternalException {
    Stopwatch tombstoneWatch = Stopwatch.createStarted();
    vmDao.update(vm);
    String resourceTickedId = projectBackend.findById(vm.getProjectId()).getResourceTicketId();
    ImageEntity image = null;
    if (StringUtils.isNotBlank(vm.getImageId())) {
        image = imageBackend.findById(vm.getImageId());
    }

    FlavorEntity flavor = null;
    if (StringUtils.isNotBlank(vm.getFlavorId())) {
        flavor = flavorBackend.getEntityById(vm.getFlavorId());
    }

    Stopwatch resourceTicketWatch = Stopwatch.createStarted();
    resourceTicketBackend.returnQuota(resourceTickedId, new QuotaCost(vm.getCost()));
    resourceTicketWatch.stop();
    logger.info("VmSqlBackend.tombstone for Vm Id: {}, resourceTicket {}, returnQuota in {} milliseconds",
            vm.getId(), resourceTickedId, resourceTicketWatch.elapsed(TimeUnit.MILLISECONDS));

    tombstoneBackend.create(Vm.KIND, vm.getId());
    vmDao.delete(vm);
    for (AttachedDiskEntity attachedDisk : attachedDiskBackend.findByVmId(vm.getId())) {
        attachedDiskBackend.deleteAttachedDiskById(attachedDisk.getId());
    }

    if (image != null && ImageState.PENDING_DELETE.equals(image.getState())
            && vmDao.listByImage(image.getId()).isEmpty()) {
        imageBackend.tombstone(image);
    }

    if (flavor != null && FlavorState.PENDING_DELETE.equals(flavor.getState())
            && vmDao.listByFlavor(flavor.getId()).isEmpty()) {
        flavorBackend.tombstone(flavor);
    }

    tombstoneWatch.stop();
    logger.info("VmSqlBackend.tombstone for Vm Id: {} took {} milliseconds", vm.getId(),
            tombstoneWatch.elapsed(TimeUnit.MILLISECONDS));
}

From source file:brooklyn.rest.filter.LoggingFilter.java

@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    HttpServletRequest httpRequest = (HttpServletRequest) request;
    HttpServletResponse httpResponse = (HttpServletResponse) response;

    String uri = httpRequest.getRequestURI();
    String rid = RequestTaggingFilter.getTag();
    boolean isInteresting = INTERESTING_METHODS.contains(httpRequest.getMethod()),
            shouldLog = (isInteresting && LOG.isDebugEnabled()) || LOG.isTraceEnabled(), requestErrored = false;
    Stopwatch timer = Stopwatch.createUnstarted();
    try {
        if (shouldLog) {
            String message = "{} starting request {} {}";
            Object[] args = new Object[] { rid, httpRequest.getMethod(), uri };
            if (isInteresting) {
                LOG.debug(message, args);
            } else {
                LOG.trace(message, args);
            }
        }

        timer.start();
        chain.doFilter(request, response);

    } catch (Throwable e) {
        requestErrored = true;
        LOG.warn("REST API request " + rid + " failed: " + e, e);
        // Propagate for handling by other filter
        throw Exceptions.propagate(e);
    } finally {
        timer.stop();
        // This logging must not happen before chain.doFilter, or FormMapProvider will not work as expected.
        // Getting the parameter map consumes the request body and only resource methods using @FormParam
        // will work as expected.
        if (requestErrored || shouldLog) {
            boolean includeHeaders = requestErrored || httpResponse.getStatus() / 100 == 5
                    || LOG.isTraceEnabled();
            String message = getRequestCompletedMessage(includeHeaders, Duration.of(timer), rid, httpRequest,
                    httpResponse);
            if (requestErrored || isInteresting) {
                LOG.debug(message);
            } else {
                LOG.trace(message);
            }
        }
    }
}

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("hdfs/path/{cluster}/")
@Produces(MediaType.APPLICATION_JSON)
public List<HdfsStats> getHdfsPathTimeSeriesStats(@PathParam("cluster") String cluster,
        @QueryParam("path") String path, @QueryParam("starttime") long starttime,
        @QueryParam("endtime") long endtime, @QueryParam("limit") int limit) throws IOException {
    if (StringUtils.isEmpty(path)) {
        throw new RuntimeException("Required query param missing: path ");
    }

    if (limit == 0) {
        limit = HdfsConstants.RECORDS_RETURNED_LIMIT;
    }

    if (starttime == 0L) {
        // default it to current hour's top
        long lastHour = System.currentTimeMillis();
        // convert milliseconds to seconds
        starttime = lastHour / 1000L;
    }

    if (endtime == 0L) {
        // default it to one week ago
        endtime = starttime - 7 * 86400;
    }

    if (endtime > starttime) {
        throw new RuntimeException("Ensure endtime " + endtime + " is older than starttime " + starttime);
    }

    LOG.info(String.format(
            "Fetching hdfs timeseries stats for cluster=%s, path=%s limit=%d, starttime=%d endtime=%d", cluster,
            path, limit, starttime, endtime));
    Stopwatch timer = new Stopwatch().start();
    List<HdfsStats> hdfsStats = getHdfsStatsService().getHdfsTimeSeriesStats(cluster, path, limit, starttime,
            endtime);
    timer.stop();

    if (hdfsStats != null) {
        LOG.info("For hdfs/path/{cluster}/{attribute} with input query " + "hdfs/path/" + cluster + "?limit="
                + limit + "&path=" + path + " fetched #number of HdfsStats " + hdfsStats.size() + " in "
                + timer);
    } else {
        LOG.info("For hdfs/path/{cluster}/{attribute} with input query " + "hdfs/path/" + cluster + "?limit="
                + limit + "&path=" + path + " fetched 0 HdfsStats in " + timer);
    }

    // export latency metrics
    HravenResponseMetrics.HDFS_TIMESERIES_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS));
    return hdfsStats;
}

From source file:com.google.api.ads.adwords.awreporting.processors.onmemory.ReportProcessorOnMemory.java

/**
 * Generate all the mapped reports to the given account IDs.
 *
 * @param dateRangeType the date range type.
 * @param dateStart the starting date.
 * @param dateEnd the ending date.
 * @param accountIdsSet the account IDs.
 * @param properties the properties file
 * @throws Exception error reaching the API.
 */
@Override
public void generateReportsForMCC(String mccAccountId, ReportDefinitionDateRangeType dateRangeType,
        String dateStart, String dateEnd, Set<Long> accountIdsSet, Properties properties,
        ReportDefinitionReportType onDemandReportType, List<String> reportFieldsToInclude) throws Exception {

    LOGGER.info("*** Retrieving account IDs ***");

    if (accountIdsSet == null || accountIdsSet.size() == 0) {
        accountIdsSet = this.retrieveAccountIds(mccAccountId);
    } else {
        LOGGER.info("Accounts loaded from file.");
    }

    AdWordsSessionBuilderSynchronizer sessionBuilder = new AdWordsSessionBuilderSynchronizer(
            authenticator.authenticate(mccAccountId, false), getIncludeZeroImpressions(properties));

    LOGGER.info("*** Generating Reports for " + accountIdsSet.size() + " accounts ***");

    Stopwatch stopwatch = Stopwatch.createStarted();

    // reports
    Set<ReportDefinitionReportType> reports = this.csvReportEntitiesMapping.getDefinedReports();

    Set<Object> propertiesKeys = properties.keySet();
    for (Object key : propertiesKeys) {

        String reportDefinitionKey = key.toString();
        ReportDefinitionReportType reportType = this.extractReportTypeFromKey(reportDefinitionKey);
        if (reportType != null && reports.contains(reportType)) {
            this.downloadAndProcess(mccAccountId, sessionBuilder, reportType, dateRangeType, dateStart, dateEnd,
                    accountIdsSet, reportDefinitionKey, properties);
        }
    }

    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in " + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000)
            + " seconds ***\n");
}

From source file:uk.ac.open.kmi.iserve.sal.manager.impl.ConcurrentSparqlGraphStoreManager.java

@Override
public Set<URI> listResourcesByQuery(String queryStr, String variableName) {

    ImmutableSet.Builder<URI> result = ImmutableSet.builder();
    // If the SPARQL endpoint does not exist return immediately.
    if (this.getSparqlQueryEndpoint() == null || queryStr == null || queryStr.isEmpty()) {
        return result.build();
    }

    // Query the engine
    log.debug("Evaluating SPARQL query in Knowledge Base: \n {}", queryStr);
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.getSparqlQueryEndpoint().toASCIIString(),
            query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);

    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        ResultSet qResults = qexec.execSelect();

        stopwatch.stop();
        log.debug("Time taken for querying the registry: {}", stopwatch);

        Resource resource;
        URI matchUri;
        // Iterate over the results obtained
        while (qResults.hasNext()) {
            QuerySolution soln = qResults.nextSolution();

            // Get the match URL
            resource = soln.getResource(variableName);

            if (resource != null && resource.isURIResource()) {
                matchUri = new URI(resource.getURI());
                result.add(matchUri);
            } else {
                log.warn("Skipping result as the URL is null");
                break;
            }
        }
    } catch (URISyntaxException e) {
        log.error("Error obtaining match result. Expected a correct URI", e);
    } finally {
        qexec.close();
    }
    return result.build();
}