Example usage for com.google.common.base Stopwatch createStarted

Introduction

This page collects usage examples for com.google.common.base.Stopwatch.createStarted().

Prototype

@CheckReturnValue
public static Stopwatch createStarted() 

Document

Creates (and starts) a new stopwatch, using System.nanoTime() as its time source.
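
Before the full examples, here is a minimal, self-contained sketch of the typical pattern; the Thread.sleep call is just a stand-in for whatever work is being timed:

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() returns a stopwatch that is already running.
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(250); // stand-in for the work being timed

        stopwatch.stop();

        // toString() renders a human-readable duration, e.g. "250.1 ms".
        System.out.println("work took " + stopwatch);

        // elapsed(TimeUnit) returns a raw number, convenient for logs and metrics.
        System.out.println("work took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}

The examples below show the same pattern in production code: most either pass the stopwatch straight into a log placeholder (which uses its toString()) or read elapsed(TimeUnit.MILLISECONDS) for numeric logging.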

Usage

From source file:org.zephyrsoft.sdb2.service.IndexerServiceImpl.java
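
This example times a background task that builds an in-memory Lucene index of songs, logging the stopwatch's human-readable elapsed time once indexing finishes.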

@Override
public void index(final IndexType indexType, final Collection<Song> songs) {
    executor.execute(new Runnable() {
        @Override
        public void run() {
            Stopwatch stopwatch = Stopwatch.createStarted();

            Directory directory = new RAMDirectory();
            try {
                LOG.debug("available tokenizers: {}", TokenizerFactory.availableTokenizers());
                LOG.debug("available token filters: {}", TokenFilterFactory.availableTokenFilters());
                Analyzer analyzer = CustomAnalyzer.builder().withTokenizer("standard")
                        .addTokenFilter("lowercase")
                        .addTokenFilter("ngram", "minGramSize", "1", "maxGramSize", "25").build();
                IndexWriterConfig config = new IndexWriterConfig(analyzer);
                try (IndexWriter writer = new IndexWriter(directory, config)) {
                    for (Song song : songs) {
                        Document document = createDocument(song);
                        writer.addDocument(document);
                        songByUuid.put(song.getUUID(), song);
                    }
                } catch (IOException e) {
                    LOG.warn("couldn't index songs", e);
                }
            } catch (IOException e1) {
                LOG.warn("couldn't create analyzer", e1);
            } finally {
                putIndex(indexType, directory);
                stopwatch.stop();
                LOG.info("indexing songs in background thread took {}", stopwatch.toString());
            }
        }
    });
}

From source file:com.google.gerrit.server.index.group.AllGroupsIndexer.java
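
Instead of logging directly, this indexer hands the running stopwatch to SiteIndexer.Result so callers can see how long the parallel reindex of all groups took.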

private SiteIndexer.Result reindexGroups(GroupIndex index, List<AccountGroup.UUID> uuids,
        ProgressMonitor progress) {
    progress.beginTask("Reindexing groups", uuids.size());
    List<ListenableFuture<?>> futures = new ArrayList<>(uuids.size());
    AtomicBoolean ok = new AtomicBoolean(true);
    AtomicInteger done = new AtomicInteger();
    AtomicInteger failed = new AtomicInteger();
    Stopwatch sw = Stopwatch.createStarted();
    for (AccountGroup.UUID uuid : uuids) {
        String desc = "group " + uuid;
        ListenableFuture<?> future = executor.submit(() -> {
            try {
                AccountGroup oldGroup = groupCache.get(uuid);
                if (oldGroup != null) {
                    groupCache.evict(oldGroup);
                }
                index.replace(groupCache.get(uuid));
                verboseWriter.println("Reindexed " + desc);
                done.incrementAndGet();
            } catch (Exception e) {
                failed.incrementAndGet();
                throw e;
            }
            return null;
        });
        addErrorListener(future, desc, progress, ok);
        futures.add(future);
    }

    try {
        Futures.successfulAsList(futures).get();
    } catch (ExecutionException | InterruptedException e) {
        log.error("Error waiting on group futures", e);
        return new SiteIndexer.Result(sw, false, 0, 0);
    }

    progress.endTask();
    return new SiteIndexer.Result(sw, ok.get(), done.get(), failed.get());
}

From source file:org.hawkular.metrics.core.jobs.TempDataCompressor.java
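
This job times an asynchronous compression pass, stopping the stopwatch in the doOnCompleted callback of the returned Completable.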

@Override
public Completable call(JobDetails jobDetails) {
    Duration runtimeBlockSize = Duration.standardHours(2);

    Trigger trigger = jobDetails.getTrigger();
    DateTime timeSliceInclusive = new DateTime(trigger.getTriggerTime(), DateTimeZone.UTC)
            .minus(runtimeBlockSize);

    // Rewind to previous timeslice
    DateTime timeSliceStart = DateTimeService.getTimeSlice(timeSliceInclusive, runtimeBlockSize);
    long startOfSlice = timeSliceStart.getMillis();

    Stopwatch stopwatch = Stopwatch.createStarted();
    logger.infof("Starting to process temp table for starting time of %s", timeSliceStart.toString());

    // TODO Optimization - new worker per token - use parallelism in Cassandra (with configured parallelism)
    return metricsService.compressBlock(startOfSlice, pageSize, maxReadConcurrency).doOnCompleted(() -> {
        stopwatch.stop();
        logger.info("Finished processing data in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    });
}

From source file:com.google.api.ads.adwords.awreporting.downloader.MultipleClientReportDownloader.java
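
Here a single stopwatch measures the total wall-clock time of many concurrent report downloads, which are coordinated with a CountDownLatch.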

/**
 * Downloads the specified report for all specified account IDs, prints out the list of failed
 * accounts, and returns the files for all successful downloads.
 *
 * @param sessionBuilder the synchronized session builder used to create a session per account.
 * @param reportDefinition the report to download.
 * @param accountIds the account IDs (CIDs) to download the report for.
 * @return Collection of File objects of downloaded/unzipped reports.
 * @throws InterruptedException error trying to stop downloader thread.
 * @throws ValidationException
 */
public Collection<File> downloadReports(final AdWordsSessionBuilderSynchronizer sessionBuilder,
        final ReportDefinition reportDefinition, final Set<Long> accountIds)
        throws InterruptedException, ValidationException {

    final Collection<Long> failed = new ConcurrentSkipListSet<Long>();
    final Collection<File> results = new ConcurrentSkipListSet<File>();

    // We use a Latch so the main thread knows when all the worker threads are complete.
    final CountDownLatch latch = new CountDownLatch(accountIds.size());

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (final Long accountId : accountIds) {

        // We create a copy of the AdWordsSession specific for the Account
        AdWordsSession adWordsSession = sessionBuilder.getAdWordsSessionCopy(accountId);

        RunnableDownloader downloader = new RunnableDownloader(this.retriesCount, this.backoffInterval,
                this.bufferSize, accountId, reportDefinition, adWordsSession, results);
        downloader.setFailed(failed);
        executeRunnableDownloader(downloader, latch);
    }

    latch.await();
    stopwatch.stop();
    return this.printResultsAndReturn(results, stopwatch.elapsed(TimeUnit.MILLISECONDS), failed, accountIds);
}

From source file:org.graylog2.system.jobs.SystemJobManager.java
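
The stopwatch wraps the blocking job.execute() call so the elapsed milliseconds can be included in both the log message and the activity record.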

public String submitWithDelay(final SystemJob job, final long delay, TimeUnit timeUnit)
        throws SystemJobConcurrencyException {
    // for immediate jobs, check allowed concurrency right now
    if (delay == 0) {
        checkAllowedConcurrency(job);
    }

    final String jobClass = job.getClass().getCanonicalName();

    job.setId(new UUID().toString());
    jobs.put(job.getId(), job);

    executor.schedule(new Runnable() {
        @Override
        public void run() {
            try {
                if (delay > 0) {
                    checkAllowedConcurrency(job);
                }
                job.markStarted();

                final Stopwatch x = Stopwatch.createStarted();

                job.execute(); // ... blocks until it finishes.
                x.stop();

                final String msg = "SystemJob <" + job.getId() + "> [" + jobClass + "] finished in "
                        + x.elapsed(TimeUnit.MILLISECONDS) + "ms.";
                LOG.info(msg);
                activityWriter.write(new Activity(msg, SystemJobManager.class));
            } catch (SystemJobConcurrencyException ignored) {
            } catch (Exception e) {
                LOG.error("Unhandled error while running SystemJob <" + job.getId() + "> [" + jobClass + "]",
                        e);
            } finally {
                jobs.remove(job.getId());
            }
        }
    }, delay, timeUnit);

    LOG.info("Submitted SystemJob <{}> [{}]", job.getId(), jobClass);
    return job.getId();
}

From source file:org.locationtech.geogig.repository.RevTreeBuilder2.java
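
This example reuses one stopwatch for two phases: it stops and logs after the index traversal, then calls reset().start() to time the tree build.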

/**
 * Traverses the nodes in the {@link NodeIndex}, deletes the ones with {@link ObjectId#NULL
 * NULL} ObjectIds, and adds the ones with non "NULL" ids.
 *
 * @return the new tree, not saved to the object database. Any bucket tree though is saved when
 *         this method returns.
 */
public RevTree build() {
    if (nodeIndex == null) {
        return original.builder(db).build();
    }

    Stopwatch sw = Stopwatch.createStarted();
    RevTreeBuilder builder;
    try {
        builder = new RevTreeBuilder(db, original);
        Iterator<Node> nodes = nodeIndex.nodes();
        while (nodes.hasNext()) {
            Node node = nodes.next();
            if (node.getObjectId().isNull()) {
                builder.remove(node.getName());
            } else {
                builder.put(node);
            }
        }
    } catch (RuntimeException e) {
        e.printStackTrace();
        throw e;
    } finally {
        nodeIndex.close();
    }
    LOGGER.debug("Index traversed in {}", sw.stop());
    sw.reset().start();

    RevTree namedTree = builder.build();
    saveExtraFeatureTypes();
    LOGGER.debug("RevTreeBuilder.build() in {}", sw.stop());
    return namedTree;
}

From source file:org.glowroot.container.Threads.java
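
Here the stopwatch serves as a timeout guard: the loop keeps polling for rogue threads until they exit or five seconds have elapsed.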

public static void postShutdownCheck(Collection<Thread> preExistingThreads) throws InterruptedException {
    // give it 5 seconds to shutdown threads
    Stopwatch stopwatch = Stopwatch.createStarted();
    List<Thread> rogueThreads;
    do {
        rogueThreads = getNonPreExistingThreads(preExistingThreads);
        if (rogueThreads.isEmpty()) {
            // success
            return;
        }
        // make an exception for H2's Generate Seed thread since it can take a bit of time to
        // complete on some systems (e.g. travis-ci), but is otherwise harmless
        if (rogueThreads.size() == 1 && rogueThreads.get(0).getName().equals(getGenerateSeedThreadName())) {
            // success
            return;
        }
        // wait a few milliseconds before trying again
        Thread.sleep(10);
    } while (stopwatch.elapsed(SECONDS) < 5);
    // failure
    throw new RogueThreadsException(rogueThreads);
}

From source file:org.apache.brooklyn.rest.filter.LoggingFilter.java
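
This servlet filter times each HTTP request and uses the elapsed duration, along with errors, to decide how prominently to log the request's completion.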

@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    HttpServletRequest httpRequest = (HttpServletRequest) request;
    HttpServletResponse httpResponse = (HttpServletResponse) response;

    String rid = RequestTaggingFilter.getTag();
    boolean isInteresting = !UNINTERESTING_METHODS.contains(httpRequest.getMethod().toUpperCase());
    boolean shouldLog = (isInteresting && LOG.isDebugEnabled()) || LOG.isTraceEnabled();
    boolean requestErrored = false;
    if (shouldLog) {
        String message = "Request {} starting: {} {} from {}";
        Object[] args = new Object[] { rid, httpRequest.getMethod(), httpRequest.getRequestURI(),
                httpRequest.getRemoteAddr() };
        if (isInteresting) {
            LOG.debug(message, args);
        } else {
            LOG.trace(message, args);
        }
    }

    Stopwatch timer = Stopwatch.createStarted();
    try {
        chain.doFilter(request, response);
    } catch (Throwable e) {
        requestErrored = true;
        isInteresting = true;
        LOG.warn("Request " + rid + " (" + httpRequest.getMethod() + " " + httpRequest.getRequestURI()
                + " from " + httpRequest.getRemoteAddr() + ") failed: " + e, e);
        // Propagate for handling by other filter
        throw Exceptions.propagate(e);
    } finally {
        timer.stop();
        // This logging must not happen before chain.doFilter, or FormMapProvider will not work as expected.
        // Getting the parameter map consumes the request body and only resource methods using @FormParam
        // will work as expected.
        isInteresting |= (timer.elapsed(TimeUnit.SECONDS) - REQUEST_DURATION_LOG_POINT.toSeconds()) > 0;
        if (shouldLog) {
            boolean includeHeaders = requestErrored || httpResponse.getStatus() / 100 == 5
                    || LOG.isTraceEnabled();
            String message = getRequestCompletedMessage(includeHeaders, Duration.of(timer), rid, httpRequest,
                    httpResponse);
            if (requestErrored || isInteresting) {
                LOG.debug(message);
            } else {
                LOG.trace(message);
            }
        }
    }
}

From source file:org.grouplens.lenskit.cli.RecommenderLoader.java
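
Both branches time their work, building a recommender engine or loading one from a file, and log the stopwatch directly for a human-readable duration.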

LenskitRecommenderEngine loadEngine() throws RecommenderBuildException, IOException {
    LenskitConfiguration roots = new LenskitConfiguration();
    roots.addRoot(ItemNameDAO.class);
    File modelFile = options.get("model_file");
    if (modelFile == null) {
        logger.info("creating fresh recommender");
        LenskitRecommenderEngineBuilder builder = LenskitRecommenderEngine.newBuilder();
        for (LenskitConfiguration config : environment.loadConfigurations(getConfigFiles())) {
            builder.addConfiguration(config);
        }
        builder.addConfiguration(input.getConfiguration());
        builder.addConfiguration(roots);
        Stopwatch timer = Stopwatch.createStarted();
        LenskitRecommenderEngine engine = builder.build();
        timer.stop();
        logger.info("built recommender in {}", timer);
        return engine;
    } else {
        logger.info("loading recommender from {}", modelFile);
        LenskitRecommenderEngineLoader loader = LenskitRecommenderEngine.newLoader();
        for (LenskitConfiguration config : environment.loadConfigurations(getConfigFiles())) {
            loader.addConfiguration(config);
        }
        loader.addConfiguration(input.getConfiguration());
        loader.addConfiguration(roots);
        Stopwatch timer = Stopwatch.createStarted();
        LenskitRecommenderEngine engine;
        InputStream input = new FileInputStream(modelFile);
        try {
            input = CompressionMode.autodetect(modelFile).wrapInput(input);
            engine = loader.load(input);
        } finally {
            input.close();
        }
        timer.stop();
        logger.info("loaded recommender in {}", timer);
        return engine;
    }
}

From source file:org.obm.push.store.jdbc.CollectionDaoJdbcImpl.java
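
This DAO measures how long a JDBC DELETE statement takes and logs the elapsed milliseconds.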

@Override
public void resetCollection(Device device, CollectionId collectionId) throws DaoException {
    String statement = "DELETE FROM opush_sync_state WHERE device_id=? AND collection_id=?";
    try (Connection con = dbcp.getConnection(); PreparedStatement ps = con.prepareStatement(statement)) {
        ps.setInt(1, device.getDatabaseId());
        ps.setInt(2, collectionId.asInt());
        Stopwatch stopwatch = Stopwatch.createStarted();
        ps.executeUpdate();

        logger.warn("mappings & states cleared for sync of collection {} of device {}", collectionId,
                device.getDevId());
        logger.warn("Deletion time: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (SQLException e) {
        throw new DaoException(e);
    }
}