Example usage for com.google.common.base Stopwatch elapsed

Introduction

This page lists example usages of com.google.common.base Stopwatch.elapsed, collected from open source projects.

Prototype

@CheckReturnValue
public long elapsed(TimeUnit desiredUnit) 

Document

Returns the current elapsed time shown on this stopwatch, expressed in the desired time unit, with any fraction rounded down.
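
As a quick, self-contained sketch (not taken from the examples below; it assumes Guava 15+, where createStarted() and elapsed(TimeUnit) are both available):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchElapsedDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(1234);
        // the fraction is rounded down: ~1234 ms reads as 1 full second
        System.out.println(stopwatch.elapsed(TimeUnit.SECONDS) + " s");
        System.out.println(stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        stopwatch.stop();
        // after stop(), elapsed() keeps returning the frozen duration
        System.out.println(stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms after stop");
    }
}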

Usage

From source file:qa.qcri.nadeef.core.pipeline.ViolationDetector.java
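
In this detector pipeline, the stopwatch brackets a multi-threaded detection pass: worker futures are submitted and awaited, then the elapsed milliseconds are appended to a performance report.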

/**
 * {@inheritDoc}
 */
@Override
@SuppressWarnings("unchecked")
public Collection<Violation> execute(Optional emptyInput) throws Exception {
    detectCount = 0;
    totalThreadCount = 0;
    finishedThreadCount = 0;

    Rule rule = getCurrentContext().getRule();
    IteratorBlockingQueue iteratorBlockingQueue = new IteratorBlockingQueue();
    resultCollection.clear();
    List<Object> tupleList;
    Stopwatch stopwatch = Stopwatch.createStarted();
    List<ListenableFuture<Integer>> futures = Lists.newArrayList();
    while (true) {
        tupleList = iteratorBlockingQueue.poll();
        if (tupleList.size() == 0) {
            break;
        }

        totalThreadCount++;
        ListenableFuture<Integer> future = service.submit(new Detector(tupleList, rule));
        futures.add(future);
        Futures.addCallback(future, new DetectorCallback());
    }

    // wait until all the futures finished
    for (ListenableFuture<Integer> future : futures) {
        future.get();
    }

    PerfReport.appendMetric(PerfReport.Metric.DetectTimeOnly, stopwatch.elapsed(TimeUnit.MILLISECONDS));
    PerfReport.appendMetric(PerfReport.Metric.DetectCount, detectCount);
    PerfReport.appendMetric(PerfReport.Metric.DetectThreadCount, totalThreadCount);
    stopwatch.stop();
    return resultCollection;
}

From source file:org.glowroot.agent.embedded.util.DataSource.java
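
Here the stopwatch both times each disk-space and row-count query and throttles the loop: after each query, the thread sleeps for a tenth of the elapsed time so other threads can use the data source.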

private List<H2Table> analyzeH2DiskSpaceUnderSuppressQueryTimeout() throws Exception {
    List<H2Table> tables = Lists.newArrayList();
    for (String tableName : getAllTableNames()) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        long bytes = queryForLong("call disk_space_used (?)", tableName);
        // sleep a bit to allow some other threads to use the data source
        MILLISECONDS.sleep(stopwatch.elapsed(MILLISECONDS) / 10);
        stopwatch.reset().start();
        long rows = queryForLong("select count(*) from " + tableName);
        // sleep a bit to allow some other threads to use the data source
        MILLISECONDS.sleep(stopwatch.elapsed(MILLISECONDS) / 10);
        tables.add(ImmutableH2Table.builder().name(tableName).bytes(bytes).rows(rows).build());
    }
    return tables;
}

From source file:brooklyn.management.internal.EntityManagementSupport.java
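
This management-stop handler uses the stopwatch to cap a wait loop: if the entity is still becoming managed after 30 seconds, checked via elapsed(TimeUnit.SECONDS), it logs an error and proceeds to stopping.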

@SuppressWarnings("deprecation")
public void onManagementStopping(ManagementTransitionInfo info) {
    synchronized (this) {
        if (managementContext != info.getManagementContext()) {
            throw new IllegalStateException("onManagementStopping encountered different management context for "
                    + entity
                    + (!wasDeployed() ? " (wasn't deployed)" : !isDeployed() ? " (no longer deployed)" : "")
                    + ": " + managementContext + "; expected " + info.getManagementContext());
        }
        Stopwatch startTime = Stopwatch.createStarted();
        while (!managementFailed.get() && nonDeploymentManagementContext != null
                && nonDeploymentManagementContext
                        .getMode() == NonDeploymentManagementContextMode.MANAGEMENT_STARTING) {
            // still becoming managed
            try {
                if (startTime.elapsed(TimeUnit.SECONDS) > 30) {
                    // emergency fix, 30s timeout for management starting
                    log.error("Management stopping event " + info + " in " + this
                            + " timed out waiting for start; proceeding to stopping");
                    break;
                }
                wait(100);
            } catch (InterruptedException e) {
                Exceptions.propagate(e);
            }
        }
        if (nonDeploymentManagementContext == null) {
            nonDeploymentManagementContext = new NonDeploymentManagementContext(entity,
                    NonDeploymentManagementContextMode.MANAGEMENT_STOPPING);
        } else {
            // already stopped? or not started?
            nonDeploymentManagementContext.setMode(
                    NonDeploymentManagementContext.NonDeploymentManagementContextMode.MANAGEMENT_STOPPING);
        }
    }
    // TODO custom stopping activities
    // TODO framework stopping events - no more sensors, executions, etc
    // (elaborate or remove ^^^ ? -AH, Sept 2014)

    if (!isReadOnly() && info.getMode().isDestroying()) {
        // if we support remote parent of local child, the following call will need to be properly remoted
        if (entity.getParent() != null)
            entity.getParent().removeChild(entity.getProxyIfAvailable());
    }
    // new subscriptions will be queued / not allowed
    nonDeploymentManagementContext.getSubscriptionManager().stopDelegatingForSubscribing();
    // new publications will be queued / not allowed
    nonDeploymentManagementContext.getSubscriptionManager().stopDelegatingForPublishing();

    if (!isReadOnly()) {
        entity.onManagementNoLongerMaster();
        entity.onManagementStopped();
    }
}

From source file:org.apache.drill.exec.store.parquet.columnreaders.AsyncPageReader.java
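
Drill times dictionary-page decoding at nanosecond resolution and accumulates the result into the reader's decode statistics.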

private void readDictionaryPageData(final ReadStatus readStatus, final ColumnReader<?> parentStatus)
        throws UserException {
    try {
        pageHeader = readStatus.getPageHeader();
        int uncompressedSize = pageHeader.getUncompressed_page_size();
        final DrillBuf dictionaryData = getDecompressedPageData(readStatus);
        Stopwatch timer = Stopwatch.createStarted();
        allocatedDictionaryBuffers.add(dictionaryData);
        DictionaryPage page = new DictionaryPage(asBytesInput(dictionaryData, 0, uncompressedSize),
                pageHeader.uncompressed_page_size, pageHeader.dictionary_page_header.num_values,
                valueOf(pageHeader.dictionary_page_header.encoding.name()));
        this.dictionary = page.getEncoding().initDictionary(parentStatus.columnDescriptor, page);
        long timeToDecode = timer.elapsed(TimeUnit.NANOSECONDS);
        stats.timeDictPageDecode.addAndGet(timeToDecode);
    } catch (Exception e) {
        handleAndThrowException(e, "Error decoding dictionary page.");
    }
}

From source file:com.vmware.photon.controller.rootscheduler.xenon.task.PlacementTaskService.java
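
This placement task logs the elapsed milliseconds of two phases separately: candidate retrieval, via the stopwatch passed in, and candidate scoring, via a second stopwatch started just before the score requests go out.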

/**
 * Helper method to process the candidates returned by getPotentialCandidates.
 */
private void handleGetCandidateResult(PlacementTask currentState, Operation postOperation,
        Map<String, ServerAddress> candidates, Stopwatch placementWatch) {

    ServiceUtils.logInfo(this, "elapsed-time flat-place-get-candidates %d milliseconds",
            placementWatch.elapsed(TimeUnit.MILLISECONDS));

    if (candidates.isEmpty()) {
        String msg = String.format("Place failure, constraints cannot be satisfied for request: %s",
                currentState.resource);
        PlacementTask patchState = buildPatch(TaskState.TaskStage.FAILED, currentState.taskState.isDirect,
                null);
        patchState.resultCode = PlaceResultCode.NO_SUCH_RESOURCE;
        patchState.error = msg;
        failTask(patchState, new Throwable(msg), postOperation);
        return;
    }

    // Send place request to the candidates to get a score for each one
    ServiceUtils.logInfo(this, "Sending place requests to %s with timeout %d ms", candidates,
            currentState.timeoutMs);
    Stopwatch scoreCandidatesStopwatch = Stopwatch.createStarted();

    queryHostsForScores(currentState.resource, candidates, (okResponses, allResponses) -> {
        ServiceUtils.logInfo(this, "elapsed-time flat-place-score-candidates %d milliseconds",
                scoreCandidatesStopwatch.elapsed(TimeUnit.MILLISECONDS));

        // Return the best response.
        PlacementTask patchState = selectBestResponse(okResponses, allResponses, currentState, placementWatch);
        if (postOperation == null) {
            TaskUtils.sendSelfPatch(this, patchState);
        } else {
            postOperation.setBody(patchState).complete();
        }
    });
}

From source file:org.apache.brooklyn.core.management.internal.EntityManagementSupport.java
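
The Apache Brooklyn version of the same management-stop handler shown above, again using elapsed(TimeUnit.SECONDS) to enforce a 30-second timeout on the wait loop.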

@SuppressWarnings("deprecation")
public void onManagementStopping(ManagementTransitionInfo info) {
    synchronized (this) {
        if (managementContext != info.getManagementContext()) {
            throw new IllegalStateException("onManagementStopping encountered different management context for "
                    + entity
                    + (!wasDeployed() ? " (wasn't deployed)" : !isDeployed() ? " (no longer deployed)" : "")
                    + ": " + managementContext + "; expected " + info.getManagementContext()
                    + " (may be a pre-registered entity which was never properly managed)");
        }
        Stopwatch startTime = Stopwatch.createStarted();
        while (!managementFailed.get() && nonDeploymentManagementContext != null
                && nonDeploymentManagementContext
                        .getMode() == NonDeploymentManagementContextMode.MANAGEMENT_STARTING) {
            // still becoming managed
            try {
                if (startTime.elapsed(TimeUnit.SECONDS) > 30) {
                    // emergency fix, 30s timeout for management starting
                    log.error("Management stopping event " + info + " in " + this
                            + " timed out waiting for start; proceeding to stopping");
                    break;
                }
                wait(100);
            } catch (InterruptedException e) {
                Exceptions.propagate(e);
            }
        }
        if (nonDeploymentManagementContext == null) {
            nonDeploymentManagementContext = new NonDeploymentManagementContext(entity,
                    NonDeploymentManagementContextMode.MANAGEMENT_STOPPING);
        } else {
            // already stopped? or not started?
            nonDeploymentManagementContext.setMode(NonDeploymentManagementContextMode.MANAGEMENT_STOPPING);
        }
    }
    // TODO custom stopping activities
    // TODO framework stopping events - no more sensors, executions, etc
    // (elaborate or remove ^^^ ? -AH, Sept 2014)

    if (!isReadOnly() && info.getMode().isDestroying()) {
        // if we support remote parent of local child, the following call will need to be properly remoted
        if (entity.getParent() != null)
            entity.getParent().removeChild(entity.getProxyIfAvailable());
    }
    // new subscriptions will be queued / not allowed
    nonDeploymentManagementContext.getSubscriptionManager().stopDelegatingForSubscribing();
    // new publications will be queued / not allowed
    nonDeploymentManagementContext.getSubscriptionManager().stopDelegatingForPublishing();

    if (!isReadOnly()) {
        entity.onManagementNoLongerMaster();
        entity.onManagementStopped();
    }
}

From source file:org.bin01.db.verifier.Validator.java
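
The verifier subtracts the statement's elapsed milliseconds from the overall query timeout, so fetching the results only gets the remaining time budget.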

private QueryResult executeQuery(String url, String username, String password, Query query, Duration timeout) {
    try (Connection connection = DriverManager.getConnection(url, username, password)) {
        connection.setClientInfo("ApplicationName", "verifier-test:" + queryPair.getName());
        connection.setCatalog(query.getCatalog());
        connection.setSchema(query.getSchema());
        long start = System.nanoTime();

        try (Statement statement = connection.createStatement()) {
            TimeLimiter limiter = new SimpleTimeLimiter();
            Stopwatch stopwatch = Stopwatch.createStarted();
            Statement limitedStatement = limiter.newProxy(statement, Statement.class, timeout.toMillis(),
                    TimeUnit.MILLISECONDS);
            try (final ResultSet resultSet = limitedStatement.executeQuery(query.getQuery())) {
                List<List<Object>> results = limiter.callWithTimeout(getResultSetConverter(resultSet),
                        timeout.toMillis() - stopwatch.elapsed(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS,
                        true);
                return new QueryResult(State.SUCCESS, null, nanosSince(start), results);
            } catch (AssertionError e) {
                if (e.getMessage().startsWith("unimplemented type:")) {
                    return new QueryResult(State.INVALID, null, null, ImmutableList.<List<Object>>of());
                }
                throw e;
            } catch (SQLException | VerifierException e) {
                throw e;
            } catch (UncheckedTimeoutException e) {
                return new QueryResult(State.TIMEOUT, null, null, ImmutableList.<List<Object>>of());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw Throwables.propagate(e);
            } catch (Exception e) {
                throw Throwables.propagate(e);
            }
        }
    } catch (SQLException e) {
        Exception exception = e;
        if (("Error executing query".equals(e.getMessage()) || "Error fetching results".equals(e.getMessage()))
                && (e.getCause() instanceof Exception)) {
            exception = (Exception) e.getCause();
        }
        State state = State.FAILED;
        return new QueryResult(state, exception, null, null);
    } catch (VerifierException e) {
        return new QueryResult(State.TOO_MANY_ROWS, e, null, null);
    }
}

From source file:org.apache.drill.exec.store.kafka.MessageIterator.java
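
Each Kafka poll is timed; the elapsed milliseconds are logged and added to a running total of fetch time, while an empty result after the poll timeout raises a data-read error.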

@Override
public boolean hasNext() {
    if (recordIter != null && recordIter.hasNext()) {
        return true;
    }

    long nextPosition = kafkaConsumer.position(topicPartition);
    if (nextPosition >= endOffset) {
        return false;
    }

    ConsumerRecords<byte[], byte[]> consumerRecords = null;
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        consumerRecords = kafkaConsumer.poll(kafkaPollTimeOut);
    } catch (KafkaException ke) {
        logger.error(ke.getMessage(), ke);
        throw UserException.dataReadError(ke).message(ke.getMessage()).build(logger);
    }
    stopwatch.stop();

    if (consumerRecords.isEmpty()) {
        String errorMsg = new StringBuilder().append("Failed to fetch messages within ")
                .append(kafkaPollTimeOut)
                .append(" milliseconds. Consider increasing the value of the property : ")
                .append(ExecConstants.KAFKA_POLL_TIMEOUT).toString();
        throw UserException.dataReadError().message(errorMsg).build(logger);
    }

    long lastFetchTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
    logger.debug("Total number of messages fetched : {}", consumerRecords.count());
    logger.debug("Time taken to fetch : {} milliseconds", lastFetchTime);
    totalFetchTime += lastFetchTime;

    recordIter = consumerRecords.iterator();
    return recordIter.hasNext();
}

From source file:eu.project.ttc.engines.SyntacticTermGatherer.java
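
The stopwatch here measures how long it takes to count the term-pair comparisons across index classes, logged at debug level alongside the class count.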

@Override
public void collectionProcessComplete() throws AnalysisEngineProcessException {
    LOGGER.info("Starting syntactic term gathering for TermIndex {}",
            this.termIndexResource.getTermIndex().getName());

    TermIndex termIndex = this.termIndexResource.getTermIndex();

    if (termIndexResource.getTermIndex().getTerms().isEmpty())
        return;

    /*
     * Prepare observer and indexes
     */
    for (RunConfig runConfig : RUN_CONFIGS) {
        CustomTermIndex customIndex = termIndex.getCustomIndex(runConfig.indexName);
        customIndex.cleanSingletonKeys();

        // clean biggest classes
        customIndex.cleanEntriesByMaxSize(WARNING_CRITICAL_SIZE);

        CustomIndexStats stats = new CustomIndexStats(customIndex);

        // Display class sizes
        Stopwatch sw1 = Stopwatch.createStarted();
        int k = 0;
        LOGGER.debug("Biggest class is {}, size: {}", stats.getBiggestClass(), stats.getBiggestSize());

        int size;
        for (Integer i : stats.getSizeCounters().keySet()) {
            k++;
            size = stats.getSizeCounters().get(i).size();
            totalComparisons = totalComparisons.add(BigInteger.valueOf(size * i * (i - 1)));
        }
        LOGGER.debug("Number of term pairs to test: " + totalComparisons);
        sw1.stop();
        LOGGER.debug("Time to get the comparisons number: " + sw1.elapsed(TimeUnit.MILLISECONDS));
        LOGGER.debug("Number of classes: " + k);
        if (taskObserver.isPresent())
            taskObserver.get().setTotalTaskWork(totalComparisons.longValue());
    }

    LOGGER.debug("Gathering with default variant rule indexing (source and target patterns)");
    for (RunConfig runConfig : RUN_CONFIGS) {
        gather(runConfig.indexName, runConfig.variantRuleIndex);
        termIndex.dropCustomIndex(runConfig.indexName);
    }

}

From source file:org.terasology.engine.module.ModuleManagerImpl.java
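
Terasology times how long it takes to read cached reflection metadata from disk and logs the elapsed milliseconds on success.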

private Reflections tryLoadReflectionsFromCacheFile(String filename) {
    String version = TerasologyVersion.getInstance().getGitCommit();
    Path root = PathManager.getInstance().getHomePath().resolve("cache");
    Path path = root.resolve(filename + version + ".xml");

    if (Files.exists(path, LinkOption.NOFOLLOW_LINKS)) {
        logger.info("Reading reflection content from file {}", filename);
        Stopwatch sw = Stopwatch.createStarted();
        try (InputStream is = Files.newInputStream(path, StandardOpenOption.READ)) {
            engineReflections = new Reflections(new ConfigurationBuilder());
            engineReflections.collect(is);
            logger.info("Reflections read in {}ms.", sw.elapsed(TimeUnit.MILLISECONDS));
            return engineReflections;
        } catch (IOException e) {
            logger.warn("Failed to read from cache");
        }
    }

    return null;
}