Example usage for com.google.common.base Stopwatch reset

Introduction

This page lists example usages of com.google.common.base Stopwatch.reset().

Prototype

public Stopwatch reset() 

Documentation

Sets the elapsed time for this stopwatch to zero, and places it in a stopped state.
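A minimal sketch of how reset() is typically combined with start() to time successive phases with a single stopwatch; the doFirstPhase() and doSecondPhase() methods below are placeholders, not part of Guava:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchResetSketch {
    public static void main(String[] args) {
        // Time the first phase.
        Stopwatch stopwatch = Stopwatch.createStarted();
        doFirstPhase();
        System.out.println("first phase took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // reset() zeroes the elapsed time and leaves the stopwatch stopped,
        // so it must be started again before timing the next phase.
        stopwatch.reset().start();
        doSecondPhase();
        System.out.println("second phase took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }

    private static void doFirstPhase() { /* placeholder work */ }

    private static void doSecondPhase() { /* placeholder work */ }
}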

Usage

From source file:org.glowroot.agent.embedded.util.DataSource.java

private List<H2Table> analyzeH2DiskSpaceUnderSuppressQueryTimeout() throws Exception {
    List<H2Table> tables = Lists.newArrayList();
    for (String tableName : getAllTableNames()) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        long bytes = queryForLong("call disk_space_used (?)", tableName);
        // sleep a bit to allow some other threads to use the data source
        MILLISECONDS.sleep(stopwatch.elapsed(MILLISECONDS) / 10);
        stopwatch.reset().start();
        long rows = queryForLong("select count(*) from " + tableName);
        // sleep a bit to allow some other threads to use the data source
        MILLISECONDS.sleep(stopwatch.elapsed(MILLISECONDS) / 10);
        tables.add(ImmutableH2Table.builder().name(tableName).bytes(bytes).rows(rows).build());
    }
    return tables;
}

From source file:ch.ge.ve.protopoc.service.protocol.DefaultAuthority.java

@Override
public FinalizationCodePart handleConfirmation(Integer voterIndex, Confirmation confirmation)
        throws IncorrectConfirmationRuntimeException {
    Preconditions.checkState(publicCredentials != null,
            "The public credentials need to have been retrieved first");
    Stopwatch stopwatch = Stopwatch.createStarted();
    List<BigInteger> publicConfirmationCredentials = publicCredentials.stream().map(p -> p.y)
            .collect(Collectors.toList());

    if (!voteConfirmationAuthorityAlgorithms.checkConfirmation(voterIndex, confirmation,
            publicConfirmationCredentials, ballotEntries, confirmationEntries)) {
        throw new IncorrectConfirmationRuntimeException(
                "Confirmation for voter " + voterIndex + " was deemed invalid");
    }
    stopwatch.stop();
    confirmationVerificationTimes.add(stopwatch.elapsed(TimeUnit.MILLISECONDS));

    confirmationEntries.add(new ConfirmationEntry(voterIndex, confirmation));

    stopwatch.reset().start();
    FinalizationCodePart finalization = voteConfirmationAuthorityAlgorithms.getFinalization(voterIndex,
            electorateData.getP(), ballotEntries);
    stopwatch.stop();
    finalizationComputationTimes.add(stopwatch.elapsed(TimeUnit.MILLISECONDS));

    return finalization;
}

From source file:com.google.api.control.Client.java

/**
 * Process a report request.
 *
 * The {@code req} is first passed to the {@code ReportAggregator}. It will either be aggregated
 * with prior requests or sent immediately
 *
 * @param req a {@link ReportRequest}
 */
public void report(ReportRequest req) {
    Preconditions.checkState(running, "Cannot report if it's not running");
    statistics.totalReports.incrementAndGet();
    statistics.reportedOperations.addAndGet(req.getOperationsCount());
    Stopwatch w = Stopwatch.createStarted(ticker);
    boolean reported = reportAggregator.report(req);
    statistics.totalReportCacheUpdateTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
    if (!reported) {
        try {
            statistics.directReports.incrementAndGet();
            w.reset().start();
            transport.services().report(serviceName, req).execute();
            statistics.totalTransportedReportTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
        } catch (IOException e) {
            log.log(Level.SEVERE,
                    String.format("direct send of a report request %s failed because of %s", req, e));
        }
    }

    if (isRunningSchedulerDirectly()) {
        try {
            scheduler.run(false /* don't block */);
        } catch (InterruptedException e) {
            log.log(Level.SEVERE, String.format("direct run of scheduler failed because of %s", e));
        }
    }
    logStatistics();
}

From source file:com.spotify.missinglink.maven.CheckMojo.java

private Collection<Conflict> loadArtifactsAndCheckConflicts() {
    // includes declared and transitive dependencies, anything in the scopes configured to be
    // included
    final List<org.apache.maven.artifact.Artifact> projectDeps = this.project.getArtifacts().stream()
            .filter(artifact -> includeScopes.contains(Scope.valueOf(artifact.getScope())))
            .collect(Collectors.toList());

    getLog().debug("project dependencies: "
            + projectDeps.stream().map(this::mavenCoordinates).collect(Collectors.toList()));

    Stopwatch stopwatch = Stopwatch.createStarted();
    // artifacts in runtime scope from the maven project (including transitives)
    final ImmutableList<Artifact> runtimeProjectArtifacts = constructArtifacts(projectDeps);
    stopwatch.stop();
    getLog().debug("constructing runtime artifacts took: " + asMillis(stopwatch) + " ms");

    // also need to load JDK classes from the bootstrap classpath
    final String bootstrapClasspath = bootClassPathToUse();

    stopwatch.reset().start();

    final List<Artifact> bootstrapArtifacts = constructArtifacts(
            Arrays.<String>asList(bootstrapClasspath.split(System.getProperty("path.separator"))));

    stopwatch.stop();
    getLog().debug("constructing bootstrap artifacts took: " + asMillis(stopwatch) + " ms");

    final ImmutableList<Artifact> allArtifacts = ImmutableList.<Artifact>builder()
            .addAll(runtimeProjectArtifacts).addAll(bootstrapArtifacts).build();

    final ImmutableList<Artifact> runtimeArtifactsAfterExclusions = ImmutableList.copyOf(runtimeProjectArtifacts
            .stream().filter(artifact -> !isExcluded(artifact)).collect(Collectors.toSet()));

    final Artifact projectArtifact = toArtifact(project.getBuild().getOutputDirectory());

    if (projectArtifact.classes().isEmpty()) {
        getLog().warn("No classes found in project build directory" + " - did you run 'mvn compile' first?");
    }

    stopwatch.reset().start();

    getLog().debug("Checking for conflicts starting from " + projectArtifact.name().name());
    getLog().debug("Artifacts included in the project: ");
    for (Artifact artifact : runtimeArtifactsAfterExclusions) {
        getLog().debug("    " + artifact.name().name());
    }

    final Collection<Conflict> conflicts = conflictChecker.check(projectArtifact,
            runtimeArtifactsAfterExclusions, allArtifacts);

    stopwatch.stop();
    getLog().debug("conflict checking took: " + asMillis(stopwatch) + " ms");

    getLog().debug(conflicts.size() + " total conflicts found");
    return conflicts;
}

From source file:com.thinkbiganalytics.feedmgr.nifi.CreateFeedBuilder.java

private long eventTime(Stopwatch eventTime) {
    eventTime.stop();
    long elapsedTime = eventTime.elapsed(TimeUnit.MILLISECONDS);
    eventTime.reset();
    return elapsedTime;
}

From source file:org.apache.hive.ptest.execution.HostExecutor.java

/**
 * Executes parallel test until the parallel work queue is empty. Then
 * executes the isolated tests on the host. During each phase if a
 * AbortDroneException is thrown the drone is removed possibly
 * leaving this host with zero functioning drones. If all drones
 * are removed the host will be replaced before the next run.
 */
private void executeTests(final BlockingQueue<TestBatch> parallelWorkQueue,
        final BlockingQueue<TestBatch> isolatedWorkQueue, final Set<TestBatch> failedTestResults)
        throws Exception {
    if (mShutdown) {
        mLogger.warn("Shutting down host " + mHost.getName());
        return;
    }
    mLogger.info("Starting parallel execution on " + mHost.getName());
    List<ListenableFuture<Void>> droneResults = Lists.newArrayList();
    for (final Drone drone : ImmutableList.copyOf(mDrones)) {
        droneResults.add(mExecutor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                TestBatch batch = null;
                Stopwatch sw = Stopwatch.createUnstarted();
                try {
                    do {
                        batch = parallelWorkQueue.poll(mNumPollSeconds, TimeUnit.SECONDS);
                        if (mShutdown) {
                            mLogger.warn("Shutting down host " + mHost.getName());
                            return null;
                        }
                        if (batch != null) {
                            numParallelBatchesProcessed++;
                            sw.reset().start();
                            try {
                                if (!executeTestBatch(drone, batch, failedTestResults)) {
                                    failedTestResults.add(batch);
                                }
                            } finally {
                                sw.stop();
                                mLogger.info(
                                        "Finished processing parallel batch [{}] on host {}. ElapsedTime(ms)={}",
                                        new Object[] { batch.getName(), getHost().toShortString(),
                                                sw.elapsed(TimeUnit.MILLISECONDS) });
                            }
                        }
                    } while (!mShutdown && !parallelWorkQueue.isEmpty());
                } catch (AbortDroneException ex) {
                    mDrones.remove(drone); // return value not checked due to concurrent access
                    mLogger.error("Aborting drone during parallel execution", ex);
                    if (batch != null) {
                        Preconditions.checkState(parallelWorkQueue.add(batch),
                                "Could not add batch to parallel queue " + batch);
                    }
                }
                return null;
            }
        }));
    }
    if (mShutdown) {
        mLogger.warn("Shutting down host " + mHost.getName());
        return;
    }
    Futures.allAsList(droneResults).get();
    mLogger.info("Starting isolated execution on " + mHost.getName());
    for (Drone drone : ImmutableList.copyOf(mDrones)) {
        TestBatch batch = null;
        Stopwatch sw = Stopwatch.createUnstarted();
        try {
            do {

                batch = isolatedWorkQueue.poll(mNumPollSeconds, TimeUnit.SECONDS);
                if (batch != null) {
                    numIsolatedBatchesProcessed++;
                    sw.reset().start();
                    try {
                        if (!executeTestBatch(drone, batch, failedTestResults)) {
                            failedTestResults.add(batch);
                        }
                    } finally {
                        sw.stop();
                        mLogger.info("Finished processing isolated batch [{}] on host {}. ElapsedTime(ms)={}",
                                new Object[] { batch.getName(), getHost().toShortString(),
                                        sw.elapsed(TimeUnit.MILLISECONDS) });
                    }
                }
            } while (!mShutdown && !isolatedWorkQueue.isEmpty());
        } catch (AbortDroneException ex) {
            mDrones.remove(drone); // return value not checked due to concurrent access
            mLogger.error("Aborting drone during isolated execution", ex);
            if (batch != null) {
                Preconditions.checkState(isolatedWorkQueue.add(batch),
                        "Could not add batch to isolated queue " + batch);
            }
        }
    }
}

From source file:org.terasology.rendering.primitives.ChunkTessellator.java

public ChunkMesh generateMesh(ChunkView chunkView, int meshHeight, int verticalOffset) {
    PerformanceMonitor.startActivity("GenerateMesh");
    ChunkMesh mesh = new ChunkMesh(bufferPool);

    final Stopwatch watch = Stopwatch.createStarted();

    for (int x = 0; x < ChunkConstants.SIZE_X; x++) {
        for (int z = 0; z < ChunkConstants.SIZE_Z; z++) {
            for (int y = verticalOffset; y < verticalOffset + meshHeight; y++) {
                Biome biome = chunkView.getBiome(x, y, z);

                Block block = chunkView.getBlock(x, y, z);
                if (block != null && !block.isInvisible()) {
                    generateBlockVertices(chunkView, mesh, x, y, z, biome);
                }
            }
        }
    }
    watch.stop();

    mesh.setTimeToGenerateBlockVertices((int) watch.elapsed(TimeUnit.MILLISECONDS));

    watch.reset().start();
    generateOptimizedBuffers(chunkView, mesh);
    watch.stop();
    mesh.setTimeToGenerateOptimizedBuffers((int) watch.elapsed(TimeUnit.MILLISECONDS));
    statVertexArrayUpdateCount++;

    PerformanceMonitor.endActivity();
    return mesh;
}

From source file:org.apache.accumulo.gc.replication.CloseWriteAheadLogReferences.java

@Override
public void run() {
    // As long as we depend on a newer Guava than Hadoop uses, we have to make sure we're compatible with
    // what the version they bundle uses.
    Stopwatch sw = new Stopwatch();

    Connector conn;
    try {
        conn = context.getConnector();
    } catch (Exception e) {
        log.error("Could not create connector", e);
        throw new RuntimeException(e);
    }

    if (!ReplicationTable.isOnline(conn)) {
        log.debug("Replication table isn't online, not attempting to clean up wals");
        return;
    }

    Span findWalsSpan = Trace.start("findReferencedWals");
    HashSet<String> closed = null;
    try {
        sw.start();
        closed = getClosedLogs(conn);
    } finally {
        sw.stop();
        findWalsSpan.stop();
    }

    log.info("Found " + closed.size() + " WALs referenced in metadata in " + sw.toString());
    sw.reset();

    Span updateReplicationSpan = Trace.start("updateReplicationTable");
    long recordsClosed = 0;
    try {
        sw.start();
        recordsClosed = updateReplicationEntries(conn, closed);
    } finally {
        sw.stop();
        updateReplicationSpan.stop();
    }

    log.info(
            "Closed " + recordsClosed + " WAL replication references in replication table in " + sw.toString());
}

From source file:org.apache.drill.exec.store.parquet.metadata.Metadata.java

/**
 * Get the parquet metadata for the parquet files in a directory.
 *
 * @param path the path of the directory
 * @return metadata object for an entire parquet directory structure
 * @throws IOException in case of problems during accessing files
 */
private ParquetTableMetadata_v3 getParquetTableMetadata(String path, FileSystem fs) throws IOException {
    Path p = new Path(path);
    FileStatus fileStatus = fs.getFileStatus(p);
    Stopwatch watch = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
    List<FileStatus> fileStatuses = new ArrayList<>();
    if (fileStatus.isFile()) {
        fileStatuses.add(fileStatus);
    } else {
        fileStatuses.addAll(DrillFileSystemUtil.listFiles(fs, p, true));
    }
    if (watch != null) {
        logger.debug("Took {} ms to get file statuses", watch.elapsed(TimeUnit.MILLISECONDS));
        watch.reset();
        watch.start();
    }

    Map<FileStatus, FileSystem> fileStatusMap = fileStatuses.stream().collect(java.util.stream.Collectors
            .toMap(Function.identity(), s -> fs, (oldFs, newFs) -> newFs, LinkedHashMap::new));

    ParquetTableMetadata_v3 metadata_v3 = getParquetTableMetadata(fileStatusMap);
    if (watch != null) {
        logger.debug("Took {} ms to read file metadata", watch.elapsed(TimeUnit.MILLISECONDS));
        watch.stop();
    }
    return metadata_v3;
}

From source file:com.facebook.buck.distributed.DistBuildArtifactCacheImpl.java

@Override
public synchronized void prewarmRemoteContains(ImmutableSet<BuildRule> rulesToBeChecked) {
    @SuppressWarnings("PMD.PrematureDeclaration")
    Stopwatch stopwatch = Stopwatch.createStarted();
    Set<BuildRule> unseenRules = rulesToBeChecked.stream()
            .filter(rule -> !remoteCacheContainsFutures.containsKey(rule)).collect(Collectors.toSet());

    if (unseenRules.isEmpty()) {
        return;
    }

    LOG.info("Checking remote cache for [%d] new rules.", unseenRules.size());
    Map<BuildRule, ListenableFuture<RuleKey>> rulesToKeys = Maps.asMap(unseenRules,
            rule -> ruleKeyCalculator.calculate(eventBus, rule));

    ListenableFuture<Map<RuleKey, CacheResult>> keysToCacheResultFuture = Futures
            .transformAsync(Futures.allAsList(rulesToKeys.values()), ruleKeys -> {
                LOG.info("Computing RuleKeys for %d new rules took %dms.", unseenRules.size(),
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();
                stopwatch.start();
                return multiContainsAsync(ruleKeys);
            }, executorService);

    Map<BuildRule, ListenableFuture<Boolean>> containsResultsForUnseenRules = Maps
            .asMap(unseenRules,
                    rule -> Futures.transform(keysToCacheResultFuture, keysToCacheResult -> Objects
                            .requireNonNull(keysToCacheResult.get(Futures.getUnchecked(rulesToKeys.get(rule))))
                            .getType().isSuccess(), MoreExecutors.directExecutor()));

    remoteCacheContainsFutures.putAll(containsResultsForUnseenRules);
    Futures.allAsList(containsResultsForUnseenRules.values())
            .addListener(() -> LOG.info("Checking the remote cache for %d rules took %dms.", unseenRules.size(),
                    stopwatch.elapsed(TimeUnit.MILLISECONDS)), MoreExecutors.directExecutor());
}