Example usage for com.google.common.base Stopwatch stop

Introduction

On this page you can find example usages of com.google.common.base.Stopwatch.stop(), collected from open-source projects.

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch; future reads will return the fixed duration that had elapsed up to this point. The method returns this Stopwatch instance, which is why calls such as stopwatch.stop().elapsed(...) can be chained, and it throws IllegalStateException if the stopwatch is already stopped.
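
The following is a minimal, self-contained sketch of the basic start/stop/read cycle. It assumes Guava 15.0 or later, where the createStarted() factory replaced the now-deprecated public constructor; the class name and the Thread.sleep() stand-in are illustrative only:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchStopExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(100); // stand-in for the work being timed
        stopwatch.stop();  // freezes the reading; throws IllegalStateException if already stopped
        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        System.out.println("took " + millis + " ms (" + stopwatch + ")"); // toString() picks a readable unit
    }
}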

Usage

From source file:org.bitcoinj_extra.params.AbstractBitcoinNetParams.java

@Override
public void checkDifficultyTransitions(final StoredBlock storedPrev, final Block nextBlock,
        final BlockStore blockStore) throws VerificationException, BlockStoreException {
    Block prev = storedPrev.getHeader();

    // Is this supposed to be a difficulty transition point?
    if (!isDifficultyTransitionPoint(storedPrev)) {

        // No ... so check the difficulty didn't actually change.
        if (nextBlock.getDifficultyTarget() != prev.getDifficultyTarget())
            throw new VerificationException("Unexpected change in difficulty at height "
                    + storedPrev.getHeight() + ": " + Long.toHexString(nextBlock.getDifficultyTarget()) + " vs "
                    + Long.toHexString(prev.getDifficultyTarget()));
        return;
    }

    // We need to find a block far back in the chain. It's OK that this is expensive because it only occurs every
    // two weeks after the initial block chain download.
    final Stopwatch watch = Stopwatch.createStarted();
    StoredBlock cursor = blockStore.get(prev.getHash());
    for (int i = 0; i < this.getInterval() - 1; i++) {
        if (cursor == null) {
            // This should never happen. If it does, it means we are following an incorrect or busted chain.
            throw new VerificationException(
                    "Difficulty transition point but we did not find a way back to the genesis block.");
        }
        cursor = blockStore.get(cursor.getHeader().getPrevBlockHash());
    }
    watch.stop();
    if (watch.elapsed(TimeUnit.MILLISECONDS) > 50)
        log.info("Difficulty transition traversal took {}", watch);

    Block blockIntervalAgo = cursor.getHeader();
    int timespan = (int) (prev.getTimeSeconds() - blockIntervalAgo.getTimeSeconds());
    // Limit the adjustment step.
    final int targetTimespan = this.getTargetTimespan();
    if (timespan < targetTimespan / 4)
        timespan = targetTimespan / 4;
    if (timespan > targetTimespan * 4)
        timespan = targetTimespan * 4;

    BigInteger newTarget = Utils.decodeCompactBits(prev.getDifficultyTarget());
    newTarget = newTarget.multiply(BigInteger.valueOf(timespan));
    newTarget = newTarget.divide(BigInteger.valueOf(targetTimespan));

    if (newTarget.compareTo(this.getMaxTarget()) > 0) {
        log.info("Difficulty hit proof of work limit: {}", newTarget.toString(16));
        newTarget = this.getMaxTarget();
    }

    int accuracyBytes = (int) (nextBlock.getDifficultyTarget() >>> 24) - 3;
    long receivedTargetCompact = nextBlock.getDifficultyTarget();

    // The calculated difficulty is to a higher precision than received, so reduce here.
    BigInteger mask = BigInteger.valueOf(0xFFFFFFL).shiftLeft(accuracyBytes * 8);
    newTarget = newTarget.and(mask);
    long newTargetCompact = Utils.encodeCompactBits(newTarget);

    if (newTargetCompact != receivedTargetCompact)
        throw new VerificationException("Network provided difficulty bits do not match what was calculated: "
                + Long.toHexString(newTargetCompact) + " vs " + Long.toHexString(receivedTargetCompact));
}
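
This example shows the measure-then-gate pattern: the watch is stopped exactly once after the chain traversal, elapsed(TimeUnit.MILLISECONDS) is compared against a 50 ms threshold, and the Stopwatch itself is handed to the logger so its toString() renders a human-readable duration.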

From source file:com.google.api.ads.adwords.awreporting.downloader.MultipleClientReportDownloader.java

/**
 * Downloads the specified report for all specified account IDs, printing out the list of
 * accounts that failed. Returns a collection of files for all successful downloads.
 *
 * @param sessionBuilder the synchronized session builder used to create a session per account.
 * @param reportDefinition report to download.
 * @param accountIds account IDs to download the report for.
 * @return Collection of File objects of downloaded/unzipped reports.
 * @throws InterruptedException error trying to stop downloader thread.
 * @throws ValidationException
 */
public Collection<File> downloadReports(final AdWordsSessionBuilderSynchronizer sessionBuilder,
        final ReportDefinition reportDefinition, final Set<Long> accountIds)
        throws InterruptedException, ValidationException {

    final Collection<Long> failed = new ConcurrentSkipListSet<Long>();
    final Collection<File> results = new ConcurrentSkipListSet<File>();

    // We use a Latch so the main thread knows when all the worker threads are complete.
    final CountDownLatch latch = new CountDownLatch(accountIds.size());

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (final Long accountId : accountIds) {

        // We create a copy of the AdWordsSession specific for the Account
        AdWordsSession adWordsSession = sessionBuilder.getAdWordsSessionCopy(accountId);

        RunnableDownloader downloader = new RunnableDownloader(this.retriesCount, this.backoffInterval,
                this.bufferSize, accountId, reportDefinition, adWordsSession, results);
        downloader.setFailed(failed);
        executeRunnableDownloader(downloader, latch);
    }

    latch.await();
    stopwatch.stop();
    return this.printResultsAndReturn(results, stopwatch.elapsed(TimeUnit.MILLISECONDS), failed, accountIds);
}
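
Here the stopwatch brackets a whole fan-out of worker threads: it starts before any downloader is queued, and stop() is only called after the CountDownLatch confirms every worker has finished, so the elapsed milliseconds passed to printResultsAndReturn reflect total wall-clock time for the batch.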

From source file:es.usc.citius.composit.core.composition.optimization.FunctionalDominanceOptimizer.java

public ServiceMatchNetwork<E, T> optimize(ServiceMatchNetwork<E, T> network) {
    // Analyze functional dominance between services. This optimization
    // identifies all dominant services using the semantic inputs and outputs
    // and the existing matches between the concepts in the graph.
    Stopwatch globalWatch = Stopwatch.createStarted();
    Stopwatch localWatch = Stopwatch.createUnstarted();
    List<Set<Operation<E>>> optimized = new ArrayList<Set<Operation<E>>>(network.numberOfLevels());
    log.debug("Starting functional dominance optimization...");
    for (int i = 0; i < network.numberOfLevels(); i++) {
        // Analyze input dominance
        log.debug(" > Analyzing functional dominance on {} (network level {})", network.getOperationsAtLevel(i),
                i);
        localWatch.start();
        Collection<Collection<Operation<E>>> groups = functionalInputEquivalence(network, i);
        localWatch.stop();
        log.debug("\t\tInput equivalence groups: {} (computed in {})", groups, localWatch.toString());
        localWatch.reset();
        // For each equivalent group in this level, check the output dominance
        Set<Operation<E>> nonDominatedServices = new HashSet<Operation<E>>();
        for (Collection<Operation<E>> group : groups) {
            log.debug("\t\tAnalyzing output dominance for group {}", group);
            localWatch.start();
            Collection<Collection<Operation<E>>> nonDominatedGroups = functionalOutputDominance(group, network,
                    i);
            localWatch.stop();
            log.debug("\t\t\t+ Non-dominated groups detected: {} (computed in {})", nonDominatedGroups,
                    localWatch.toString());
            log.debug("\t\t\t+ Size before / after output dominance {}/{}", group.size(),
                    nonDominatedGroups.size());
            // Pick one non dominated service for each group randomly.
            for (Collection<Operation<E>> ndGroup : nonDominatedGroups) {
                Operation<E> representant = ndGroup.iterator().next();
                log.debug("\t\t\t\t- {} has been selected as the representative service of the group {}",
                        representant, ndGroup);
                nonDominatedServices.add(representant);
            }
        }
        optimized.add(nonDominatedServices);
    }
    localWatch.reset().start();
    DirectedAcyclicSMN<E, T> optimizedNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(optimized), network);
    localWatch.stop();
    log.debug(" > Functional optimized match network computed in {}", localWatch.toString());
    log.debug("Functional Dominance Optimization done in {}. Size before/after {}/{}.",
            globalWatch.stop().toString(), network.listOperations().size(),
            optimizedNetwork.listOperations().size());
    return optimizedNetwork;
}
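
This optimizer pairs a long-lived globalWatch with a reusable localWatch that cycles through start(), stop(), and reset() for each analysis phase. Since stop() returns the Stopwatch itself, the final log statement can chain globalWatch.stop().toString() in a single expression.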

From source file:com.twitter.distributedlog.lock.DistributedLock.java

/**
 * Asynchronously acquire the lock. Technically the try phase of this operation--which adds us to the waiter
 * list--is executed synchronously, but the lock wait itself doesn't block.
 */
public synchronized Future<DistributedLock> asyncAcquire() {
    if (null != lockAcquireFuture) {
        return Future
                .exception(new UnexpectedException("Someone is already acquiring/acquired lock " + lockPath));
    }
    final Promise<DistributedLock> promise = new Promise<DistributedLock>(new Function<Throwable, BoxedUnit>() {
        @Override
        public BoxedUnit apply(Throwable cause) {
            lockStateExecutor.submit(lockPath, new Runnable() {
                @Override
                public void run() {
                    asyncClose();
                }
            });
            return BoxedUnit.UNIT;
        }
    });
    final Stopwatch stopwatch = Stopwatch.createStarted();
    promise.addEventListener(new FutureEventListener<DistributedLock>() {
        @Override
        public void onSuccess(DistributedLock lock) {
            acquireStats.registerSuccessfulEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        }

        @Override
        public void onFailure(Throwable cause) {
            acquireStats.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
            // release the lock if fail to acquire
            asyncClose();
        }
    });
    this.lockAcquireFuture = promise;
    lockStateExecutor.submit(lockPath, new Runnable() {
        @Override
        public void run() {
            doAsyncAcquire(promise, lockTimeout);
        }
    });
    return promise;
}

From source file:org.sonatype.nexus.proxy.storage.remote.httpclient.HttpClientRemoteStorage.java

/**
 * Executes the HTTP request.
 * <p/>
 * In case of any exception thrown by HttpClient, this method releases the connection. In all other cases it is
 * the duty of the caller to do so, or to process the input stream.
 *
 * @param repository the repository to execute the HTTP method for
 * @param request resource store request that triggered the HTTP request
 * @param httpRequest HTTP request to be executed
 * @return response of making the request
 * @throws RemoteStorageException If an error occurred during execution of HTTP request
 */
@VisibleForTesting
HttpResponse executeRequest(final ProxyRepository repository, final ResourceStoreRequest request,
        final HttpUriRequest httpRequest) throws RemoteStorageException {
    final Stopwatch stopwatch = timingLog.isDebugEnabled() ? new Stopwatch().start() : null;
    try {
        return doExecuteRequest(repository, request, httpRequest);
    } finally {
        if (stopwatch != null) {
            stopwatch.stop();
            if (timingLog.isDebugEnabled()) {
                timingLog.debug("[{}] {} {} took {}", repository.getId(), httpRequest.getMethod(),
                        httpRequest.getURI(), stopwatch);
            }
        }
    }
}
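
The stopwatch is created and started only when debug-level timing logs are enabled, and stopped in the finally block so the request is timed even when doExecuteRequest throws. Note that this excerpt uses the new Stopwatch() constructor, which was deprecated in Guava 15.0 in favor of Stopwatch.createStarted() and Stopwatch.createUnstarted().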

From source file:org.apache.accumulo.master.replication.RemoveCompleteReplicationRecords.java

@Override
public void run() {
    BatchScanner bs;
    BatchWriter bw;
    try {
        bs = ReplicationTable.getBatchScanner(conn, 4);
        bw = ReplicationTable.getBatchWriter(conn);

        if (bs == null || bw == null)
            throw new AssertionError(
                    "Inconceivable; an exception should have been thrown, but 'bs' or 'bw' was null instead");
    } catch (ReplicationTableOfflineException e) {
        log.debug("Not attempting to remove complete replication records as the table ({}) isn't yet online",
                ReplicationTable.NAME);
        return;
    }

    bs.setRanges(Collections.singleton(new Range()));
    IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
    StatusSection.limit(bs);
    WorkSection.limit(bs);
    bs.addScanIterator(cfg);

    Stopwatch sw = new Stopwatch();
    long recordsRemoved = 0;
    try {
        sw.start();
        recordsRemoved = removeCompleteRecords(conn, bs, bw);
    } finally {
        if (null != bs) {
            bs.close();
        }
        if (null != bw) {
            try {
                bw.close();
            } catch (MutationsRejectedException e) {
                log.error("Error writing mutations to {}, will retry", ReplicationTable.NAME, e);
            }
        }

        sw.stop();
    }

    log.info("Removed {} complete replication entries from the table {}", recordsRemoved,
            ReplicationTable.NAME);
}
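
Placing sw.stop() in the finally block guarantees the watch ends up stopped even if removeCompleteRecords throws, which keeps any later elapsed() reading well-defined. As in the previous example, new Stopwatch() is the pre-Guava-15 construction style.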

From source file:com.spotify.missinglink.maven.CheckMojo.java

private Collection<Conflict> loadArtifactsAndCheckConflicts() {
    // includes declared and transitive dependencies, anything in the scopes configured to be
    // included
    final List<org.apache.maven.artifact.Artifact> projectDeps = this.project.getArtifacts().stream()
            .filter(artifact -> includeScopes.contains(Scope.valueOf(artifact.getScope())))
            .collect(Collectors.toList());

    getLog().debug("project dependencies: "
            + projectDeps.stream().map(this::mavenCoordinates).collect(Collectors.toList()));

    Stopwatch stopwatch = Stopwatch.createStarted();
    // artifacts in runtime scope from the maven project (including transitives)
    final ImmutableList<Artifact> runtimeProjectArtifacts = constructArtifacts(projectDeps);
    stopwatch.stop();
    getLog().debug("constructing runtime artifacts took: " + asMillis(stopwatch) + " ms");

    // also need to load JDK classes from the bootstrap classpath
    final String bootstrapClasspath = bootClassPathToUse();

    stopwatch.reset().start();

    final List<Artifact> bootstrapArtifacts = constructArtifacts(
            Arrays.<String>asList(bootstrapClasspath.split(System.getProperty("path.separator"))));

    stopwatch.stop();
    getLog().debug("constructing bootstrap artifacts took: " + asMillis(stopwatch) + " ms");

    final ImmutableList<Artifact> allArtifacts = ImmutableList.<Artifact>builder()
            .addAll(runtimeProjectArtifacts).addAll(bootstrapArtifacts).build();

    final ImmutableList<Artifact> runtimeArtifactsAfterExclusions = ImmutableList.copyOf(runtimeProjectArtifacts
            .stream().filter(artifact -> !isExcluded(artifact)).collect(Collectors.toSet()));

    final Artifact projectArtifact = toArtifact(project.getBuild().getOutputDirectory());

    if (projectArtifact.classes().isEmpty()) {
        getLog().warn("No classes found in project build directory" + " - did you run 'mvn compile' first?");
    }

    stopwatch.reset().start();

    getLog().debug("Checking for conflicts starting from " + projectArtifact.name().name());
    getLog().debug("Artifacts included in the project: ");
    for (Artifact artifact : runtimeArtifactsAfterExclusions) {
        getLog().debug("    " + artifact.name().name());
    }

    final Collection<Conflict> conflicts = conflictChecker.check(projectArtifact,
            runtimeArtifactsAfterExclusions, allArtifacts);

    stopwatch.stop();
    getLog().debug("conflict checking took: " + asMillis(stopwatch) + " ms");

    getLog().debug(conflicts.size() + " total conflicts found");
    return conflicts;
}
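
One stopwatch is reused across three measurement phases via the stopwatch.reset().start() idiom, with stop() and a read after each phase, so every timing starts from zero; asMillis is presumably a local helper that converts the elapsed time to milliseconds.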

From source file:monasca.persister.repository.vertica.VerticaMetricRepo.java

private void writeRowsFromTempStagingTablesToPermTables(String id) {

    Stopwatch sw = Stopwatch.createStarted();

    handle.execute(definitionsTempStagingTableInsertStmt);
    handle.execute("truncate table " + definitionsTempStagingTableName);
    sw.stop();

    logger.debug("[{}]: flushing definitions temp staging table took: {}", id, sw);

    sw.reset().start();
    handle.execute(dimensionsTempStagingTableInsertStmt);
    handle.execute("truncate table " + dimensionsTempStagingTableName);
    sw.stop();

    logger.debug("[{}]: flushing dimensions temp staging table took: {}", id, sw);

    sw.reset().start();
    handle.execute(definitionDimensionsTempStagingTableInsertStmt);
    handle.execute("truncate table " + definitionDimensionsTempStagingTableName);
    sw.stop();

    logger.debug("[{}]: flushing definition dimensions temp staging table took: {}", id, sw);
}
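
The same reset-and-reuse idiom appears here for three sequential flush phases: each is bracketed by sw.reset().start() and sw.stop(), and the Stopwatch object itself is passed to the logger, which formats it through toString().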

From source file:cosmos.mapred.MediawikiQueries.java

public void run(int numIterations) throws Exception {
    final Random offsetR = new Random(), cardinalityR = new Random();

    int iters = 0;

    while (iters < numIterations) {
        Store id = Store.create(this.con,
                this.con.securityOperations().getUserAuthorizations(this.con.whoami()),
                IdentitySet.<Index>create());

        int offset = offsetR.nextInt(MAX_OFFSET);
        int numRecords = cardinalityR.nextInt(MAX_SIZE) + 1;

        BatchScanner bs = this.con.createBatchScanner("sortswiki", new Authorizations(), 4);

        bs.setRanges(Collections.singleton(new Range(Integer.toString(offset), Integer.toString(MAX_ROW))));

        Iterable<Entry<Key, Value>> inputIterable = Iterables.limit(bs, numRecords);

        this.sorts.register(id);

        System.out.println(Thread.currentThread().getName() + ": " + id.uuid() + " - Iteration " + iters);
        long recordsReturned = 0L;
        Function<Entry<Key, Value>, MultimapRecord> func = new Function<Entry<Key, Value>, MultimapRecord>() {
            @Override
            public MultimapRecord apply(Entry<Key, Value> input) {
                Page p;
                try {
                    p = Page.parseFrom(input.getValue().get());
                } catch (InvalidProtocolBufferException e) {
                    throw new RuntimeException(e);
                }
                return pagesToQueryResult(p);
            }
        };

        Map<Column, Long> counts = Maps.newHashMap();
        ArrayList<MultimapRecord> tformSource = Lists.newArrayListWithCapacity(20000);

        Stopwatch sw = new Stopwatch();
        Stopwatch tformSw = new Stopwatch();

        for (Entry<Key, Value> input : inputIterable) {
            tformSw.start();

            MultimapRecord r = func.apply(input);
            tformSource.add(r);

            tformSw.stop();

            loadCountsForRecord(counts, r);
            recordsReturned++;
        }

        sw.start();
        this.sorts.addResults(id, tformSource);
        sw.stop();

        long actualNumResults = tformSource.size();

        System.out.println(Thread.currentThread().getName() + ": Took " + tformSw + " transforming and " + sw
                + " to store " + recordsReturned + " records");
        logTiming(actualNumResults, tformSw.elapsed(TimeUnit.MILLISECONDS), "transformInput");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "ingest");

        bs.close();

        Random r = new Random();
        int max = r.nextInt(10) + 1;

        // Run a bunch of queries
        for (int count = 0; count < max; count++) {
            long resultCount;
            String name;
            int i = r.nextInt(9);

            if (0 == i) {
                resultCount = docIdFetch(id, counts, actualNumResults);
                name = "docIdFetch";
            } else if (1 == i) {
                resultCount = columnFetch(id, REVISION_ID, counts, actualNumResults);
                name = "revisionIdFetch";
            } else if (2 == i) {
                resultCount = columnFetch(id, PAGE_ID, counts, actualNumResults);
                name = "pageIdFetch";
            } else if (3 == i) {
                groupBy(id, REVISION_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByRevisionId";
            } else if (4 == i) {
                groupBy(id, PAGE_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByRevisionId";
            } else if (5 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                name = "contributorUsernameFetch";
            } else if (6 == i) {
                groupBy(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByContributorUsername";
            } else if (7 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_ID, counts, actualNumResults);
                name = "contributorIdFetch";
            } else { // 8 == i
                groupBy(id, CONTRIBUTOR_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByContributorID";
            }
        }
        System.out.println(Thread.currentThread().getName() + ": not deleting " + id);
        // Delete the results
        sw = new Stopwatch();

        sw.start();

        this.sorts.delete(id);
        sw.stop();

        System.out.println(Thread.currentThread().getName() + ": Took " + sw.toString() + " to delete results");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "deleteResults");

        iters++;
    }

    this.sorts.close();
}
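
Two details are worth noting here. First, tformSw is started and stopped inside the loop without ever being reset; a Guava Stopwatch accumulates elapsed time across start()/stop() cycles, so the final reading is the total time spent transforming records, excluding the rest of each iteration. Second, new Stopwatch() is again the pre-Guava-15 construction style.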

From source file:com.cinchapi.concourse.server.ManagedConcourseServer.java

/**
 * Install a Concourse Server in {@code directory} using {@code installer}.
 *
 * @param installer
 * @param directory
 * @return the server install directory
 */
private static String install(String installer, String directory) {
    try {
        Files.createDirectories(Paths.get(directory));
        Path binary = Paths.get(directory + File.separator + TARGET_BINARY_NAME);
        Files.deleteIfExists(binary);
        Files.copy(Paths.get(installer), binary);
        ProcessBuilder builder = new ProcessBuilder(Lists.newArrayList("sh", binary.toString()));
        builder.directory(new File(directory));
        builder.redirectErrorStream();
        Process process = builder.start();
        // The concourse-server installer prompts for an admin password in
        // order to optionally make the install available system-wide. To get
        // around this prompt, we have to "kill" the process, otherwise the
        // server install will hang.
        Stopwatch watch = Stopwatch.createStarted();
        while (watch.elapsed(TimeUnit.SECONDS) < 1) {
            continue;
        }
        watch.stop();
        process.destroy();
        TerminalFactory.get().restore();
        String application = directory + File.separator + "concourse-server"; // the install directory for the
                                                                              // concourse-server application
        process = Runtime.getRuntime().exec("ls " + application);
        List<String> output = Processes.getStdOut(process);
        if (!output.isEmpty()) {
            Files.deleteIfExists(Paths.get(application, "conf/concourse.prefs.dev")); // delete the dev prefs
                                                                                      // because those would
                                                                                      // take precedence over
                                                                                      // what is configured
                                                                                      // in this class
            configure(application);
            log.info("Successfully installed server in {}", application);
            return application;
        } else {
            throw new RuntimeException(MessageFormat.format(
                    "Unsuccessful attempt to " + "install server at {0} " + "using binary from {1}", directory,
                    installer));
        }

    } catch (Exception e) {
        throw Throwables.propagate(e);
    }

}
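
The stopwatch here drives a deliberate one-second busy-wait, polling elapsed(TimeUnit.SECONDS) until the threshold passes before stopping the watch and destroying the installer process; a plain Thread.sleep(1000) would normally be the lighter-weight way to achieve the same pause.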