Example usage for com.google.common.base Stopwatch createStarted

Introduction

On this page you can find example usages of com.google.common.base.Stopwatch.createStarted.

Prototype

@CheckReturnValue
public static Stopwatch createStarted() 

Document

Creates (and starts) a new stopwatch using System.nanoTime() as its time source.
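
Before the longer excerpts below, here is a minimal, self-contained sketch of the typical lifecycle, assuming only Guava on the classpath: create a started stopwatch, do some work, stop it, then read the elapsed time. Thread.sleep is just a stand-in for real work.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(100); // stand-in for real work

        stopwatch.stop();

        // elapsed(TimeUnit) truncates to the requested unit
        System.out.println("millis: " + stopwatch.elapsed(TimeUnit.MILLISECONDS));

        // toString() picks a readable unit automatically, e.g. "100.4 ms"
        System.out.println("formatted: " + stopwatch);
    }
}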

Usage

From source file: com.facebook.buck.parser.cache.impl.RemoteManifestServiceCacheStorage.java

@Override
public void storeBuildFileManifest(HashCode weakFingerprint, HashCode strongFingerprint,
        byte[] serializedBuildFileManifest) throws IOException, InterruptedException {
    Stopwatch timer = Stopwatch.createStarted();
    try {
        if (!isWriteAllowed()) {
            return;
        }

        Manifest weakFingerprintManifest = new Manifest();
        weakFingerprintManifest.setKey(weakFingerprint.toString());
        weakFingerprintManifest
                .addToValues(ByteBuffer.wrap(strongFingerprint.toString().getBytes(StandardCharsets.UTF_8)));

        try {
            manifestService.appendToManifest(weakFingerprintManifest).get(TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (TimeoutException | ExecutionException e) {
            rethrow(e);
        }

        Manifest strongFingerprintManifest = new Manifest();
        strongFingerprintManifest.setKey(strongFingerprint.toString());
        strongFingerprintManifest.setValues(ImmutableList.of(ByteBuffer.wrap(serializedBuildFileManifest)));
        try {
            manifestService.setManifest(strongFingerprintManifest).get(TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (TimeoutException | ExecutionException e) {
            rethrow(e);
        }
    } finally {
        timer.stop();
        LOG.debug("Time to complete RemoteManifestServiceCacheStorage.storeBuildFileManifest method: %d.",
                timer.elapsed(TimeUnit.NANOSECONDS));
    }
}
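
The example above stops the stopwatch in a finally block, so the elapsed time is logged even when the method returns early or one of the calls throws. A stripped-down sketch of the same idiom; riskyWork is a hypothetical stand-in for the manifest-service calls:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class TimedCall {
    public static void main(String[] args) {
        Stopwatch timer = Stopwatch.createStarted();
        try {
            riskyWork();
        } finally {
            // executes whether riskyWork() returned normally or threw
            timer.stop();
            System.out.printf("riskyWork took %d ns%n", timer.elapsed(TimeUnit.NANOSECONDS));
        }
    }

    private static void riskyWork() {
        // hypothetical stand-in for the calls being timed
    }
}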

From source file: io.flutter.run.daemon.FlutterAppListener.java

@Override
public void onAppProgressStarting(@NotNull DaemonEvent.AppProgress event) {
    progress.start(event.message);
    if (event.getType().startsWith("hot.")) {
        stopwatch.set(Stopwatch.createStarted());
    }
}
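
The listener appears to keep the started stopwatch in an AtomicReference so that a later event handler can stop it and report the hot-reload time. A sketch of that hand-off pattern, with hypothetical onOperationStarted/onOperationFinished callbacks:

import java.util.concurrent.atomic.AtomicReference;

import com.google.common.base.Stopwatch;

public class CrossEventTimer {
    private final AtomicReference<Stopwatch> stopwatch = new AtomicReference<>();

    void onOperationStarted() {
        stopwatch.set(Stopwatch.createStarted()); // begin timing when the first event fires
    }

    void onOperationFinished() {
        Stopwatch watch = stopwatch.getAndSet(null);
        if (watch != null) {
            System.out.println("operation took " + watch.stop());
        }
    }
}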

From source file: org.locationtech.geogig.remote.BinaryPackedObjects.java

/**
 * @return the number of objects written
 */
public long write(ObjectFunnel funnel, List<ObjectId> want, List<ObjectId> have, Set<ObjectId> sent,
        Callback callback, boolean traverseCommits, Deduplicator deduplicator) throws IOException {

    for (ObjectId i : want) {
        if (!database.exists(i)) {
            throw new NoSuchElementException(format("Wanted commit: '%s' is not known", i));
        }
    }

    LOGGER.info("scanning for previsit list...");
    Stopwatch sw = Stopwatch.createStarted();
    ImmutableList<ObjectId> needsPrevisit = traverseCommits ? scanForPrevisitList(want, have, deduplicator)
            : ImmutableList.copyOf(have);
    LOGGER.info(String.format("Previsit list built in %s for %,d ids: %s. Calculating reachable content ids...",
            sw.stop(), needsPrevisit.size(), needsPrevisit));

    deduplicator.reset();

    sw.reset().start();
    ImmutableList<ObjectId> previsitResults = reachableContentIds(needsPrevisit, deduplicator);
    LOGGER.info(String.format("reachableContentIds took %s for %,d ids", sw.stop(), previsitResults.size()));

    deduplicator.reset();

    LOGGER.info("obtaining post order iterator on range...");
    sw.reset().start();

    Iterator<RevObject> objects = PostOrderIterator.range(want, new ArrayList<ObjectId>(previsitResults),
            database, traverseCommits, deduplicator);
    long objectCount = 0;
    LOGGER.info("PostOrderIterator.range took {}", sw.stop());

    try {
        LOGGER.info("writing objects to remote...");
        while (objects.hasNext()) {
            RevObject object = objects.next();
            funnel.funnel(object);
            objectCount++;
            callback.callback(Suppliers.ofInstance(object));
        }
    } catch (IOException e) {
        String causeMessage = Throwables.getRootCause(e).getMessage();
        LOGGER.info(String.format("writing of objects failed after %,d objects. Cause: '%s'", objectCount,
                causeMessage));
        throw e;
    }
    return objectCount;
}
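
This method times several phases with a single Stopwatch, zeroing it between phases with sw.reset().start(). Note that stop() returns the stopwatch itself, which is why it can be formatted directly inside the log call. A compact sketch of the reuse pattern; phaseOne and phaseTwo are hypothetical:

import com.google.common.base.Stopwatch;

public class PhaseTimer {
    public static void main(String[] args) {
        Stopwatch sw = Stopwatch.createStarted();
        phaseOne();
        // stop() returns this, so the stopwatch can be logged inline
        System.out.println("phase one: " + sw.stop());

        sw.reset().start(); // zero the counter and start timing the next phase
        phaseTwo();
        System.out.println("phase two: " + sw.stop());
    }

    private static void phaseOne() { /* hypothetical work */ }
    private static void phaseTwo() { /* hypothetical work */ }
}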

From source file: org.bitcoinj_extra.params.AbstractBitcoinNetParams.java

@Override
public void checkDifficultyTransitions(final StoredBlock storedPrev, final Block nextBlock,
        final BlockStore blockStore) throws VerificationException, BlockStoreException {
    Block prev = storedPrev.getHeader();

    // Is this supposed to be a difficulty transition point?
    if (!isDifficultyTransitionPoint(storedPrev)) {

        // No ... so check the difficulty didn't actually change.
        if (nextBlock.getDifficultyTarget() != prev.getDifficultyTarget())
            throw new VerificationException("Unexpected change in difficulty at height "
                    + storedPrev.getHeight() + ": " + Long.toHexString(nextBlock.getDifficultyTarget()) + " vs "
                    + Long.toHexString(prev.getDifficultyTarget()));
        return;
    }

    // We need to find a block far back in the chain. It's OK that this is expensive because it only occurs every
    // two weeks after the initial block chain download.
    final Stopwatch watch = Stopwatch.createStarted();
    StoredBlock cursor = blockStore.get(prev.getHash());
    for (int i = 0; i < this.getInterval() - 1; i++) {
        if (cursor == null) {
            // This should never happen. If it does, it means we are following an incorrect or busted chain.
            throw new VerificationException(
                    "Difficulty transition point but we did not find a way back to the genesis block.");
        }
        cursor = blockStore.get(cursor.getHeader().getPrevBlockHash());
    }
    watch.stop();
    if (watch.elapsed(TimeUnit.MILLISECONDS) > 50)
        log.info("Difficulty transition traversal took {}", watch);

    Block blockIntervalAgo = cursor.getHeader();
    int timespan = (int) (prev.getTimeSeconds() - blockIntervalAgo.getTimeSeconds());
    // Limit the adjustment step.
    final int targetTimespan = this.getTargetTimespan();
    if (timespan < targetTimespan / 4)
        timespan = targetTimespan / 4;
    if (timespan > targetTimespan * 4)
        timespan = targetTimespan * 4;

    BigInteger newTarget = Utils.decodeCompactBits(prev.getDifficultyTarget());
    newTarget = newTarget.multiply(BigInteger.valueOf(timespan));
    newTarget = newTarget.divide(BigInteger.valueOf(targetTimespan));

    if (newTarget.compareTo(this.getMaxTarget()) > 0) {
        log.info("Difficulty hit proof of work limit: {}", newTarget.toString(16));
        newTarget = this.getMaxTarget();
    }

    int accuracyBytes = (int) (nextBlock.getDifficultyTarget() >>> 24) - 3;
    long receivedTargetCompact = nextBlock.getDifficultyTarget();

    // The calculated difficulty is to a higher precision than received, so reduce here.
    BigInteger mask = BigInteger.valueOf(0xFFFFFFL).shiftLeft(accuracyBytes * 8);
    newTarget = newTarget.and(mask);
    long newTargetCompact = Utils.encodeCompactBits(newTarget);

    if (newTargetCompact != receivedTargetCompact)
        throw new VerificationException("Network provided difficulty bits do not match what was calculated: "
                + Long.toHexString(newTargetCompact) + " vs " + Long.toHexString(receivedTargetCompact));
}
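
Here the stopwatch guards a log statement: the traversal time is reported only when it exceeds a 50 ms budget, and the Stopwatch instance itself is handed to the logger so its human-readable toString() output is used. A minimal sketch of that slow-path idiom; expensiveTraversal is hypothetical:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class SlowPathLogger {
    private static final long BUDGET_MILLIS = 50; // hypothetical threshold

    public static void main(String[] args) {
        Stopwatch watch = Stopwatch.createStarted();
        expensiveTraversal();
        watch.stop();
        // only report outliers; watch.toString() renders as e.g. "63.1 ms"
        if (watch.elapsed(TimeUnit.MILLISECONDS) > BUDGET_MILLIS) {
            System.out.println("traversal took " + watch);
        }
    }

    private static void expensiveTraversal() { /* hypothetical work */ }
}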

From source file: io.ecarf.core.cloud.task.processor.dictionary.AssembleDictionaryTask.java

@Override
public void run() throws IOException {

    log.info("Assembling dictionary, memory usage: " + Utils.getMemoryUsageInGB() + "GB");

    Stopwatch stopwatch = Stopwatch.createStarted();

    List<StorageObject> objects = this.cloudService.listCloudStorageObjects(bucket);

    List<Item> items = new ArrayList<>();

    for (StorageObject object : objects) {

        String filename = object.getName();

        if (filename.endsWith(FilenameUtils.KRYO_SERIALIZED_EXT)) {
            items.add(new Item(filename, object.getSize().longValue()));
        }
    }

    log.info("Found " + items.size() + ", serialized files");

    int processors = Runtime.getRuntime().availableProcessors();

    BinPackingPartition function = new BinPackingPartition(items);
    function.setMaxBinItems((long) processors);
    List<Partition> partitions = function.partition();

    TermDictionary dictionary = TermDictionary.populateRDFOWLData(new TermDictionaryConcurrent());

    List<Callable<Void>> tasks = getSubTasks(partitions, dictionary);

    try {

        // check if we only have one file to process
        if (tasks.size() == 1) {

            tasks.get(0).call();

        } else if (processors == 1) {
            // only one processor, so process the tasks synchronously

            for (Callable<Void> task : tasks) {
                task.call();
            }

        } else {

            // multiple cores
            ExecutorService executor = Utils.createFixedThreadPool(processors);

            try {

                executor.invokeAll(tasks);

            } finally {
                executor.shutdown();
            }
        }

        tasks = null;

    } catch (Exception e) {
        log.error("Failed to process multiple files", e);
        throw new IOException(e);

    }

    int dicSize = dictionary.size();

    log.info("Successfully assembled dictionary with size: " + dicSize + ", max resourceId: "
            + dictionary.getLargestResourceId() + ", memory usage: " + Utils.getMemoryUsageInGB() + "GB"
            + ", timer: " + stopwatch);

    // extract the terms and encode the schema if needed
    if (StringUtils.isNotBlank(this.schemaFile) && StringUtils.isNotBlank(this.schemaBucket)) {
        this.encodeSchema(dictionary);
    }

    // encode the term stats file if needed
    if (StringUtils.isNotBlank(this.termStatsFile) && StringUtils.isNotBlank(this.encodedTermStatsFile)) {
        this.encodeTermsStats(dictionary);
    }

    // if no name provided for the dictionary file then create a default
    if (StringUtils.isBlank(this.dictionaryFile)) {
        this.dictionaryFile = this.cloudService.getInstanceId() + '_'
                + FilenameUtils.getSerializedGZipedDictionaryFilename();
    }

    this.dictionaryFile = FilenameUtils.getLocalFilePath(this.dictionaryFile);

    dictionary = ((ConcurrentDictionary) dictionary).getNonConcurrentDictionary();

    log.info("Successfully created non concurrent dictionary for serialization, memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

    dictionary.toFile(dictionaryFile, true);

    dictionary = null;

    log.info("Successfully serialized dictionary with size: " + dicSize + ", memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

    if (StringUtils.isBlank(this.targetBucket)) {
        this.targetBucket = bucket;
    }

    this.cloudService.uploadFileToCloudStorage(dictionaryFile, this.targetBucket);

    log.info("Successfully assembled, serialized and uploaded dictionary, memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

}
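
Note that this task never stops its stopwatch: it appends the still-running instance to successive log messages, so each message shows the cumulative time since createStarted(). A small sketch of milestone logging with one running stopwatch; stepOne and stepTwo are hypothetical:

import com.google.common.base.Stopwatch;

public class MilestoneTimer {
    public static void main(String[] args) {
        Stopwatch stopwatch = Stopwatch.createStarted();

        stepOne();
        System.out.println("step one done, timer: " + stopwatch); // cumulative so far

        stepTwo();
        System.out.println("step two done, timer: " + stopwatch); // still cumulative
    }

    private static void stepOne() { /* hypothetical work */ }
    private static void stepTwo() { /* hypothetical work */ }
}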

From source file: com.google.cloud.tools.eclipse.test.util.ThreadDumpingWatchdog.java

@Override
public void run() {
    Stopwatch dumpingTime = Stopwatch.createStarted();
    ThreadMXBean bean = ManagementFactory.getThreadMXBean();
    ThreadInfo[] infos = bean.dumpAllThreads(true, true);
    Arrays.sort(infos, new Comparator<ThreadInfo>() {
        @Override
        public int compare(ThreadInfo o1, ThreadInfo o2) {
            return Long.compare(o1.getThreadId(), o2.getThreadId());
        }
    });

    StringBuilder sb = new StringBuilder();
    sb.append("\n+-------------------------------------------------------------------------------");
    sb.append("\n| STACK DUMP @ ").append(stopwatch).append(": ").append(description);
    sb.append("\n|");
    dumpEclipseLocks(sb, "| ");

    int uselessThreadsCount = 0;
    for (ThreadInfo tinfo : infos) {
        // Unfortunately ThreadInfo#toString() only dumps up to 8 stackframes, and
        // this value is not configurable :-(
        if (!isUselessThread(tinfo)) {
            dumpThreadInfo(sb, "| ", tinfo);
        } else {
            uselessThreadsCount++;
        }
    }
    if (uselessThreadsCount > 0) {
        sb.append("\n| Ignored threads:");
        for (ThreadInfo tinfo : infos) {
            if (isUselessThread(tinfo)) {
                sb.append("\n|   ");
                dumpThreadHeader(sb, tinfo);
            }
        }
    }
    sb.append("\n| ELAPSED TIME: ").append(dumpingTime);
    sb.append("\n+-------------------------------------------------------------------------------");
    System.err.println(sb.toString());
}

From source file: com.google.cloud.genomics.dataflow.readers.bam.Reader.java

public void process() throws IOException {
    timer = Stopwatch.createStarted();
    openFile();

    while (iterator.hasNext()) {
        processRecord(iterator.next());
    }

    dumpStats();
}

From source file: org.apache.drill.exec.store.kafka.MessageIterator.java

@Override
public boolean hasNext() {
    if (recordIter != null && recordIter.hasNext()) {
        return true;
    }

    long nextPosition = kafkaConsumer.position(topicPartition);
    if (nextPosition >= endOffset) {
        return false;
    }

    ConsumerRecords<byte[], byte[]> consumerRecords = null;
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        consumerRecords = kafkaConsumer.poll(kafkaPollTimeOut);
    } catch (KafkaException ke) {
        logger.error(ke.getMessage(), ke);
        throw UserException.dataReadError(ke).message(ke.getMessage()).build(logger);
    }
    stopwatch.stop();

    if (consumerRecords.isEmpty()) {
        String errorMsg = new StringBuilder().append("Failed to fetch messages within ")
                .append(kafkaPollTimeOut)
                .append(" milliseconds. Consider increasing the value of the property : ")
                .append(ExecConstants.KAFKA_POLL_TIMEOUT).toString();
        throw UserException.dataReadError().message(errorMsg).build(logger);
    }

    long lastFetchTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
    logger.debug("Total number of messages fetched : {}", consumerRecords.count());
    logger.debug("Time taken to fetch : {} milliseconds", lastFetchTime);
    totalFetchTime += lastFetchTime;

    recordIter = consumerRecords.iterator();
    return recordIter.hasNext();
}
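
This iterator stops the stopwatch before reading it, then adds the per-poll elapsed time to a running total. A sketch of that accumulate-per-iteration pattern; pollOnce is a hypothetical stand-in for kafkaConsumer.poll:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class FetchTimer {
    public static void main(String[] args) {
        long totalFetchTime = 0;
        for (int i = 0; i < 3; i++) {
            Stopwatch stopwatch = Stopwatch.createStarted();
            pollOnce();
            stopwatch.stop(); // freeze the reading before using it
            long lastFetchTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
            totalFetchTime += lastFetchTime;
            System.out.println("fetch " + i + ": " + lastFetchTime + " ms");
        }
        System.out.println("total: " + totalFetchTime + " ms");
    }

    private static void pollOnce() { /* hypothetical stand-in for a blocking poll */ }
}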

From source file: com.b2international.snowowl.datastore.server.snomed.merge.rules.SnomedDonatedComponentResolverRule.java

@Override
public Collection<MergeConflict> validate(final CDOTransaction transaction) {

    Stopwatch stopwatch = Stopwatch.createStarted();

    // XXX This is important to avoid ObjectNotFoundExceptions due to the removal of extension concepts
    transaction.options().setStaleReferencePolicy(CDOStaleReferencePolicy.PROXY);

    Map<CDOID, Component> newComponentsMap = StreamSupport
            .stream(ComponentUtils2.getNewObjects(transaction, Component.class).spliterator(), false)
            .collect(toMap(CDOObject::cdoID, Function.identity()));

    Iterable<Relationship> allNewAndDirtyRelationships = Iterables.concat(
            ComponentUtils2.getNewObjects(transaction, Relationship.class),
            ComponentUtils2.getDirtyObjects(transaction, Relationship.class));

    Multimap<CDOID, Relationship> destinationToRelationshipsMap = HashMultimap.create();

    StreamSupport.stream(allNewAndDirtyRelationships.spliterator(), false)
            .filter(r -> !newDonatedComponents.containsKey(r.getSource().cdoID())
                    && newDonatedComponents.containsKey(r.getDestination().cdoID()))
            .forEach(r -> destinationToRelationshipsMap.put(r.getDestination().cdoID(), r));

    for (final Entry<CDOID, CDOID> entry : newDonatedComponents.entrySet()) {

        final CDOID sourceCDOID = entry.getKey();
        final CDOID targetCDOID = entry.getValue();

        final Optional<CDOObject> sourceComponent = Optional.ofNullable(newComponentsMap.get(sourceCDOID));
        final Optional<CDOObject> targetComponent = Optional
                .ofNullable(CDOUtils.getObjectIfExists(transaction, targetCDOID));

        if (sourceComponent.isPresent() && targetComponent.isPresent()) {

            if (sourceComponent.get() instanceof Concept && targetComponent.get() instanceof Concept) {

                final Concept extensionConcept = (Concept) sourceComponent.get();
                final Concept donatedConcept = (Concept) targetComponent.get();

                LOGGER.info(">>> Processing donated concept with id '{}'", donatedConcept.getId());

                unfreezeRevision(extensionConcept);

                final List<Description> additionalExtensionDescriptions = extensionConcept.getDescriptions()
                        .stream()
                        .filter(extension -> !donatedConcept.getDescriptions().stream()
                                .anyMatch(donated -> donated.getId().equals(extension.getId())))
                        .collect(toList());

                final List<Relationship> additionalExtensionRelationships = extensionConcept
                        .getOutboundRelationships().stream()
                        .filter(extension -> !donatedConcept.getOutboundRelationships().stream()
                                .anyMatch(donated -> donated.getId().equals(extension.getId())))
                        .collect(toList());

                // association refset members?
                // inactivation indicator refset members?
                // concrete domain refset members?

                // handle inbound relationships
                if (destinationToRelationshipsMap.containsKey(extensionConcept.cdoID())) {

                    Collection<Relationship> inboundRelationships = destinationToRelationshipsMap
                            .get(extensionConcept.cdoID());

                    for (Relationship relationship : inboundRelationships) {

                        Concept relationshipSourceConcept = relationship.getSource();

                        LOGGER.info("Replacing inbound reference from '{}' to '{}' with id '{}'",
                                relationshipSourceConcept.getId(), donatedConcept.getId(),
                                relationship.getId());

                        unfreezeRevision(relationshipSourceConcept);
                        relationship.setDestination(donatedConcept);

                    }

                }

                EcoreUtil.remove(extensionConcept);

                for (final Description extensionDescription : additionalExtensionDescriptions) {
                    LOGGER.info("Adding extension description to the donated version '{}' - '{}'",
                            extensionDescription.getId(), extensionDescription.getTerm());
                    donatedConcept.getDescriptions().add(extensionDescription);
                }

                for (final Relationship extensionRelationship : additionalExtensionRelationships) {

                    if (newDonatedComponents.containsKey(extensionRelationship.getDestination().cdoID())) {

                        Optional<Concept> newDestinationConcept = Optional
                                .ofNullable(CDOUtils.getObjectIfExists(transaction, newDonatedComponents
                                        .get(extensionRelationship.getDestination().cdoID())));

                        if (newDestinationConcept.isPresent()) {

                            LOGGER.info("Replacing outbound reference from '{}' to '{}' with id '{}'",
                                    donatedConcept.getId(), newDestinationConcept.get().getId(),
                                    extensionRelationship.getId());

                            extensionRelationship.setDestination(newDestinationConcept.get());
                        }
                    }

                    LOGGER.info("Adding extension relationship to the donated version with id '{}'",
                            extensionRelationship.getId());

                    donatedConcept.getOutboundRelationships().add(extensionRelationship);
                }

                LOGGER.info("<<< Processed donated concept with id '{}'", extensionConcept.getId());

            } else if (sourceComponent.get() instanceof Description
                    && targetComponent.get() instanceof Description) {

                final Description extensionDescription = (Description) sourceComponent.get();
                final Description donatedDescription = (Description) targetComponent.get();

                LOGGER.info(">>> Processing donated description with id '{}'", extensionDescription.getId());

                EcoreUtil.remove(extensionDescription);

                // association refset members?
                // inactivation indicator refset members?

                donatedDescription.getLanguageRefSetMembers()
                        .addAll(extensionDescription.getLanguageRefSetMembers());

                LOGGER.info("<<< Processed donated description with id '{}'", extensionDescription.getId());

            } else if (sourceComponent.get() instanceof Relationship
                    && targetComponent.get() instanceof Relationship) {

                final Relationship sourceRelationship = (Relationship) sourceComponent.get();

                LOGGER.info(">>> Processing donated relationship with id '{}'", sourceRelationship.getId());

                EcoreUtil.remove(sourceRelationship);

                // concrete domain members?

                LOGGER.info("<<< Processed donated relationship with id '{}'", sourceRelationship.getId());
            }

        }

    }

    for (final CDOID id : changedDonatedComponents) {

        final Optional<CDOObject> object = Optional.ofNullable(CDOUtils.getObjectIfExists(transaction, id));

        if (object.isPresent()) {

            if (object.get() instanceof Component) {

                final Component component = (Component) object.get();

                transaction.getLastSavepoint().getDirtyObjects().remove(id);
                transaction.getLastSavepoint().getRevisionDeltas().remove(id);

                LOGGER.info("Keeping latest ({}) version of donated component '{}' with id '{}'",
                        EffectiveTimes.format(component.getEffectiveTime()), component.eClass().getName(),
                        component.getId());
            }
        }
    }

    LOGGER.info("Donated component resolution finished in {}", TimeUtil.toString(stopwatch));

    return emptySet();
}

From source file: com.github.ibole.infrastructure.security.jwt.jose4j.EcJose4jTokenAuthenticator.java

/**
 * Creates an access token.
 */
@Override
public String createAccessToken(JwtObject claimObj) throws TokenHandlingException {
    Preconditions.checkArgument(claimObj != null, "Parameter claimObj cannot be null");
    final Stopwatch stopwatch = Stopwatch.createStarted();
    String token = null;
    try {
        if (!Constants.ANONYMOUS_ID.equalsIgnoreCase(claimObj.getLoginId())
                && !getRedisTemplate().exists(getRefreshTokenKey(claimObj.getLoginId()))) {
            throw new RefreshTokenNotFoundException("Refresh token not found.");
        }
        token = JoseUtils.createJwtWithECKey(claimObj, (EllipticCurveJsonWebKey) ecJsonWebKey);
        getRedisTemplate().hset(getRefreshTokenKey(claimObj.getLoginId()), Constants.ACCESS_TOKEN, token);
    } catch (JoseException ex) {
        logger.error("Error happened when generating the jwt token.", ex);
        throw new TokenHandlingException(ex);
    }
    String elapsedString = Long.toString(stopwatch.elapsed(TimeUnit.MILLISECONDS));
    logger.debug("Create access token elapsed time: {} ms", elapsedString);
    return token;
}