Example usage for com.google.common.base Stopwatch elapsed

Introduction

On this page you can find example usage for com.google.common.base Stopwatch elapsed.

Prototype

@CheckReturnValue
public long elapsed(TimeUnit desiredUnit) 

Document

Returns the current elapsed time shown on this stopwatch, expressed in the desired time unit, with any fraction rounded down.
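
As a quick illustration of that rounding behavior, here is a minimal, self-contained sketch (the class name TimingDemo and the sleep duration are illustrative, not taken from the examples below):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class TimingDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(1234);
        // Any fraction of the desired unit is rounded down: ~1234 ms reads as 1 second.
        System.out.println(stopwatch.elapsed(TimeUnit.SECONDS) + " s");
        System.out.println(stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}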

Usage

From source file:com.isotrol.impe3.pms.core.impl.SessionsServiceImpl.java

private SessionDTO providerLogin(EnvironmentEntity env, UserEntity user, Credentials credentials) {
    final ExternalUserDataDTO data;
    final Stopwatch w = Stopwatch.createStarted();
    try {
        data = authenticationProvider.authenticate(credentials);
    } catch (InvalidCredentialsException e) {
        if (user != null) {
            registerAttempt(env, user, false);
        }
        return null;
    } finally {
        long t = w.elapsed(TimeUnit.MILLISECONDS);
        if (t > 250) {
            logger.warn(String.format("Provider login took [%d] ms", t));
        }
    }
    if (data == null) {
        return null;
    }
    if (user == null) {
        if (!data.isCreate()) {
            return null;
        }
        user = new UserEntity();
        user.setName(data.getName());
        user.setDisplayName(data.getDisplayName());
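        // Return values intentionally unused; presumably these getters initialize lazy collections before the entity is saved.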
        user.getGlobalAuthorities();
        user.getGlobalRoles();
        user.getPortalAuthorities();
        user.setCreated(userManager.getRootUser());
        user.setUpdated(userManager.getRootUser());
        user.setId(newUUID());
        userDAO.save(user);
        sync();
    } else {
        if (data.isUpdate()) {
            user.setDisplayName(data.getDisplayName());
            user.setUpdated(userManager.getRootUser());
        }
    }
    final UUID id = user.getId();
    final SessionDTO dto = new SessionDTO();
    dto.setId(Identifiables.toStringId(id));
    dto.setName(user.getDisplayName());
    SecurityContext.set(new SecurityContext(id));
    dto.setAuthorization(new Authorization(loadContextGlobal().getPortals(), user).toDTO());
    registerAttempt(env, user, true);
    return dto;
}
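
The snippet above illustrates a reusable guard pattern: start a stopwatch before the call, read elapsed in a finally block so the measurement survives exceptions, and warn only past a threshold. A minimal standalone sketch of the same pattern (the class SlowCallGuard, the threshold, and the System.err logging are illustrative, not from the project above):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class SlowCallGuard {
    private static final long THRESHOLD_MS = 250;

    static void timedCall(Runnable slowCall) {
        Stopwatch w = Stopwatch.createStarted();
        try {
            slowCall.run();
        } finally {
            // elapsed() is read in finally, so the time is logged even on failure.
            long t = w.elapsed(TimeUnit.MILLISECONDS);
            if (t > THRESHOLD_MS) {
                System.err.printf("call took [%d] ms%n", t);
            }
        }
    }

    public static void main(String[] args) {
        timedCall(() -> {
            try {
                Thread.sleep(300); // simulate a slow provider call
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
    }
}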

From source file:com.fireball1725.firelib.FireMod.java

@Mod.EventHandler
public final void preInit(FMLPreInitializationEvent event) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    this.getLogger().info("Pre Initialization (Started)");

    // Check java version to make sure we are on Java 1.8
    if (!SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_1_8)) {
        //throw new OutdatedJavaException(String.format("%s requires Java 8 or newer, Please update your java", ModInfo.MOD_NAME));
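        // NOTE: with the throw commented out, this version check currently has no effect.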
    }

    this.proxy().registerEventHandler(this);
    proxy().initConfiguration(event);
    proxy().preInitStart(event);
    proxy().registerEventHandler(new RegistrationHelper(this));
    proxy().preInitEnd(event);

    this.getLogger()
            .info("Pre Initialization (Ended after " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + "ms)");
}

From source file:ezbake.frack.common.workers.ProvenanceBatchWorker.java

/**
 * Flushes the entries accumulated thus far and writes to the Provenance service.
 *
 * @throws IOException if outputToPipes throws IOException
 */
private void flushQueue() throws IOException {
    ProvenanceService.Client provenanceClient = null;
    synchronized (this) {
        try {
            provenanceClient = this.pool.getClient(ProvenanceServiceConstants.SERVICE_NAME,
                    ProvenanceService.Client.class);
            Stopwatch stopwatch = Stopwatch.createStarted();

            Map<String, AddDocumentResult> resultMap = provenanceClient.addDocuments(
                    this.securityClient
                            .fetchAppToken(this.pool.getSecurityId(ProvenanceServiceConstants.SERVICE_NAME)),
                    addDocumentEntrySet, ageOffMappingSet);
            logger.info("Provenanced {} documents | {} ms", addDocumentEntrySet.size(),
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
            logger.debug("Sending successfully provenanced data to pipes");

            for (Map.Entry<String, UriToObjectItem> entry : uriToObjectMap.entrySet()) {
                String uri = entry.getKey();
                if (!resultMap.containsKey(uri)) {
                    logger.debug("ERROR: URI NOT found in result map: {}", uri);
                    UriToObjectItem uriToObjectItem = entry.getValue();
                    quarantine(uri, uriToObjectItem.object, uriToObjectItem.visibility);
                } else {
                    final AddDocumentResult addDocumentResult = resultMap.get(uri);
                    AddDocumentStatus status = addDocumentResult.getStatus();

                    UriToObjectItem uriToObjectItem = entry.getValue();
                    if (status == AddDocumentStatus.SUCCESS
                            || (status == AddDocumentStatus.ALREADY_EXISTS && outputDuplicates)) {
                        outputToPipes(uriToObjectItem.visibility, uriToObjectItem.object);
                    } else {
                        logger.debug("NOT sending document to pipes: uri={}", uri);
                        // do not quarantine duplicates
                        if (status != AddDocumentStatus.ALREADY_EXISTS) {
                            quarantine(uri, uriToObjectItem.object, uriToObjectItem.visibility);
                        }
                    }
                }
            }
            clearArtifacts();
            this.pool.returnToPool(provenanceClient);

        } catch (TException e) {
            this.pool.returnBrokenToPool(provenanceClient);
            logger.error("Error during batching of documents for Provenance registration.", e);
            throw new IOException(e);
        }
    }
}
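
Note the pooled-client discipline in this example: the Thrift client is returned with returnToPool only after success, while a TException routes it through returnBrokenToPool so a broken connection is not reused.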

From source file:com.google.api.ads.adwords.awreporting.downloader.MultipleClientReportDownloader.java

/**
 * Downloads the specified report for all specified CIDs, prints out the list of failed CIDs,
 * and returns the files of all successful downloads.
 *
 * @param sessionBuilder the synchronized session builder used to create a session copy per account.
 * @param reportDefinition Report to download.
 * @param accountIds CIDs to download the report for.
 * @return Collection of File objects of downloaded/unzipped reports.
 * @throws InterruptedException error trying to stop downloader thread.
 * @throws ValidationException
 */
public Collection<File> downloadReports(final AdWordsSessionBuilderSynchronizer sessionBuilder,
        final ReportDefinition reportDefinition, final Set<Long> accountIds)
        throws InterruptedException, ValidationException {

    final Collection<Long> failed = new ConcurrentSkipListSet<Long>();
    final Collection<File> results = new ConcurrentSkipListSet<File>();

    // We use a Latch so the main thread knows when all the worker threads are complete.
    final CountDownLatch latch = new CountDownLatch(accountIds.size());

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (final Long accountId : accountIds) {

        // We create a copy of the AdWordsSession specific for the Account
        AdWordsSession adWordsSession = sessionBuilder.getAdWordsSessionCopy(accountId);

        RunnableDownloader downloader = new RunnableDownloader(this.retriesCount, this.backoffInterval,
                this.bufferSize, accountId, reportDefinition, adWordsSession, results);
        downloader.setFailed(failed);
        executeRunnableDownloader(downloader, latch);
    }

    latch.await();
    stopwatch.stop();
    return this.printResultsAndReturn(results, stopwatch.elapsed(TimeUnit.MILLISECONDS), failed, accountIds);
}
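
The latch-based timing above generalizes: one stopwatch spans all workers, each worker counts down when it finishes, and elapsed is read only after await() returns. A self-contained sketch of that pattern (the pool size and sleep duration are placeholders, not values from the project above):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class LatchTimingDemo {
    public static void main(String[] args) throws InterruptedException {
        final int tasks = 4;
        final CountDownLatch latch = new CountDownLatch(tasks);
        ExecutorService pool = Executors.newFixedThreadPool(tasks);
        Stopwatch stopwatch = Stopwatch.createStarted();
        for (int i = 0; i < tasks; i++) {
            pool.submit(() -> {
                try {
                    Thread.sleep(100); // simulated download work
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    latch.countDown(); // always count down, even on failure
                }
            });
        }
        latch.await(); // block until every worker has counted down
        stopwatch.stop();
        System.out.println("All tasks finished in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        pool.shutdown();
    }
}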

From source file:ch.ge.ve.protopoc.service.protocol.DefaultAuthority.java

@Override
public ObliviousTransferResponse handleBallot(Integer voterIndex, BallotAndQuery ballotAndQuery) {
    Preconditions.checkState(publicCredentials != null,
            "The public credentials need to have been retrieved first");

    log.info(String.format("Authority %d handling ballot", j));

    Stopwatch stopwatch = Stopwatch.createStarted();
    List<BigInteger> publicIdentificationCredentials = publicCredentials.stream().map(p -> p.x)
            .collect(Collectors.toList());
    if (!voteCastingAuthorityAlgorithms.checkBallot(voterIndex, ballotAndQuery, systemPublicKey,
            publicIdentificationCredentials, ballotEntries)) {
        throw new IncorrectBallotRuntimeException(
                String.format("Ballot for voter %d was deemed invalid", voterIndex));
    }
    stopwatch.stop();
    ballotVerificationTimes.add(stopwatch.elapsed(TimeUnit.MILLISECONDS));

    stopwatch.reset().start();
    ObliviousTransferResponseAndRand responseAndRand = voteCastingAuthorityAlgorithms.genResponse(voterIndex,
            ballotAndQuery.getBold_a(), systemPublicKey, electionSet.getBold_n(), electorateData.getK(),
            electorateData.getP());
    ballotEntries.add(new BallotEntry(voterIndex, ballotAndQuery, responseAndRand.getBold_r()));
    ObliviousTransferResponse beta = responseAndRand.getBeta();
    stopwatch.stop();
    queryResponseTimes.add(stopwatch.elapsed(TimeUnit.MILLISECONDS));

    return beta;
}
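
Worth noting in the snippet above: a single stopwatch times two phases by calling stop(), recording elapsed, then reset().start() before the next phase. A minimal sketch of that lifecycle (class name and sleep durations are illustrative):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class PhaseTimingDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(50); // stands in for the first phase, e.g. ballot checking
        stopwatch.stop();
        long phase1Ms = stopwatch.elapsed(TimeUnit.MILLISECONDS);

        // reset() zeroes the elapsed time and returns the stopwatch, so start() can be chained.
        stopwatch.reset().start();
        Thread.sleep(80); // stands in for the second phase, e.g. response generation
        stopwatch.stop();
        long phase2Ms = stopwatch.elapsed(TimeUnit.MILLISECONDS);

        System.out.printf("phase 1: %d ms, phase 2: %d ms%n", phase1Ms, phase2Ms);
    }
}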

From source file:org.apache.drill.exec.store.kafka.KafkaRecordReader.java

/**
 * KafkaConsumer.poll fetches up to 500 messages per call, so hasNext takes care of
 * polling multiple times while filling the batch for this next() invocation.
 */
@Override
public int next() {
    writer.allocate();
    writer.reset();
    Stopwatch watch = Stopwatch.createStarted();
    int messageCount = 0;

    try {
        while (currentOffset < subScanSpec.getEndOffset() - 1 && msgItr.hasNext()) {
            ConsumerRecord<byte[], byte[]> consumerRecord = msgItr.next();
            currentOffset = consumerRecord.offset();
            writer.setPosition(messageCount);
            messageReader.readMessage(consumerRecord);
            if (++messageCount >= DEFAULT_MESSAGES_PER_BATCH) {
                break;
            }
        }

        messageReader.ensureAtLeastOneField();
        writer.setValueCount(messageCount);
        logger.debug("Took {} ms to process {} records.", watch.elapsed(TimeUnit.MILLISECONDS), messageCount);
        logger.debug("Last offset consumed for {}:{} is {}", subScanSpec.getTopicName(),
                subScanSpec.getPartitionId(), currentOffset);
        return messageCount;
    } catch (Exception e) {
        String msg = "Failure while reading messages from kafka. Recordreader was at record: "
                + (messageCount + 1);
        throw UserException.dataReadError(e).message(msg).addContext(e.getMessage()).build(logger);
    }
}

From source file:com.google.devtools.build.android.AndroidParsedDataDeserializer.java

/**
 * Reads the serialized {@link DataKey} and {@link DataValue} to the {@link KeyValueConsumers}.
 *
 * @param inPath The path to the serialized protocol buffer.
 * @param consumers The {@link KeyValueConsumers} for the entries {@link DataKey} -&gt; {@link
 *     DataValue}.
 * @throws DeserializationException Raised for an IOException or when the inPath is not a valid
 *     proto buffer.
 */
@Override
public void read(Path inPath, KeyValueConsumers consumers) {
    Stopwatch timer = Stopwatch.createStarted();
    try (InputStream in = Files.newInputStream(inPath, StandardOpenOption.READ)) {
        FileSystem currentFileSystem = inPath.getFileSystem();
        Header header = Header.parseDelimitedFrom(in);
        if (header == null) {
            throw new DeserializationException("No Header found in " + inPath);
        }
        readEntriesSegment(consumers, in, currentFileSystem, header);
    } catch (IOException e) {
        throw new DeserializationException("Error deserializing " + inPath, e);
    } finally {
        logger.fine(String.format("Deserialized merged data in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
    }
}

From source file:ch.ge.ve.protopoc.service.protocol.DefaultAuthority.java

@Override
public FinalizationCodePart handleConfirmation(Integer voterIndex, Confirmation confirmation)
        throws IncorrectConfirmationRuntimeException {
    Preconditions.checkState(publicCredentials != null,
            "The public credentials need to have been retrieved first");
    Stopwatch stopwatch = Stopwatch.createStarted();
    List<BigInteger> publicConfirmationCredentials = publicCredentials.stream().map(p -> p.y)
            .collect(Collectors.toList());

    if (!voteConfirmationAuthorityAlgorithms.checkConfirmation(voterIndex, confirmation,
            publicConfirmationCredentials, ballotEntries, confirmationEntries)) {
        throw new IncorrectConfirmationRuntimeException(
                "Confirmation for voter " + voterIndex + " was deemed invalid");
    }
    stopwatch.stop();
    confirmationVerificationTimes.add(stopwatch.elapsed(TimeUnit.MILLISECONDS));

    confirmationEntries.add(new ConfirmationEntry(voterIndex, confirmation));

    stopwatch.reset().start();
    FinalizationCodePart finalization = voteConfirmationAuthorityAlgorithms.getFinalization(voterIndex,
            electorateData.getP(), ballotEntries);
    stopwatch.stop();
    finalizationComputationTimes.add(stopwatch.elapsed(TimeUnit.MILLISECONDS));

    return finalization;
}

From source file:io.takari.maven.plugins.compile.jdt.ClasspathDigester.java

public HashMap<String, byte[]> digestDependencies(List<File> dependencies) throws IOException {
    Stopwatch stopwatch = Stopwatch.createStarted();

    HashMap<String, byte[]> digest = new HashMap<String, byte[]>();

    // scan dependencies backwards to properly deal with duplicate type definitions
    for (int i = dependencies.size() - 1; i >= 0; i--) {
        File file = dependencies.get(i);
        if (file.isFile()) {
            digest.putAll(digestJar(file));
        } else if (file.isDirectory()) {
            digest.putAll(digestDirectory(file));
        } else {
            // happens with reactor dependencies with empty source folders
            continue;
        }
    }

    log.debug("Analyzed {} classpath dependencies ({} ms)", dependencies.size(),
            stopwatch.elapsed(TimeUnit.MILLISECONDS));

    return digest;
}
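
Because the loop walks the dependency list backwards and putAll overwrites existing keys, types from earlier classpath entries are written last and win over duplicates from later entries, mirroring normal classpath resolution order.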

From source file:org.grouplens.lenskit.eval.traintest.TrainTestJob.java

@SuppressWarnings("PMD.AvoidCatchingThrowable")
private void runEvaluation() throws IOException, RecommenderBuildException {
    EventBus bus = task.getProject().getEventBus();
    bus.post(JobEvents.started(this));
    Closer closer = Closer.create();
    try {
        outputs = task.getOutputs().getPrefixed(algorithmInfo, dataSet);
        TableWriter userResults = outputs.getUserWriter();
        List<Object> outputRow = Lists.newArrayList();

        logger.info("Building {} on {}", algorithmInfo, dataSet);
        StopWatch buildTimer = new StopWatch();
        buildTimer.start();
        buildRecommender();
        buildTimer.stop();
        logger.info("Built {} in {}", algorithmInfo.getName(), buildTimer);

        logger.info("Measuring {} on {}", algorithmInfo.getName(), dataSet.getName());

        StopWatch testTimer = new StopWatch();
        testTimer.start();
        List<Object> userRow = Lists.newArrayList();

        List<MetricWithAccumulator<?>> accumulators = Lists.newArrayList();

        for (Metric<?> eval : outputs.getMetrics()) {
            accumulators.add(makeMetricAccumulator(eval));
        }

        LongSet testUsers = dataSet.getTestData().getUserDAO().getUserIds();
        final NumberFormat pctFormat = NumberFormat.getPercentInstance();
        pctFormat.setMaximumFractionDigits(2);
        pctFormat.setMinimumFractionDigits(2);
        final int nusers = testUsers.size();
        logger.info("Testing {} on {} ({} users)", algorithmInfo, dataSet, nusers);
        int ndone = 0;
        for (LongIterator iter = testUsers.iterator(); iter.hasNext();) {
            if (Thread.interrupted()) {
                throw new InterruptedException("eval job interrupted");
            }
            long uid = iter.nextLong();
            userRow.add(uid);
            userRow.add(null); // placeholder for the per-user time
            assert userRow.size() == 2;

            Stopwatch userTimer = Stopwatch.createStarted();
            TestUser test = getUserResults(uid);

            userRow.add(test.getTrainHistory().size());
            userRow.add(test.getTestHistory().size());

            for (MetricWithAccumulator<?> accum : accumulators) {
                List<Object> ures = accum.measureUser(test);
                if (ures != null) {
                    userRow.addAll(ures);
                }
            }
            userTimer.stop();
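            // convert milliseconds to fractional seconds for the per-user time column (index 1)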
            userRow.set(1, userTimer.elapsed(TimeUnit.MILLISECONDS) * 0.001);
            if (userResults != null) {
                try {
                    userResults.writeRow(userRow);
                } catch (IOException e) {
                    throw new RuntimeException("error writing user row", e);
                }
            }
            userRow.clear();

            ndone += 1;
            if (ndone % 100 == 0) {
                testTimer.split();
                double time = testTimer.getSplitTime();
                double tpu = time / ndone;
                double tleft = (nusers - ndone) * tpu;
                logger.info("tested {} of {} users ({}), ETA {}", ndone, nusers,
                        pctFormat.format(((double) ndone) / nusers),
                        DurationFormatUtils.formatDurationHMS((long) tleft));
            }
        }
        testTimer.stop();
        logger.info("Tested {} in {}", algorithmInfo.getName(), testTimer);

        writeMetricValues(buildTimer, testTimer, outputRow, accumulators);
        bus.post(JobEvents.finished(this));
    } catch (Throwable th) {
        bus.post(JobEvents.failed(this, th));
        throw closer.rethrow(th, RecommenderBuildException.class);
    } finally {
        try {
            cleanup();
        } finally {
            outputs = null;
            closer.close();
        }
    }
}
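
Note that this last example mixes two timer classes: buildTimer and testTimer are Apache Commons Lang StopWatch instances (which provide split() and getSplitTime()), while userTimer is the Guava Stopwatch whose elapsed method this page documents.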