Example usage for com.google.common.base Stopwatch createStarted

Introduction

On this page you can find example usages of com.google.common.base Stopwatch.createStarted().

Prototype

@CheckReturnValue
public static Stopwatch createStarted() 

Document

Creates (and starts) a new stopwatch, using System.nanoTime() as its time source.
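
As a minimal, self-contained sketch of the typical pattern (the class name and the Thread.sleep call are illustrative stand-ins, not part of the Guava API):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchDemo {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() is equivalent to createUnstarted() followed by start().
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(100); // stand-in for the work being timed
        // elapsed() can be read while the stopwatch is still running.
        System.out.println("Took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        stopwatch.stop(); // a second stop() would throw IllegalStateException
    }
}

Most of the examples below follow this same shape: create a started stopwatch, do the work, then read elapsed(TimeUnit.MILLISECONDS) for logging or metrics.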

Usage

From source file:com.spotify.heroic.metric.QueryTrace.java
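
This snippet hands a freshly started stopwatch to an ActiveWatch, which presumably reads the elapsed time when the query trace completes.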

/**
 * Create a new watch.
 *
 * @return a {@link com.spotify.heroic.metric.QueryTrace.Watch}
 * @deprecated use {@link Tracing#watch()}
 */
static Watch watch() {
    return new ActiveWatch(Stopwatch.createStarted());
}

From source file:com.github.ibole.microservice.security.auth.jwt.jose4j.EcJose4jTokenAuthenticator.java
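
Here the stopwatch brackets refresh-token creation, and the elapsed milliseconds are written to the debug log at the end.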

/**
 * Create Refresh Token.
 */
@Override
public String createRefreshToken(JwtObject claimObj) throws TokenHandlingException {
    Preconditions.checkArgument(claimObj != null, "Parameter claimObj cannot be null");
    final Stopwatch stopwatch = Stopwatch.createStarted();
    String token = null;
    try {

        token = JoseUtils.createJwtWithECKey(claimObj, (EllipticCurveJsonWebKey) ecJsonWebKey);
        getCacheService().set(getRefreshTokenKey(claimObj.getLoginId()), Constants.REFRESH_TOKEN, token);
        getCacheService().set(getRefreshTokenKey(claimObj.getLoginId()), Constants.CLIENT_ID,
                claimObj.getClientId());
        getCacheService().expire(getRefreshTokenKey(claimObj.getLoginId()), claimObj.getTtlSeconds());

    } catch (JoseException ex) {
        logger.error("Error happened when generating the jwt token.", ex);
        throw new TokenHandlingException(ex);
    }
    String elapsedString = Long.toString(stopwatch.elapsed(TimeUnit.MILLISECONDS));
    logger.debug("Create refresh token elapsed time: {} ms", elapsedString);
    return token;
}

From source file:com.google.api.ads.adwords.awalerting.processor.AlertProcessor.java
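
This example times an entire alert-generation run and logs the total duration in seconds once every alert has been processed.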

/**
 * Generate all the alerts for the given account IDs under the manager account.
 *
 * @param clientCustomerIds the client customer IDs
 * @param alertsConfig the JSON config of the alerts
 */
public void generateAlerts(Set<Long> clientCustomerIds, JsonObject alertsConfig)
        throws AlertConfigLoadException, AlertProcessingException {
    Stopwatch stopwatch = Stopwatch.createStarted();

    ImmutableAdWordsSession session = null;
    try {
        session = authenticator.authenticate();
    } catch (OAuthException e) {
        throw new AlertConfigLoadException("Failed to authenticate AdWordsSession.", e);
    } catch (ValidationException e) {
        throw new AlertConfigLoadException("Failed to build AdWordsSession.", e);
    }

    if (clientCustomerIds == null) {
        clientCustomerIds = retrieveClientCustomerIds(session);

        // Write the client customer IDs into debug log.
        LOGGER.debug("Client customer IDs retrieved:{}{}", SEPARATOR,
                Joiner.on(SEPARATOR).join(clientCustomerIds));
    }

    int count = 0;
    for (JsonElement alertConfig : alertsConfig.getAsJsonArray(ConfigTags.ALERTS)) {
        count++;
        processAlert(clientCustomerIds, session, alertConfig.getAsJsonObject(), count);
    }

    stopwatch.stop();
    LOGGER.info("*** Finished all processing in {} seconds ***",
            stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000);
}

From source file:com.vmware.photon.controller.apife.backends.ResourceTicketSqlBackend.java
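
In this example the stopwatch measures only the time spent acquiring an upgrade lock on the resource ticket; the quota bookkeeping that follows is not timed.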

/**
 * This method consumes quota associated with the specified cost
 * recorded in the usageMap. If the cost pushes usage over the limit,
 * the method has no side effects and a QuotaException is thrown.
 * <p/>
 * Quota limits and Cost metrics are loosely coupled in that a Quota limit
 * can be set for a narrow set of metrics. Only these metrics are used
 * for limit enforcement. All metrics are tracked in usage.
 * <p>
 * Note: it is assumed that locks preventing concurrency on this structure
 * are held externally, or are managed through optimistic concurrency/retry
 * on the container that owns the ResourceTicket object (normally the project).
 * </p>
 *
 * @param resourceTicketId - id of the resource ticket
 * @param cost             - the cost object representing how much will be consumed
 * @throws QuotaException when quota allocation fails
 */
@Override
@Transactional
public void consumeQuota(String resourceTicketId, QuotaCost cost) throws QuotaException {

    Stopwatch resourceTicketWatch = Stopwatch.createStarted();
    ResourceTicketEntity resourceTicket = resourceTicketDao.loadWithUpgradeLock(resourceTicketId);
    resourceTicketWatch.stop();
    logger.info("consumeQuota for resourceTicket {}, lock obtained in {} milliseconds", resourceTicket.getId(),
            resourceTicketWatch.elapsed(TimeUnit.MILLISECONDS));

    // first, whip through the cost's actualCostKeys and
    // compute the new usage. then, if usage is ok, commit
    // the new usage values and then update rawUsage
    List<QuotaLineItemEntity> newUsage = new ArrayList<>();
    for (String key : cost.getCostKeys()) {
        if (!resourceTicket.getUsageMap().containsKey(key)) {
            // make sure the usage map has appropriate entries; it's only initialized
            // with keys from the limit set
            resourceTicket.getUsageMap().put(key,
                    new QuotaLineItemEntity(key, 0.0, cost.getCost(key).getUnit()));
        }

        // capture current usage into a new object
        QuotaLineItemEntity qli = new QuotaLineItemEntity(key, resourceTicket.getUsageMap().get(key).getValue(),
                resourceTicket.getUsageMap().get(key).getUnit());
        QuotaLineItemEntity computedUsage = qli.add(cost.getCost(key));
        newUsage.add(computedUsage);
    }

    // now compare newUsage against limits. if usage > limit, throw with no
    // side effects. otherwise, apply the new usage values, then blindly update rawUsage
    for (QuotaLineItemEntity qli : newUsage) {
        // only enforce limits if the usage entry is covered by
        // a limit
        if (resourceTicket.getLimitMap().containsKey(qli.getKey())) {
            // test to see if the limit is less than the computed
            // new usage. if it is, then abort
            if (resourceTicket.getLimitMap().get(qli.getKey()).compareTo(qli) < 0) {
                // report the limit, the current usage, and the newly computed usage
                throw new QuotaException(resourceTicket.getLimitMap().get(qli.getKey()),
                        resourceTicket.getUsageMap().get(qli.getKey()), qli);
            }
        }
    }

    // if we made it this far, commit the new usage
    for (QuotaLineItemEntity qli : newUsage) {
        resourceTicket.getUsageMap().put(qli.getKey(), qli);
    }
}

From source file:ru.releng.shameonyou.core.Sampler.java
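
Here a stopwatch started just before an asynchronous HTTP request is stopped inside the completion callbacks, so the recorded response time covers the full round trip.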

@Override
public void run() {
    if (state.get() == State.STARTED) {
        Stopwatch responseTimeStopwatch = Stopwatch.createStarted();
        boolean preparedSuccessfully = false;
        recordQueueValue(queueLengthRecorder, inFlight.incrementAndGet());
        try {
            asyncHttpClient.prepareGet(target.getUrl()).execute(new AsyncCompletionHandler<Void>() {

                @Override
                public Void onCompleted(Response response) throws Exception {
                    long responseTime = responseTimeStopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
                    recordTimeValue(responseTimeRecorder, responseTime);
                    recordQueueValue(queueLengthRecorder, inFlight.decrementAndGet());
                    if (response.getStatusCode() >= 200 && response.getStatusCode() < 300) {
                        successes.incrementAndGet();
                    } else {
                        totalErrors.incrementAndGet();
                        errors.incrementAndGet();
                    }
                    LOG.info("Response {} from {} in {} ms", response.getStatusCode(), target.getUrl(),
                            responseTime);
                    return null;
                }

                @Override
                public void onThrowable(Throwable t) {
                    long responseTime = responseTimeStopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
                    recordTimeValue(responseTimeRecorder, responseTime);
                    recordQueueValue(queueLengthRecorder, inFlight.decrementAndGet());
                    totalErrors.incrementAndGet();
                    errors.incrementAndGet();
                    LOG.info("Failure [{}] from {} in {} ms", t.toString(), target.getUrl(), responseTime);
                }
            });
            preparedSuccessfully = true;
        } finally {
            if (!preparedSuccessfully) {
                recordQueueValue(queueLengthRecorder, inFlight.decrementAndGet());
                totalErrors.incrementAndGet();
                errors.incrementAndGet();
            }
        }
    }
}

From source file:com.amazon.kinesis.streaming.agent.tailing.KinesisSender.java
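
This sender times each PutRecords call and accumulates the latency in a finally block, so that calls which fail with an exception are measured as well.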

@Override
protected BufferSendResult<KinesisRecord> attemptSend(RecordBuffer<KinesisRecord> buffer) {
    activePutRecordsCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "KinesisStream:" + getDestination());
    try {
        BufferSendResult<KinesisRecord> sendResult = null;
        List<PutRecordsRequestEntry> requestRecords = new ArrayList<>();
        for (KinesisRecord data : buffer) {
            PutRecordsRequestEntry record = new PutRecordsRequestEntry();
            record.setData(data.data());
            record.setPartitionKey(data.partitionKey());
            requestRecords.add(record);
        }
        PutRecordsRequest request = new PutRecordsRequest();
        request.setStreamName(getDestination());
        request.setRecords(requestRecords);
        PutRecordsResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalPutRecordsCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to kinesis stream {}...", flow.getId(), buffer,
                    getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getKinesisClient().putRecords(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalPutRecordsServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalPutRecordsOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalPutRecordsLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
            for (final PutRecordsResultEntry responseEntry : result.getRecords()) {
                final PutRecordsRequestEntry record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent to kinesis stream {}: {}. Failed records: {}", flow.getId(),
                    buffer, getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from kinesis stream {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activePutRecordsCalls.decrementAndGet();
    }
}

From source file:com.palantir.atlasdb.keyvalue.cassandra.jmx.CassandraJmxCompactionManager.java
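
The final example passes the stopped stopwatch straight to the logger, relying on Stopwatch#toString to render a human-readable elapsed time.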

private boolean executeInParallel(ExecutorService exec, List<? extends Callable<Void>> tasks,
        long timeoutInSeconds) throws InterruptedException, TimeoutException {
    Stopwatch stopWatch = Stopwatch.createStarted();
    List<Future<Void>> futures = exec.invokeAll(tasks, timeoutInSeconds, TimeUnit.SECONDS);

    for (Future<Void> f : futures) {
        if (f.isCancelled()) {
            log.error("Task execution timeouts in {} seconds. Timeout seconds:{}.", stopWatch.stop(),
                    timeoutInSeconds);/*from www . j a  v  a2 s.  c om*/
            throw new TimeoutException(
                    String.format("Task execution timeouts in {} seconds. Timeout seconds:{}.",
                            stopWatch.stop(), timeoutInSeconds));
        }

        try {
            f.get();
        } catch (ExecutionException e) {
            Throwable t = e.getCause();
            if (t instanceof UndeclaredThrowableException) {
                log.error("Major LCS compactions are only supported against C* 2.2+; "
                        + "you will need to manually re-arrange SSTables into L0 "
                        + "if you want all deleted data immediately removed from the cluster.");
            }
            log.error("Failed to complete tasks.", e);
            return false;
        }
    }

    log.info("All tasks completed in {}.", stopWatch.stop());
    return true;
}