Example usage for com.google.common.base Stopwatch elapsed

List of usage examples for com.google.common.base Stopwatch elapsed

Introduction

On this page you can find example usages of com.google.common.base Stopwatch elapsed, collected from open source projects.

Prototype

@CheckReturnValue
public long elapsed(TimeUnit desiredUnit) 

Document

Returns the current elapsed time shown on this stopwatch, expressed in the desired time unit, with any fraction rounded down.
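Before the project examples below, here is a minimal, self-contained sketch of the method in isolation. The class name, sleep duration, and printed values are illustrative only, assuming Guava 15+ (for Stopwatch.createStarted()) is on the classpath.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchElapsedDemo {
    public static void main(String[] args) throws InterruptedException {
        // Create and start a stopwatch in one call.
        Stopwatch stopwatch = Stopwatch.createStarted();

        // Simulate some work whose duration we want to measure.
        Thread.sleep(250);

        // elapsed(TimeUnit) reports the running total in the requested unit,
        // rounding any fraction down; the stopwatch keeps running afterwards.
        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        long seconds = stopwatch.elapsed(TimeUnit.SECONDS); // 0, since ~250 ms rounds down

        System.out.println("Elapsed: " + millis + " ms (" + seconds + " s)");

        // stop() freezes the value that later elapsed(...) calls report.
        stopwatch.stop();
    }
}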

Usage

From source file:org.brekka.pegasus.core.services.impl.PegasusPrincipalServiceImpl.java

@Override
@Transactional()
public void logout(final PegasusPrincipal pegasusPrincipal) {
    synchronized (pegasusPrincipal) {
        MemberContextImpl memberContext = (MemberContextImpl) pegasusPrincipal.getMemberContext();
        Stopwatch sw = Stopwatch.createStarted();
        if (memberContext != null) {
            List<AuthenticatedPrincipal> authenticatedPrincipals = memberContext.clearVaults();
            for (AuthenticatedPrincipal authenticatedPrincipal : authenticatedPrincipals) {
                this.phalanxService.logout(authenticatedPrincipal);
            }
        }
        ((PegasusPrincipalImpl) pegasusPrincipal).setMemberContext(null);
        if (log.isInfoEnabled()) {
            log.info(String.format("Pegasus logout for '%s' took %d ms", pegasusPrincipal.getName(),
                    sw.elapsed(TimeUnit.MILLISECONDS)));
        }
    }
}

From source file:com.palantir.atlasdb.sweep.BackgroundSweeperImpl.java

private boolean runOnce() {
    SweepProgressRowResult progress = txManager
            .runTaskWithRetry(new RuntimeTransactionTask<SweepProgressRowResult>() {
                @Override
                public SweepProgressRowResult execute(Transaction t) {
                    SweepProgressTable progressTable = tableFactory.getSweepProgressTable(t);
                    SweepProgressRowResult result = progressTable.getRow(SweepProgressRow.of(0)).orNull();
                    if (result == null) {
                        result = chooseNextTableToSweep(new SweepTransaction(t,
                                sweepRunner.getSweepTimestamp(SweepStrategy.CONSERVATIVE)));
                    }
                    return result;
                }
            });
    if (progress == null) {
        // Don't change this log statement. It's parsed by test automation code.
        log.debug("Skipping sweep because no table has enough new writes to be worth sweeping at the moment.");
        return false;
    }
    int batchSize = Math.max(1, (int) (sweepBatchSize.get() * batchSizeMultiplier));
    Stopwatch watch = Stopwatch.createStarted();
    try {
        SweepResults results = sweepRunner.run(progress.getFullTableName(), batchSize, progress.getStartRow());
        log.debug("Swept {} unique cells from {} starting at {} and performed {} deletions in {} ms.",
                results.getCellsExamined(), progress.getFullTableName(),
                progress.getStartRow() == null ? "0" : PtBytes.encodeHexString(progress.getStartRow()),
                results.getCellsDeleted(), watch.elapsed(TimeUnit.MILLISECONDS));
        saveSweepResults(progress, results);
        return true;
    } catch (RuntimeException e) {
        // Error logged at a higher log level above.
        log.debug("Failed to sweep {} with batch size {} starting from row {}", progress.getFullTableName(),
                batchSize,
                progress.getStartRow() == null ? "0" : PtBytes.encodeHexString(progress.getStartRow()));
        throw e;
    }
}

From source file:org.apache.drill.jdbc.test.JdbcTestActionBase.java

protected void testAction(JdbcAction action, long rowcount) throws Exception {
    int rows = 0;
    Stopwatch watch = new Stopwatch().start();
    ResultSet r = action.getResult(connection);
    boolean first = true;
    while (r.next()) {
        rows++;
        ResultSetMetaData md = r.getMetaData();
        if (first == true) {
            for (int i = 1; i <= md.getColumnCount(); i++) {
                System.out.print(md.getColumnName(i));
                System.out.print('\t');
            }
            System.out.println();
            first = false;
        }

        for (int i = 1; i <= md.getColumnCount(); i++) {
            System.out.print(r.getObject(i));
            System.out.print('\t');
        }
        System.out.println();
    }

    System.out.println(String.format("Query completed in %d millis.", watch.elapsed(TimeUnit.MILLISECONDS)));

    if (rowcount != -1) {
        Assert.assertEquals((long) rowcount, (long) rows);
    }

    System.out.println("\n\n\n");

}

From source file:org.hashtrees.manager.HashTreesManager.java

public void synch(final ServerName sn, final long treeId, boolean doAuthenticate, SyncType syncType)
        throws IOException, SynchNotAllowedException {
    boolean synchAllowed = doAuthenticate ? authenticator.canSynch(localServer, sn) : true;
    Pair<ServerName, Long> hostNameAndTreeId = Pair.create(sn, treeId);
    if (synchAllowed) {
        boolean synced = false;
        SyncDiffResult result = null;
        notifier.preSync(treeId, sn);
        try {
            LOG.info("Syncing {}.", hostNameAndTreeId);
            Stopwatch watch = Stopwatch.createStarted();
            HashTreesRemoteClient remoteSyncClient = getHashTreeSyncClient(sn);
            result = hashTrees.synch(treeId, remoteSyncClient, syncType);
            LOG.info("Synch result for {} - {}", hostNameAndTreeId, result);
            watch.stop();
            LOG.info("Time taken for syncing ({}) (in ms) : {}", hostNameAndTreeId,
                    watch.elapsed(TimeUnit.MILLISECONDS));
            LOG.info("Syncing {} complete.", hostNameAndTreeId);
            synced = true;
        } catch (TException e) {
            LOG.error("Unable to synch remote hash tree server {} : {}", hostNameAndTreeId, e);
        } finally {
            notifier.postSync(treeId, sn, result, synced);
        }
    } else {
        LOG.error("Synch is not allowed between {} and {}", localServer, sn);
        throw new SynchNotAllowedException(localServer, sn);
    }
}

From source file:demos.BatchInsert.java

public void run() {
    try {
        logger.info("Preparing to insert metric data points");

        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("demo");
        PreparedStatement insert = session
                .prepare("insert into metric_data (metric_id, time, value) values (?, ?, ?)");
        Random random = new Random();
        DateTime time = DateTime.now().minusYears(1);
        final CountDownLatch latch = new CountDownLatch(NUM_INSERTS / BATCH_SIZE);

        FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {
            @Override
            public void onSuccess(ResultSet result) {
                latch.countDown();
            }

            @Override
            public void onFailure(Throwable t) {
                logger.warn("There was an error inserting data", t);
                latch.countDown();
            }
        };

        Stopwatch stopwatch = new Stopwatch().start();
        BatchStatement batch = new BatchStatement();
        for (int i = 0; i < NUM_INSERTS; ++i) {
            String metricId = "metric-" + Math.abs(random.nextInt() % NUM_METRICS);
            double value = random.nextDouble();
            batch.add(insert.bind(metricId, time.toDate(), value));
            time = time.plusSeconds(10);
            if (batch.size() == BATCH_SIZE) {
                ResultSetFuture future = session.executeAsync(batch);
                Futures.addCallback(future, callback);
                batch = new BatchStatement();
            }
        }
        latch.await();
        stopwatch.stop();

        logger.info("Finished inserting {} data points in {} ms", NUM_INSERTS,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (InterruptedException e) {
        logger.info("There was an interrupt while waiting for inserts to complete");
    }
}

From source file:com.spotify.heroic.metric.astyanax.AstyanaxBackend.java

private AsyncFuture<FetchData> fetchDataPoints(final Series series, DateRange range,
        final FetchQuotaWatcher watcher) {
    return context.doto(ctx -> {
        return async.resolved(prepareQueries(series, range)).lazyTransform(result -> {
            final List<AsyncFuture<FetchData>> queries = new ArrayList<>();

            for (final PreparedQuery q : result) {
                queries.add(async.call(new Callable<FetchData>() {
                    @Override
                    public FetchData call() throws Exception {
                        final Stopwatch w = Stopwatch.createStarted();

                        final RowQuery<MetricsRowKey, Integer> query = ctx.client.prepareQuery(METRICS_CF)
                                .getRow(q.rowKey).autoPaginate(true).withColumnRange(q.columnRange);

                        final List<Point> data = q.rowKey.buildPoints(query.execute().getResult());

                        if (!watcher.readData(data.size())) {
                            throw new IllegalArgumentException("data limit quota violated");
                        }

                        final QueryTrace trace = new QueryTrace(FETCH_SEGMENT, w.elapsed(TimeUnit.NANOSECONDS));
                        final List<Long> times = ImmutableList.of(trace.getElapsed());
                        final List<MetricCollection> groups = ImmutableList.of(MetricCollection.points(data));
                        return new FetchData(series, times, groups, trace);
                    }
                }, pools.read()).onDone(reporter.reportFetch()));
            }

            return async.collect(queries, FetchData.collect(FETCH, series));
        });
    });
}

From source file:com.metamx.druid.indexing.coordinator.RemoteTaskRunner.java

/**
 * Creates a ZK entry under a specific path associated with a worker. The worker is responsible for
 * removing the task ZK entry and creating a task status ZK entry.
 *
 * @param theWorker          The worker the task is assigned to
 * @param taskRunnerWorkItem The task to be assigned
 */
private void announceTask(Worker theWorker, RemoteTaskRunnerWorkItem taskRunnerWorkItem) throws Exception {
    final Task task = taskRunnerWorkItem.getTask();

    log.info("Coordinator asking Worker[%s] to add task[%s]", theWorker.getHost(), task.getId());

    byte[] rawBytes = jsonMapper.writeValueAsBytes(task);
    if (rawBytes.length > config.getMaxNumBytes()) {
        throw new ISE("Length of raw bytes for task too large[%,d > %,d]", rawBytes.length,
                config.getMaxNumBytes());
    }

    String taskPath = JOINER.join(config.getIndexerTaskPath(), theWorker.getHost(), task.getId());

    if (cf.checkExists().forPath(taskPath) == null) {
        cf.create().withMode(CreateMode.EPHEMERAL).forPath(taskPath, rawBytes);
    }

    RemoteTaskRunnerWorkItem workItem = pendingTasks.remove(task.getId());
    if (workItem == null) {
        log.makeAlert("WTF?! Got a null work item from pending tasks?! How can this be?!")
                .addData("taskId", task.getId()).emit();
        return;
    }

    RemoteTaskRunnerWorkItem newWorkItem = workItem.withWorker(theWorker);
    runningTasks.put(task.getId(), newWorkItem);
    log.info("Task %s switched from pending to running (on [%s])", task.getId(),
            newWorkItem.getWorker().getHost());

    // Syncing state with Zookeeper - don't assign new tasks until the task we just assigned is actually running
    // on a worker - this avoids overflowing a worker with tasks
    Stopwatch timeoutStopwatch = new Stopwatch();
    timeoutStopwatch.start();
    synchronized (statusLock) {
        while (!isWorkerRunningTask(theWorker, task)) {
            statusLock.wait(config.getTaskAssignmentTimeoutDuration().getMillis());
            if (timeoutStopwatch.elapsed(TimeUnit.MILLISECONDS) >= config.getTaskAssignmentTimeoutDuration()
                    .getMillis()) {
                log.error("Something went wrong! %s never ran task %s after %s!", theWorker.getHost(),
                        task.getId(), config.getTaskAssignmentTimeoutDuration());

                taskRunnerWorkItem.setResult(TaskStatus.failure(taskRunnerWorkItem.getTask().getId()));
                break;
            }
        }
    }
}

From source file:org.haiku.haikudepotserver.job.LocalJobServiceImpl.java

@Override
public boolean awaitAllJobsFinishedUninterruptibly(long timeout) {
    Preconditions.checkArgument(timeout > 0);
    Stopwatch stopwatch = Stopwatch.createStarted();
    EnumSet<JobSnapshot.Status> earlyStatuses = EnumSet.of(JobSnapshot.Status.QUEUED,
            JobSnapshot.Status.STARTED);

    while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < timeout
            && !filteredInternalJobs(null, earlyStatuses).isEmpty()) {
        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
    }

    return filteredInternalJobs(null, earlyStatuses).isEmpty();
}

From source file:com.google.api.ads.adwords.jaxws.extensions.processors.onfile.ReportProcessorOnFile.java

/**
 * Generate all the mapped reports to the given account IDs.
 *
 * @param dateRangeType the date range type.
 * @param dateStart the starting date.
 * @param dateEnd the ending date.
 * @param accountIdsSet the account IDs.
 * @param properties the properties file
 * @throws Exception error reaching the API.
 */
@Override
public void generateReportsForMCC(String userId, String mccAccountId,
        ReportDefinitionDateRangeType dateRangeType, String dateStart, String dateEnd, Set<Long> accountIdsSet,
        Properties properties) throws Exception {

    LOGGER.info("*** Retrieving account IDs ***");

    if (accountIdsSet == null || accountIdsSet.size() == 0) {
        accountIdsSet = this.retrieveAccountIds(userId, mccAccountId);
    } else {
        LOGGER.info("Accounts loaded from file.");
    }

    AdWordsSessionBuilderSynchronizer sessionBuilder = new AdWordsSessionBuilderSynchronizer(
            authenticator.authenticate(userId, mccAccountId, false));

    LOGGER.info("*** Generating Reports for " + accountIdsSet.size() + " accounts ***");

    Stopwatch stopwatch = Stopwatch.createStarted();

    Set<ReportDefinitionReportType> reports = this.csvReportEntitiesMapping.getDefinedReports();

    // reports
    for (ReportDefinitionReportType reportType : reports) {
        if (properties.containsKey(reportType.name())) {
            this.downloadAndProcess(userId, mccAccountId, sessionBuilder, reportType, dateRangeType, dateStart,
                    dateEnd, accountIdsSet, properties);
        }
    }

    this.multipleClientReportDownloader.finalizeExecutorService();

    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in " + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000)
            + " seconds ***\n");
}

From source file:org.haiku.haikudepotserver.job.LocalJobServiceImpl.java

@Override
public boolean awaitJobFinishedUninterruptibly(String guid, long timeout) {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(guid), "a guid must be supplied");
    Preconditions.checkArgument(timeout > 0);
    Stopwatch stopwatch = Stopwatch.createStarted();
    EnumSet<JobSnapshot.Status> earlyStatuses = EnumSet.of(JobSnapshot.Status.QUEUED,
            JobSnapshot.Status.STARTED);

    while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < timeout
            && tryGetJob(guid).filter((j) -> earlyStatuses.contains(j.getStatus())).isPresent()) {
        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
    }

    return tryGetJob(guid).filter((j) -> earlyStatuses.contains(j.getStatus())).isPresent();
}