Example usage for com.google.common.base Stopwatch createStarted


Introduction

This page lists usage examples for com.google.common.base Stopwatch#createStarted, collected from open-source projects.

Prototype

@CheckReturnValue
public static Stopwatch createStarted() 


Document

Creates (and starts) a new stopwatch, using System.nanoTime() as its time source.
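
A minimal, self-contained sketch of the typical create/measure/stop cycle (the class and variable names here are illustrative, not taken from the projects below):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() returns a stopwatch that is already running.
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(120); // stand-in for the work being timed

        // elapsed(...) can be read while the stopwatch is still running.
        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);

        // stop() freezes the elapsed time; toString() renders it in a human-readable form, e.g. "123.4 ms".
        stopwatch.stop();
        System.out.println("work took " + millis + " ms (" + stopwatch + ")");
    }
}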

Usage

From source file:org.lenskit.knn.item.model.NormalizingItemItemModelBuilder.java

@SuppressWarnings("deprecation")
@Override
public SimilarityMatrixModel get() {
    logger.debug("building item-item model");

    LongSortedSet itemUniverse = buildContext.getItems();

    final int nitems = itemUniverse.size();

    SortedKeyIndex itemDomain = SortedKeyIndex.fromCollection(itemUniverse);
    assert itemDomain.size() == nitems;
    List<ImmutableSparseVector> matrix = Lists.newArrayListWithCapacity(itemDomain.size());

    // working space for accumulating each row (reuse between rows)
    MutableSparseVector currentRow = MutableSparseVector.create(itemUniverse);
    Stopwatch timer = Stopwatch.createStarted();

    for (int i = 0; i < nitems; i++) {
        assert matrix.size() == i;
        final long rowItem = itemDomain.getKey(i);
        final SparseVector vec1 = buildContext.itemVector(rowItem);

        // Take advantage of sparsity if we can
        LongIterator neighbors = iterationStrategy.neighborIterator(buildContext, rowItem, false);
        currentRow.fill(0);

        // Compute similarities and populate the vector
        while (neighbors.hasNext()) {
            final long colItem = neighbors.nextLong();
            final SparseVector vec2 = buildContext.itemVector(colItem);
            assert currentRow.containsKey(colItem);
            currentRow.set(colItem, similarity.similarity(rowItem, vec1, colItem, vec2));
        }

        // Remove the current item (it is not its own neighbor)
        currentRow.unset(rowItem);

        // Normalize and truncate the row
        MutableSparseVector normalized = rowNormalizer.normalize(rowItem, currentRow, null);
        truncator.truncate(normalized);

        matrix.add(normalized.immutable());
    }

    timer.stop();
    logger.info("built model for {} items in {}", nitems, timer);

    return new SimilarityMatrixModel(itemDomain, matrix);
}

From source file:com.facebook.buck.distributed.DistBuildClientExecutor.java

public int executeAndPrintFailuresToEventBus(final WeightedListeningExecutorService executorService,
        ProjectFilesystem projectFilesystem, FileHashCache fileHashCache, BuckEventBus eventBus)
        throws IOException, InterruptedException {

    BuildJob job = distBuildService.createBuild();
    final BuildId id = job.getBuildId();
    LOG.info("Created job. Build id = " + id.getId());
    logDebugInfo(job);

    List<ListenableFuture<Void>> asyncJobs = new LinkedList<>();
    LOG.info("Uploading local changes.");
    asyncJobs.add(distBuildService.uploadMissingFiles(buildJobState.fileHashes, executorService));

    LOG.info("Uploading target graph.");
    asyncJobs.add(distBuildService.uploadTargetGraph(buildJobState, id, executorService));

    LOG.info("Uploading buck dot-files.");
    asyncJobs.add(distBuildService.uploadBuckDotFiles(id, projectFilesystem, fileHashCache, executorService));

    try {
        Futures.allAsList(asyncJobs).get();
    } catch (ExecutionException e) {
        LOG.error("Upload failed.");
        throw new RuntimeException(e);
    }

    distBuildService.setBuckVersion(id, buckVersion);
    LOG.info("Set Buck Version. Build status: " + job.getStatus().toString());

    job = distBuildService.startBuild(id);
    LOG.info("Started job. Build status: " + job.getStatus().toString());
    logDebugInfo(job);

    Stopwatch stopwatch = Stopwatch.createStarted();
    // Keep polling until the build is complete or failed.
    do {
        job = distBuildService.getCurrentBuildJobState(id);
        LOG.info("Got build status: " + job.getStatus().toString());

        DistBuildStatus distBuildStatus = prepareStatusFromJob(job)
                .setETAMillis(MAX_BUILD_DURATION_MILLIS - stopwatch.elapsed(TimeUnit.MILLISECONDS)).build();
        eventBus.post(new DistBuildStatusEvent(distBuildStatus));

        try {
            // TODO(shivanker): Get rid of sleeps in methods which we want to unit test
            Thread.sleep(millisBetweenStatusPoll);
        } catch (InterruptedException e) {
            LOG.error(e, "BuildStatus polling sleep call has been interrupted unexpectedly.");
        }
    } while (!(job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY)
            || job.getStatus().equals(BuildStatus.FAILED)));

    LOG.info("Build was " + (job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY) ? "" : "not ")
            + "successful!");
    logDebugInfo(job);

    DistBuildStatus distBuildStatus = prepareStatusFromJob(job).setETAMillis(0).build();
    eventBus.post(new DistBuildStatusEvent(distBuildStatus));

    return job.getStatus().equals(BuildStatus.FINISHED_SUCCESSFULLY) ? 0 : 1;
}

From source file:org.apache.drill.exec.store.parquet.stat.ParquetFooterStatCollector.java

@Override
public Map<SchemaPath, ColumnStatistics> collectColStat(Set<SchemaPath> fields) {
    Stopwatch timer = Stopwatch.createStarted();

    ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility
            .detectCorruptDates(footer, new ArrayList<>(fields), autoCorrectCorruptDates);

    // map from column name to ColumnDescriptor
    Map<SchemaPath, ColumnDescriptor> columnDescMap = new HashMap<>();

    // map from column name to ColumnChunkMetaData
    final Map<SchemaPath, ColumnChunkMetaData> columnChkMetaMap = new HashMap<>();

    // map from column name to MajorType
    final Map<SchemaPath, TypeProtos.MajorType> columnTypeMap = new HashMap<>();

    // map from column name to SchemaElement
    final Map<SchemaPath, SchemaElement> schemaElementMap = new HashMap<>();

    // map from column name to column statistics.
    final Map<SchemaPath, ColumnStatistics> statMap = new HashMap<>();

    final org.apache.parquet.format.FileMetaData fileMetaData = new ParquetMetadataConverter()
            .toParquetMetadata(ParquetFileWriter.CURRENT_VERSION, footer);

    for (final ColumnDescriptor column : footer.getFileMetaData().getSchema().getColumns()) {
        final SchemaPath schemaPath = SchemaPath.getCompoundPath(column.getPath());
        if (fields.contains(schemaPath)) {
            columnDescMap.put(schemaPath, column);
        }
    }

    for (final SchemaElement se : fileMetaData.getSchema()) {
        final SchemaPath schemaPath = SchemaPath.getSimplePath(se.getName());
        if (fields.contains(schemaPath)) {
            schemaElementMap.put(schemaPath, se);
        }
    }

    for (final ColumnChunkMetaData colMetaData : footer.getBlocks().get(rowGroupIndex).getColumns()) {
        final SchemaPath schemaPath = SchemaPath.getCompoundPath(colMetaData.getPath().toArray());
        if (fields.contains(schemaPath)) {
            columnChkMetaMap.put(schemaPath, colMetaData);
        }
    }

    for (final SchemaPath path : fields) {
        if (columnDescMap.containsKey(path) && schemaElementMap.containsKey(path)
                && columnChkMetaMap.containsKey(path)) {
            ColumnDescriptor columnDesc = columnDescMap.get(path);
            SchemaElement se = schemaElementMap.get(path);
            ColumnChunkMetaData metaData = columnChkMetaMap.get(path);

            TypeProtos.MajorType type = ParquetToDrillTypeConverter.toMajorType(columnDesc.getType(),
                    se.getType_length(), getDataMode(columnDesc), se, options);

            columnTypeMap.put(path, type);

            Statistics stat = metaData.getStatistics();
            if (type.getMinorType() == TypeProtos.MinorType.DATE) {
                stat = convertDateStatIfNecessary(metaData.getStatistics(), containsCorruptDates);
            }

            statMap.put(path, new ColumnStatistics(stat, type));
        } else {
            final String columnName = path.getRootSegment().getPath();
            if (implicitColValues.containsKey(columnName)) {
                TypeProtos.MajorType type = Types.required(TypeProtos.MinorType.VARCHAR);
                Statistics stat = new BinaryStatistics();
                stat.setNumNulls(0);
                byte[] val = implicitColValues.get(columnName).getBytes();
                stat.setMinMaxFromBytes(val, val);
                statMap.put(path, new ColumnStatistics(stat, type));
            }
        }
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Took {} ms to column statistics for row group", timer.elapsed(TimeUnit.MILLISECONDS));
    }

    return statMap;
}

From source file:suneido.DbTools.java

public static Status checkPrint(String dbFilename) {
    System.out.println("Checking " + (dbFilename.endsWith(".tmp") ? "" : dbFilename + " ") + "...");
    Stopwatch sw = Stopwatch.createStarted();
    Status result = Dbpkg.check(dbFilename, printObserver);
    System.out.println("Checked in " + sw);
    return result;
}

From source file:com.google.api.ads.adwords.awalerting.processor.AlertRulesProcessor.java

/**
 * Process the ReportData list with the alert rules, each report with all rules per thread.
 *
 * @param reports the list of ReportData to run each alert action against
 */
public void processReports(List<ReportData> reports) throws AlertProcessingException {
    // Create one thread for each report, and apply all alert rules in sequence
    Stopwatch stopwatch = Stopwatch.createStarted();

    CountDownLatch latch = new CountDownLatch(reports.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    for (ReportData report : reports) {
        RunnableAlertRulesProcessor rulesProcessor = new RunnableAlertRulesProcessor(report, rules,
                alertMessage);
        executeRunnableAlertRulesProcessor(executorService, rulesProcessor, latch);
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AlertProcessingException("AlertRulesProcessor encounters InterruptedException.", e);
    }

    executorService.shutdown();
    stopwatch.stop();

    LOGGER.info("*** Processed {} rules and add alert messages on {} reports in {} seconds.", rules.size(),
            reports.size(), stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000);
}

From source file:org.n52.youngs.control.impl.SingleThreadBulkRunner.java

@Override
public Report load(final Sink sink) {
    this.sink = sink;
    Objects.requireNonNull(source);
    Objects.requireNonNull(mapper);
    Objects.requireNonNull(this.sink);

    log.info("Starting harvest from {} to {} with {}", source, this.sink, mapper);
    Report report = new ReportImpl();

    try {
        boolean prepareSink = sink.prepare(mapper.getMapper());
        if (!prepareSink) {
            String msg = "The sink could not be prepared. Stopping load, please check the logs.";
            log.error(msg);
            report.addMessage(msg);
            return report;
        }
    } catch (SinkError e) {
        log.error("Problem preparing sink", e);
        report.addMessage(String.format("Problem preparing sink: %s", e.getMessage()));
        return report;
    }

    final Stopwatch timer = Stopwatch.createStarted();
    long pageStart = startPosition;
    long count = source.getRecordCount();
    final long limit = Math.min(recordsLimit + startPosition, count);

    final Stopwatch sourceTimer = Stopwatch.createUnstarted();
    final Stopwatch mappingTimer = Stopwatch.createUnstarted();
    final Stopwatch sinkTimer = Stopwatch.createUnstarted();
    final Stopwatch currentBulkTimer = Stopwatch.createUnstarted();
    double bulkTimeAvg = 0d;
    long runNumber = 0;

    while (pageStart <= limit) {
        currentBulkTimer.start();

        long recordsLeft = limit - pageStart + 1;
        long size = Math.min(recordsLeft, bulkSize);
        if (size <= 0) {
            break;
        }
        log.info("### [{}] Requesting {} records from {} starting at {}, last requested record will be {} ###",
                runNumber, size, source.getEndpoint(), pageStart, limit);

        try {
            sourceTimer.start();
            Collection<SourceRecord> records = source.getRecords(pageStart, size, report);
            sourceTimer.stop();

            log.debug("Mapping {} retrieved records.", records.size());
            mappingTimer.start();
            List<SinkRecord> mappedRecords = records.stream().map(record -> {
                try {
                    return mapper.map(record);
                } catch (MappingError e) {
                    report.addFailedRecord(record.toString(), "Problem during mapping: " + e.getMessage());
                    return null;
                }
            }).filter(Objects::nonNull).collect(Collectors.toList());
            mappingTimer.stop();

            log.debug("Storing {} mapped records.", mappedRecords.size());
            if (!testRun) {
                sinkTimer.start();
                mappedRecords.forEach(record -> {
                    try {
                        boolean result = sink.store(record);
                        if (result) {
                            report.addSuccessfulRecord(record.getId());
                        } else {
                            report.addFailedRecord(record.getId(), "see sink log");
                        }
                    } catch (SinkError e) {
                        report.addFailedRecord(record.toString(), "Problem during mapping: " + e.getMessage());
                    }
                });
                sinkTimer.stop();
            } else {
                log.info("TESTRUN, created documents are:\n{}", Arrays.toString(mappedRecords.toArray()));
            }

        } catch (RuntimeException e) {
            if (sourceTimer.isRunning()) {
                sourceTimer.stop();
            }
            if (mappingTimer.isRunning()) {
                mappingTimer.stop();
            }
            if (sinkTimer.isRunning()) {
                sinkTimer.stop();
            }

            String msg = String.format("Problem processing records %s to %s: %s", pageStart, pageStart + size,
                    e.getMessage());
            log.error(msg, e);
            report.addMessage(msg);
        }

        pageStart += bulkSize;

        currentBulkTimer.stop();
        bulkTimeAvg = ((bulkTimeAvg * runNumber) + currentBulkTimer.elapsed(TimeUnit.SECONDS))
                / (runNumber + 1);
        updateAndLog(runNumber, (runNumber + 1) * bulkSize, currentBulkTimer.elapsed(TimeUnit.SECONDS),
                bulkTimeAvg);
        currentBulkTimer.reset();

        runNumber++;
    }

    timer.stop();
    log.info("Completed harvesting for {} ({} failed) of {} records in {} minutes",
            report.getNumberOfRecordsAdded(), report.getNumberOfRecordsFailed(), source.getRecordCount(),
            timer.elapsed(TimeUnit.MINUTES));
    log.info("Time spent (minutes): source={}, mapping={}, sink={}", sourceTimer.elapsed(TimeUnit.MINUTES),
            mappingTimer.elapsed(TimeUnit.MINUTES), sinkTimer.elapsed(TimeUnit.MINUTES));

    return report;
}

From source file:com.google.api.ads.adwords.awalerting.processor.AlertActionsProcessor.java

/**
 * Process the ReportData list with alert actions, all reports with each action per thread.
 *
 * @param reports the list of ReportData to run each alert action against.
 */
public void processReports(List<ReportData> reports) throws AlertProcessingException {
    // Create one thread for each AlertAction, and process all reports
    Stopwatch stopwatch = Stopwatch.createStarted();

    CountDownLatch latch = new CountDownLatch(actions.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    for (AlertAction action : actions) {
        RunnableAlertActionProcessor actionProcessor = new RunnableAlertActionProcessor(action, reports);
        executeRunnableAlertActionProcessor(executorService, actionProcessor, latch);
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AlertProcessingException("AlertActionsProcessor encounters InterruptedException.", e);
    }

    executorService.shutdown();
    stopwatch.stop();

    LOGGER.info("*** Processed {} actions on {} reports in {} seconds.", actions.size(), reports.size(),
            stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000);
}

From source file:org.opendaylight.protocol.pcep.pcc.mock.PCCMockCommon.java

private static void checkNumberOfMessages(final int expectedNMessages, final TestingSessionListener listener)
        throws Exception {
    Stopwatch sw = Stopwatch.createStarted();
    while (sw.elapsed(TimeUnit.SECONDS) <= 10) {
        if (expectedNMessages != listener.messages().size()) {
            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        } else {
            return;
        }
    }
    Assert.assertEquals(expectedNMessages, listener.messages().size());
}

From source file:com.palantir.atlasdb.keyvalue.impl.ProfilingKeyValueService.java

@Override
public void addGarbageCollectionSentinelValues(String tableName, Set<Cell> cells) {
    if (log.isTraceEnabled()) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        delegate.addGarbageCollectionSentinelValues(tableName, cells);
        log.trace("Call to KVS.addGarbageCollectionSentinelValues on table {} over {} cells took {} ms.",
                tableName, cells.size(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } else {
        delegate.addGarbageCollectionSentinelValues(tableName, cells);
    }
}

From source file:com.facebook.buck.distributed.MultiSourceContentsProvider.java

private ListenableFuture<Boolean> postLocalFsMaterializationHelper(boolean success,
        BuildJobStateFileHashEntry entry, Path targetAbsPath) {
    if (success) {
        fileMaterializationStatsTracker.recordLocalFileMaterialized();
        LOG.info("Materialized source file using Local Source File Cache: [%s]", targetAbsPath);
        return Futures.immediateFuture(true);
    }

    Stopwatch remoteMaterializationStopwatch = Stopwatch.createStarted();
    return Futures.transformAsync(serverContentsProvider.materializeFileContentsAsync(entry, targetAbsPath),
            (remoteSuccess) -> postRemoteMaterializationHelper(remoteSuccess, entry, targetAbsPath,
                    remoteMaterializationStopwatch.elapsed(TimeUnit.MILLISECONDS)),
            executorService);
}