Example usage for com.google.common.base Stopwatch createStarted

Introduction

This page collects example usages of com.google.common.base.Stopwatch#createStarted, drawn from open-source projects.

Prototype

@CheckReturnValue
public static Stopwatch createStarted() 

Document

Creates (and starts) a new stopwatch using System#nanoTime as its time source.
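Before the project examples below, a minimal, self-contained sketch of the basic pattern (standard Guava and JDK APIs only; the class name is illustrative):

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() returns a stopwatch that is already running.
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(100); // stand-in for the work being measured

        // elapsed(...) can be read while the stopwatch is still running.
        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        System.out.println("elapsed: " + millis + " ms");

        // stop() freezes the reading; toString() renders it in human-readable units.
        stopwatch.stop();
        System.out.println("total: " + stopwatch);
    }
}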

Usage

From source file: org.apache.jackrabbit.oak.plugins.document.secondary.SecondaryStoreObserver.java
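A stopwatch times how long each content change takes to mirror into the secondary store; the elapsed nanoseconds feed a TimerStats metric, and the stopwatch's toString() is logged when the initial sync completes.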

@Override
public void contentChanged(@Nonnull NodeState root, @Nullable CommitInfo info) {
    //Diff here would also be traversing non visible areas and there
    //diffManyChildren might pose problem for e.g. data under uuid index
    if (!firstEventProcessed) {
        log.info("Starting initial sync");
    }

    Stopwatch w = Stopwatch.createStarted();
    AbstractDocumentNodeState target = (AbstractDocumentNodeState) root;
    NodeState secondaryRoot = nodeStore.getRoot();
    NodeState base = DelegatingDocumentNodeState.wrapIfPossible(secondaryRoot, differ);
    NodeBuilder builder = secondaryRoot.builder();
    ApplyDiff diff = new PathFilteringDiff(builder, pathFilter, target);

    //Copy the root node meta properties
    PathFilteringDiff.copyMetaProperties(target, builder);

    //Apply the rest of properties
    target.compareAgainstBaseState(base, diff);
    try {
        NodeState updatedSecondaryRoot = nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        secondaryObserver.contentChanged(DelegatingDocumentNodeState.wrap(updatedSecondaryRoot, differ));

        TimerStats timer = info == null ? external : local;
        timer.update(w.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

        if (!firstEventProcessed) {
            log.info("Time taken for initial sync {}", w);
            firstEventProcessed = true;
        }
    } catch (CommitFailedException e) {
        //TODO
        log.warn("Commit to secondary store failed", e);
    }
}

From source file: org.apache.geode_examples.cq.Example.java
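Here the stopwatch drives the loop condition itself: puts continue until elapsed(TimeUnit.SECONDS) reaches 20.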

private void startPuttingData(Region region) throws InterruptedException {

    // Example will run for 20 seconds

    Stopwatch stopWatch = Stopwatch.createStarted();

    while (stopWatch.elapsed(TimeUnit.SECONDS) < 20) {

        // 500ms delay to make this easier to follow
        Thread.sleep(500);
        int randomKey = ThreadLocalRandom.current().nextInt(0, 99 + 1);
        int randomValue = ThreadLocalRandom.current().nextInt(0, 100 + 1);
        region.put(randomKey, randomValue);
        System.out.println("Key: " + randomKey + "     Value: " + randomValue);

    }

    stopWatch.stop();

}

From source file: com.spotify.heroic.metric.generated.GeneratedBackend.java
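A single stopwatch started at the top of fetch() supplies the elapsed nanoseconds recorded in the QueryTrace for whichever metric type is generated.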

@Override
public AsyncFuture<FetchData> fetch(MetricType source, Series series, DateRange range,
        FetchQuotaWatcher watcher, QueryOptions options) {
    final Stopwatch w = Stopwatch.createStarted();

    if (source == MetricType.POINT) {
        final List<Point> data = generator.generatePoints(series, range, watcher);
        final QueryTrace trace = new QueryTrace(FETCH, w.elapsed(TimeUnit.NANOSECONDS));
        final ImmutableList<Long> times = ImmutableList.of(trace.getElapsed());
        final List<MetricCollection> groups = ImmutableList.of(MetricCollection.points(data));
        return async.resolved(new FetchData(series, times, groups, trace));
    }

    if (source == MetricType.EVENT) {
        final List<Event> data = generator.generateEvents(series, range, watcher);
        final QueryTrace trace = new QueryTrace(FETCH, w.elapsed(TimeUnit.NANOSECONDS));
        final ImmutableList<Long> times = ImmutableList.of(trace.getElapsed());
        final List<MetricCollection> groups = ImmutableList.of(MetricCollection.events(data));
        return async.resolved(new FetchData(series, times, groups, trace));
    }

    throw new IllegalArgumentException("unsupported source: " + source);
}

From source file: org.apache.bookkeeper.tests.integration.cluster.BookKeeperClusterTestBase.java
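The stopwatch is informational only: the loop polls once per second and logs how many milliseconds have elapsed while the bookie remains registered.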

protected static void waitUntilBookieUnregistered(String bookieName) throws Exception {
    Stopwatch sw = Stopwatch.createStarted();
    while (findIfBookieRegistered(bookieName)) {
        TimeUnit.MILLISECONDS.sleep(1000);
        log.info("Bookie {} is still registered in cluster {} after {} ms elapsed", bookieName,
                bkCluster.getClusterName(), sw.elapsed(TimeUnit.MILLISECONDS));
    }
}

From source file: qa.qcri.nadeef.core.pipeline.DirectIterator.java
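The stopwatch brackets the whole fan-out: rule-evaluation tasks are submitted to a fixed thread pool, the executor is awaited, and the total elapsed milliseconds are appended to the performance report.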

@Override
protected java.util.Iterator<Violation> execute(Collection<Table> blocks) throws Exception {
    Tracer tracer = Tracer.getTracer(DirectIterator.class);
    ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat("iterator-#" + MAX_THREAD_NUM + "-%d")
            .build();
    ExecutorService executor = Executors.newFixedThreadPool(MAX_THREAD_NUM, factory);
    Stopwatch stopwatch = Stopwatch.createStarted();

    ExecutionContext context = getCurrentContext();
    Rule rule = context.getRule();
    NonBlockingCollectionIterator<Violation> output = new NonBlockingCollectionIterator<>();
    try {
        if (rule.supportTwoTables()) {
            // Rule runs on two tables.
            executor.submit(new IteratorCallable(blocks, rule, context.getNewTuples(), output));
        } else {
            // Rule runs on each table.
            for (Table table : blocks)
                executor.submit(
                        new IteratorCallable(Arrays.asList(table), rule, context.getNewTuples(), output));
        }

        // wait until all the tasks are finished
        executor.shutdown();
        while (!executor.awaitTermination(10L, TimeUnit.MINUTES)) {
            // keep waiting until all submitted tasks finish
        }
    } catch (InterruptedException ex) {
        tracer.err("Iterator is interrupted.", ex);
    } finally {
        executor.shutdown();
    }

    PerfReport.appendMetric(PerfReport.Metric.IteratorTime, stopwatch.elapsed(TimeUnit.MILLISECONDS));

    stopwatch.stop();
    return output;
}

From source file: com.persinity.ndt.controller.step.NdtDisableAndUninstall.java
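This longer example combines an outer stopwatch (stTotal) timing the whole uninstall with per-step stopwatches returned by the project's timeOf(...) helper, whose dst() side carries the stopwatch for each unmount.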

@Override
protected void work() {
    if (workInProgress.getAndSet(true)) {
        return;
    }

    final AgentContext agentContext = (AgentContext) getCtx().get(AgentContext.class);
    if (agentContext == null) {
        return;
    }

    RelDb srcNdtDb = null;
    RelDb dstNdtDb = null;
    RelDb srcAppDb = null;
    try {
        srcNdtDb = getController().getRelDbPoolFactory().ndtBridge().src().get();
        dstNdtDb = getController().getRelDbPoolFactory().ndtBridge().dst().get();
        srcAppDb = getController().getRelDbPoolFactory().appBridge().src().get();
        final RelDb srcNdtDbRef = srcNdtDb;
        final RelDb dstNdtDbRef = dstNdtDb;
        final RelDb srcAppDbRef = srcAppDb;

        log.info("Uninstalling NDT at {}, {}, {}", srcAppDb, srcNdtDb, dstNdtDb);
        final Stopwatch stTotal = Stopwatch.createStarted();

        view.logNdtMessage("Uninstalling NDT");
        if (!disableProgress) {
            view.setProgress(PROGRESS_ON);
        }

        log.info("Unmounting cdc agent on {}...", srcAppDb);
        final DirectedEdge<RuntimeException, Stopwatch> cdcAgentUnmountRes = timeOf(
                new Function<Void, RuntimeException>() {
                    @Override
                    public RuntimeException apply(final Void arg) {
                        try {
                            dbAgentExecutor.cdcAgentUnmount(agentContext.getCdcAgent(), srcAppDbRef);
                            srcAppDbRef.commit();
                            return null;
                        } catch (RuntimeException e) {
                            log.error(e, "Source CDC agent unmount failed");
                            return e;
                        }
                    }
                }, null);
        log.info("Unmounted cdc agent for {}", cdcAgentUnmountRes.dst());

        log.info("Unmounting clog agent on {}...", srcNdtDb);
        final DirectedEdge<RuntimeException, Stopwatch> clogAgentUnmountRes = timeOf(
                new Function<Void, RuntimeException>() {
                    @Override
                    public RuntimeException apply(final Void arg) {
                        try {
                            dbAgentExecutor.clogAgentUnmount(agentContext.getSrcClogAgent(), srcNdtDbRef);
                            srcNdtDbRef.commit();
                            return null;
                        } catch (RuntimeException e) {
                            log.error(e, "Source CLOG agent unmount failed");
                            return e;
                        }
                    }
                }, null);
        log.info("Unmounted clog agent for {}", clogAgentUnmountRes.dst());

        log.info("Unmounting clog/schema agents on {}...", dstNdtDb);
        final DirectedEdge<RuntimeException, Stopwatch> dstNdtUnmountRes = timeOf(
                new Function<Void, RuntimeException>() {
                    @Override
                    public RuntimeException apply(final Void arg) {
                        try {
                            dbAgentExecutor.clogAgentUnmount(agentContext.getDstClogAgent(), dstNdtDbRef);
                            dbAgentExecutor.schemaAgentUnmount(agentContext.getDstSchemaAgent(), dstNdtDbRef);
                            dstNdtDbRef.commit();
                            return null;
                        } catch (RuntimeException e) {
                            log.error(e, "Destination agents unmount failed");
                            return e;
                        }
                    }
                }, null);
        log.info("Unmounted clog/schema agents for {}", dstNdtUnmountRes.dst());

        if (cdcAgentUnmountRes.src() != null) {
            throw cdcAgentUnmountRes.src();
        }

        if (clogAgentUnmountRes.src() != null) {
            throw clogAgentUnmountRes.src();
        }

        if (dstNdtUnmountRes.src() != null) {
            throw dstNdtUnmountRes.src();
        }

        stTotal.stop();
        log.info("Uninstalling NDT done for {}", stTotal);
    } finally {
        IoUtils.silentClose(srcNdtDb, dstNdtDb, srcAppDb);
    }

    view.setProgress(PROGRESS_OFF);
    view.logNdtMessage("Migration completed");
}

From source file: com.google.devtools.build.android.AndroidResourceMerger.java
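The try/finally idiom guarantees the merge time is logged even when loadAndMerge throws a MergingException.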

public static UnwrittenMergedAndroidData mergeData(ListeningExecutorService executorService,
        List<? extends SerializedAndroidData> transitive, List<? extends SerializedAndroidData> direct,
        ParsedAndroidData primary, Path primaryManifest, boolean allowPrimaryOverrideAll,
        AndroidDataDeserializer deserializer) throws MergingException {
    Stopwatch timer = Stopwatch.createStarted();
    try {
        AndroidDataMerger merger = AndroidDataMerger.createWithPathDeduplictor(executorService, deserializer);
        return merger.loadAndMerge(transitive, direct, primary, primaryManifest, allowPrimaryOverrideAll);
    } finally {
        logger.fine(String.format("merge finished in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
    }
}

From source file: qa.qcri.nadeef.core.util.sql.PostgresSQLDialect.java
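The stopwatch wraps a Postgres COPY ... FROM STDIN bulk load and reports the elapsed milliseconds once the copy finishes.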

/**
 * {@inheritDoc}
 */
@Override
public int bulkLoad(DBConfig dbConfig, String tableName, Path file, boolean skipHeader) {
    Tracer tracer = Tracer.getTracer(PostgresSQLDialect.class);
    tracer.info("Bulk load CSV file " + file.toString());
    try (Connection conn = DBConnectionPool.createConnection(dbConfig, true);
            FileReader reader = new FileReader(file.toFile())) {
        Stopwatch watch = Stopwatch.createStarted();
        Schema schema = DBMetaDataTool.getSchema(dbConfig, tableName);
        StringBuilder builder = new StringBuilder();
        for (Column column : schema.getColumns()) {
            if (column.getColumnName().equalsIgnoreCase("TID"))
                continue;
            builder.append(column.getColumnName()).append(",");
        }
        builder.deleteCharAt(builder.length() - 1);

        CopyManager copyManager = new CopyManager((BaseConnection) conn);
        String sql = String.format("COPY %s (%s) FROM STDIN WITH (FORMAT 'csv', DELIMITER ',', HEADER %s)",
                tableName, builder.toString(), skipHeader ? "true" : "false");
        copyManager.copyIn(sql, reader);
        watch.stop();
        tracer.info("Bulk load finished in " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    } catch (Exception ex) {
        tracer.err("Loading csv file " + file.getFileName() + " failed.", ex);
        return 1;
    }
    return 0;
}

From source file: dk.dma.nogoservice.service.S3DataLoader.java
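Note the chaining in the finally block: stop() returns the Stopwatch itself, so stopwatch.stop().elapsed(...) freezes and reads the measurement in one expression.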

<T> T loadData(String key, Class<T> clazz) throws IOException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    S3Object object = amazonS3.getObject(S3DataLoader.DATA_BUCKET, key);

    File cacheFile = new File(tempDir, object.getObjectMetadata().getETag() + key);
    ObjectMapper objectMapper = new ObjectMapper();
    if (cacheLocally) {
        if (cacheFile.exists()) {
            log.info("Using local cached file {}", cacheFile.getAbsolutePath());
            return objectMapper.readValue(cacheFile, clazz);
        }
    }

    try (S3ObjectInputStream objectContent = object.getObjectContent()) {
        T data = objectMapper.readValue(objectContent, clazz);
        if (cacheLocally) {
            log.info("caching S3 file locally in {}", cacheFile.getAbsolutePath());
            objectMapper.writeValue(cacheFile, data);
        }
        return data;
    } finally {
        log.info("Loaded file {} from Amazon S3 in {} ms", key,
                stopwatch.stop().elapsed(TimeUnit.MILLISECONDS));
    }
}

From source file: qa.qcri.nadeef.core.utils.sql.PostgresSQLDialect.java
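This is the same bulk-load method from a sibling package (core.utils rather than core.util); the timing pattern is unchanged, with logging moved from the Tracer helper to a Logger and an added trace of the COPY statement.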

/**
 * {@inheritDoc}
 */
@Override
public int bulkLoad(DBConfig dbConfig, String tableName, Path file, boolean skipHeader) {
    Logger tracer = Logger.getLogger(PostgresSQLDialect.class);
    tracer.info("Bulk load CSV file " + file.toString());
    try (Connection conn = DBConnectionPool.createConnection(dbConfig, true);
            FileReader reader = new FileReader(file.toFile())) {
        Stopwatch watch = Stopwatch.createStarted();
        Schema schema = DBMetaDataTool.getSchema(dbConfig, tableName);
        StringBuilder builder = new StringBuilder();
        for (Column column : schema.getColumns()) {
            if (column.getColumnName().equalsIgnoreCase("TID"))
                continue;
            builder.append(column.getColumnName()).append(",");
        }
        builder.deleteCharAt(builder.length() - 1);

        CopyManager copyManager = new CopyManager((BaseConnection) conn);
        String sql = String.format("COPY %s (%s) FROM STDIN WITH (FORMAT 'csv', DELIMITER ',', HEADER %s)",
                tableName, builder.toString(), skipHeader ? "true" : "false");
        tracer.info(sql);
        copyManager.copyIn(sql, reader);
        watch.stop();
        tracer.info("Bulk load finished in " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    } catch (Exception ex) {
        tracer.error("Loading csv file " + file.getFileName() + " failed.", ex);
        return 1;
    }
    return 0;
}