Example usage for com.google.common.base Stopwatch toString

Introduction

On this page you can find example usages of com.google.common.base.Stopwatch#toString().

Prototype

@GwtIncompatible("String.format()")
@Override
public String toString() 

Document

Returns a string representation of the current elapsed time.
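
For orientation, here is a minimal, self-contained sketch (not taken from the examples below) showing what toString() produces; it assumes a Guava version recent enough to provide the Stopwatch.createStarted() factory:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchToStringDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        TimeUnit.MILLISECONDS.sleep(40); // stand-in for the work being timed
        stopwatch.stop();

        // toString() picks a human-readable unit automatically and prints
        // something like "40.12 ms"
        System.out.println("took " + stopwatch);

        // for machine-readable timings, prefer elapsed(TimeUnit)
        System.out.println("took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}

String concatenation invokes toString() implicitly, which is how several of the examples below use it.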

Usage

From source file:cosmos.mapred.MediawikiQueries.java

public void groupBy(Store id, Column colToFetch, Map<Column, Long> columnCounts, long totalResults)
        throws Exception {
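    // note: the no-arg Stopwatch constructor is deprecated in newer Guava
    // releases; current code would use Stopwatch.createUnstarted() or createStarted()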
    Stopwatch sw = new Stopwatch();

    sw.start();
    final CloseableIterable<Entry<RecordValue<?>, Long>> results = this.sorts.groupResults(id, colToFetch);
    TreeMap<RecordValue<?>, Long> counts = Maps.newTreeMap();

    for (Entry<RecordValue<?>, Long> entry : results) {
        counts.put(entry.getKey(), entry.getValue());
    }

    results.close();
    sw.stop();

    System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - Took " + sw.toString()
            + " to group results");
    logTiming(totalResults, sw.elapsed(TimeUnit.MILLISECONDS), "groupBy:" + colToFetch);

    //    System.out.println(counts);

    final CloseableIterable<MultimapRecord> verifyResults = this.sorts.fetch(id, Index.define(colToFetch));
    TreeMap<RecordValue<?>, Long> records = Maps.newTreeMap();
    for (MultimapRecord r : verifyResults) {
        if (r.containsKey(colToFetch)) {
            for (RecordValue<?> val : r.get(colToFetch)) {
                if (records.containsKey(val)) {
                    records.put(val, records.get(val) + 1);
                } else {
                    records.put(val, 1L);
                }
            }
        }
    }

    verifyResults.close();

    if (counts.size() != records.size()) {
        System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - Expected "
                + records.size() + " groups but found " + counts.size());
        System.exit(1);
    }

    Set<RecordValue<?>> countKeys = counts.keySet(), recordKeys = records.keySet();
    for (RecordValue<?> k : countKeys) {
        if (!recordKeys.contains(k)) {
            System.out.println(Thread.currentThread().getName() + ": " + colToFetch
                    + " - Expected to have count for " + k);
            System.exit(1);
        }

        Long actual = counts.get(k), expected = records.get(k);

        if (!actual.equals(expected)) {
            System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - Expected " + expected
                    + " value(s) but found " + actual + " value(s) for " + k.value());
            System.exit(1);
        }
    }
}

From source file:de.nx42.maps4cim.map.texture.osm.OverpassBridge.java

/**
 * Downloads the requested data from the Overpass servers and stores
 * the osm xml file on the disk cache, using the specified hash String
 * for later retrieval
 * @param hash the hash under which the file can be retrieved later
 * @return the resulting osm xml file
 * @throws TextureProcessingException if anything goes wrong while
 * downloading data from the Overpass servers
 */
protected File downloadAndCache(OsmHash hash) throws TextureProcessingException {
    Exception inner = null;
    for (String server : servers) {
        try {
            final Stopwatch stopwatch = Stopwatch.createStarted();

            // generate Query and store result in temp
            URL query = buildQueryURL(server);
            File dest = Cache.temporaray(hash.getXmlFileName());

            // 5 seconds connection timeout, 90 seconds for the server to execute the query
            // (so after this time, the download must start, or a timeout occurs)
            Network.downloadToFile(query, dest, 5, 90);

            // zip result and store in cache
            if (caching) {
                hash.storeInCache(dest);
            }

            stopwatch.stop();
            log.debug("Download from server {} finished in {}", query.getHost(), stopwatch.toString());
            // return plain text xml from temporary directory
            return dest;
        } catch (UnknownHostException e) {
            inner = e;
            log.error("The URL of Overpass-Server {} could not be resolved. Are you connected to the internet?",
                    e.getMessage());
        } catch (SocketTimeoutException e) {
            inner = e;
            log.error("Error getting data from Overpass Server " + server + "\nTrying next ...", e);
        } catch (IOException e) {
            inner = e;
            log.error("I/O Exception while processing OpenStreetMap source data.", e);
        }
    }
    throw new TextureProcessingException(
            "OpenStreetMap source data could not be retrieved via Overpass API.", inner);
}

From source file:org.apache.accumulo.gc.replication.CloseWriteAheadLogReferences.java

@Override
public void run() {
    // As long as we depend on a newer Guava than Hadoop uses, we have to make sure we're compatible with
    // what the version they bundle uses.
    Stopwatch sw = new Stopwatch();

    Connector conn;
    try {
        conn = context.getConnector();
    } catch (Exception e) {
        log.error("Could not create connector", e);
        throw new RuntimeException(e);
    }

    if (!ReplicationTable.isOnline(conn)) {
        log.debug("Replication table isn't online, not attempting to clean up wals");
        return;
    }

    Span findWalsSpan = Trace.start("findReferencedWals");
    HashSet<String> closed = null;
    try {
        sw.start();
        closed = getClosedLogs(conn);
    } finally {
        sw.stop();
        findWalsSpan.stop();
    }

    log.info("Found " + closed.size() + " WALs referenced in metadata in " + sw.toString());
    sw.reset();

    Span updateReplicationSpan = Trace.start("updateReplicationTable");
    long recordsClosed = 0;
    try {
        sw.start();
        recordsClosed = updateReplicationEntries(conn, closed);
    } finally {
        sw.stop();
        updateReplicationSpan.stop();
    }

    log.info(
            "Closed " + recordsClosed + " WAL replication references in replication table in " + sw.toString());
}

From source file:org.geoserver.jdbcconfig.internal.ConfigDatabase.java

public <T extends Info> CloseableIterator<T> query(final Class<T> of, final Filter filter,
        @Nullable Integer offset, @Nullable Integer limit, @Nullable SortBy... sortOrder) {

    checkNotNull(of);
    checkNotNull(filter);
    checkArgument(offset == null || offset.intValue() >= 0);
    checkArgument(limit == null || limit.intValue() >= 0);

    QueryBuilder<T> sqlBuilder = QueryBuilder.forIds(of, dbMappings).filter(filter).offset(offset).limit(limit)
            .sortOrder(sortOrder);

    final StringBuilder sql = sqlBuilder.build();
    final Map<String, Object> namedParameters = sqlBuilder.getNamedParameters();
    final Filter unsupportedFilter = sqlBuilder.getUnsupportedFilter();
    final boolean fullySupported = Filter.INCLUDE.equals(unsupportedFilter);

    if (LOGGER.isLoggable(Level.FINER)) {
        LOGGER.finer("Original filter: " + filter);
        LOGGER.finer("Supported filter: " + sqlBuilder.getSupportedFilter());
        LOGGER.finer("Unsupported filter: " + sqlBuilder.getUnsupportedFilter());
    }
    logStatement(sql, namedParameters);

    Stopwatch sw = new Stopwatch().start();
    List<String> ids = template.queryForList(sql.toString(), namedParameters, String.class);
    sw.stop();
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine(Joiner.on("").join("query returned ", ids.size(), " records in ", sw.toString()));
    }

    List<T> lazyTransformed = Lists.transform(ids, new Function<String, T>() {
        @Override
        public T apply(String id) {
            return getById(id, of);
        }
    });

    CloseableIterator<T> result;

    if (fullySupported) {
        Iterator<T> iterator = lazyTransformed.iterator();
        result = new CloseableIteratorAdapter<T>(iterator);
    } else {
        Iterator<T> iterator = lazyTransformed.iterator();
        if (offset != null) {
            Iterators.skip(iterator, offset.intValue());
        }
        if (limit != null) {
            iterator = Iterators.limit(iterator, limit.intValue());
        }
        result = CloseableIteratorAdapter.filter(iterator, filter);
    }

    return result;
}

From source file:com.b2international.snowowl.snomed.reasoner.server.normalform.RelationshipNormalFormGenerator.java

public final int collectNormalFormChanges(final IProgressMonitor monitor,
        final OntologyChangeProcessor<StatementFragment> processor) {
    LOGGER.info(">>> Relationship normal form generation");
    final Stopwatch stopwatch = Stopwatch.createStarted();
    final int results = collectNormalFormChanges(monitor, processor, StatementFragmentOrdering.INSTANCE);
    LOGGER.info(MessageFormat.format("<<< Relationship normal form generation [{0}]", stopwatch.toString()));
    return results;
}

From source file:cosmos.mapred.MediawikiQueries.java

public long docIdFetch(Store id, Map<Column, Long> counts, long totalResults) throws Exception {
    Stopwatch sw = new Stopwatch();

    // This is dumb, I didn't pad the docids...
    String prev = "!";
    long resultCount = 0L;
    sw.start();

    final CloseableIterable<MultimapRecord> results = this.sorts.fetch(id,
            Index.define(Defaults.DOCID_FIELD_NAME));

    for (MultimapRecord r : results) {
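        // pause the stopwatch while each record is validated so that only the
        // time spent pulling results from the iterator is measured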
        sw.stop();

        resultCount++;

        String current = r.docId();
        if (prev.compareTo(current) > 0) {
            System.out.println("WOAH, got " + current + " docid which was less than the previous " + prev);
            results.close();
            System.exit(1);
        }

        prev = current;

        sw.start();
    }

    sw.stop();

    System.out.println(
            Thread.currentThread().getName() + ": docIdFetch - Took " + sw.toString() + " to fetch results");
    logTiming(totalResults, sw.elapsed(TimeUnit.MILLISECONDS), "docIdFetch");

    results.close();

    return resultCount;
}

From source file:org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.java

/**
 * Mark and sweep. Main entry method for GC.
 *
 * @param markOnly whether to mark only
 * @throws Exception the exception
 */
protected void markAndSweep(boolean markOnly) throws Exception {
    boolean threw = true;
    GarbageCollectorFileState fs = new GarbageCollectorFileState(root);
    try {
        Stopwatch sw = Stopwatch.createStarted();
        LOG.info("Starting Blob garbage collection with markOnly [{}]", markOnly);

        long markStart = System.currentTimeMillis();
        mark(fs);
        if (!markOnly) {
            long deleteCount = sweep(fs, markStart);
            threw = false;

            long maxTime = getLastMaxModifiedTime(markStart) > 0 ? getLastMaxModifiedTime(markStart)
                    : markStart;
            LOG.info(
                    "Blob garbage collection completed in {}. Number of blobs deleted [{}] with max modification time of [{}]",
                    sw.toString(), deleteCount, timestampToString(maxTime));
        }
    } catch (Exception e) {
        LOG.error("Blob garbage collection error", e);
        throw e;
    } finally {
        if (!LOG.isTraceEnabled()) {
            Closeables.close(fs, threw);
        }
    }
}

From source file:org.apache.rocketmq.console.task.DashboardCollectTask.java

@Scheduled(cron = "30 0/1 * * * ?")
@MultiMQAdminCmdMethod(timeoutMillis = 5000)
public void collectTopic() {
    if (!rmqConfigure.isEnableDashBoardCollect()) {
        return;
    }
    Date date = new Date();
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        TopicList topicList = mqAdminExt.fetchAllTopicList();
        Set<String> topicSet = topicList.getTopicList();
        for (String topic : topicSet) {
            if (topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)
                    || topic.startsWith(MixAll.DLQ_GROUP_TOPIC_PREFIX)) {
                continue;
            }

            TopicRouteData topicRouteData = mqAdminExt.examineTopicRouteInfo(topic);

            GroupList groupList = mqAdminExt.queryTopicConsumeByWho(topic);

            double inTPS = 0;

            long inMsgCntToday = 0;

            double outTPS = 0;

            long outMsgCntToday = 0;

            for (BrokerData bd : topicRouteData.getBrokerDatas()) {
                String masterAddr = bd.getBrokerAddrs().get(MixAll.MASTER_ID);
                if (masterAddr != null) {
                    try {
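                        // time each viewBrokerStatsData call individually;
                        // reset() below zeroes the stopwatch for the next broker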
                        stopwatch.start();
                        log.info("start time: {}", stopwatch.toString());
                        BrokerStatsData bsd = mqAdminExt.viewBrokerStatsData(masterAddr,
                                BrokerStatsManager.TOPIC_PUT_NUMS, topic);
                        stopwatch.stop();
                        log.info("stop time : {}", stopwatch.toString());
                        stopwatch.reset();
                        inTPS += bsd.getStatsMinute().getTps();
                        inMsgCntToday += StatsAllSubCommand.compute24HourSum(bsd);
                    } catch (Exception e) {
                        //                            throw Throwables.propagate(e);
                    }
                }
            }

            if (groupList != null && !groupList.getGroupList().isEmpty()) {

                for (String group : groupList.getGroupList()) {
                    for (BrokerData bd : topicRouteData.getBrokerDatas()) {
                        String masterAddr = bd.getBrokerAddrs().get(MixAll.MASTER_ID);
                        if (masterAddr != null) {
                            try {
                                String statsKey = String.format("%s@%s", topic, group);
                                BrokerStatsData bsd = mqAdminExt.viewBrokerStatsData(masterAddr,
                                        BrokerStatsManager.GROUP_GET_NUMS, statsKey);
                                outTPS += bsd.getStatsMinute().getTps();
                                outMsgCntToday += StatsAllSubCommand.compute24HourSum(bsd);
                            } catch (Exception e) {
                                //                                    throw Throwables.propagate(e);
                            }
                        }
                    }
                }
            }

            List<String> list;
            try {
                list = dashboardCollectService.getTopicMap().get(topic);
            } catch (ExecutionException e) {
                throw Throwables.propagate(e);
            }
            if (null == list) {
                list = Lists.newArrayList();
            }

            list.add(date.getTime() + "," + new BigDecimal(inTPS).setScale(5, BigDecimal.ROUND_HALF_UP) + ","
                    + inMsgCntToday + "," + new BigDecimal(outTPS).setScale(5, BigDecimal.ROUND_HALF_UP) + ","
                    + outMsgCntToday);
            dashboardCollectService.getTopicMap().put(topic, list);

        }

        // use a parameterized message so the {} placeholder is actually substituted
        log.debug("Topic Collected Data in memory = {}",
                JsonUtil.obj2String(dashboardCollectService.getTopicMap().asMap()));
    } catch (Exception err) {
        throw Throwables.propagate(err);
    }
}

From source file:org.apache.jackrabbit.oak.run.CompactCommand.java

@Override
public void execute(String... args) throws Exception {
    OptionParser parser = new OptionParser();
    OptionSpec<String> directoryArg = parser.nonOptions("Path to segment store (required)")
            .ofType(String.class);
    OptionSpec<Void> forceFlag = parser.accepts("force",
            "Force compaction and ignore non matching segment version");
    OptionSpec<?> segmentTar = parser.accepts("segment-tar", "Use oak-segment-tar instead of oak-segment");
    OptionSet options = parser.parse(args);

    String path = directoryArg.value(options);
    if (path == null) {
        System.err.println("Compact a file store. Usage: compact [path] <options>");
        parser.printHelpOn(System.err);
        System.exit(-1);
    }

    File directory = new File(path);
    boolean force = options.has(forceFlag);

    boolean success = false;
    Set<String> beforeLs = newHashSet();
    Set<String> afterLs = newHashSet();
    Stopwatch watch = Stopwatch.createStarted();

    System.out.println("Compacting " + directory);
    System.out.println("    before ");
    beforeLs.addAll(list(directory));
    long sizeBefore = FileUtils.sizeOfDirectory(directory);
    System.out
            .println("    size " + IOUtils.humanReadableByteCount(sizeBefore) + " (" + sizeBefore + " bytes)");
    System.out.println("    -> compacting");

    try {
        if (options.has(segmentTar)) {
            SegmentTarUtils.compact(directory, force);
        } else {
            SegmentUtils.compact(directory, force);
        }
        success = true;
    } catch (Throwable e) {
        System.out.println("Compaction failure stack trace:");
        e.printStackTrace(System.out);
    } finally {
        watch.stop();
        if (success) {
            System.out.println("    after ");
            afterLs.addAll(list(directory));
            long sizeAfter = FileUtils.sizeOfDirectory(directory);
            System.out.println(
                    "    size " + IOUtils.humanReadableByteCount(sizeAfter) + " (" + sizeAfter + " bytes)");
            System.out.println("    removed files " + difference(beforeLs, afterLs));
            System.out.println("    added files " + difference(afterLs, beforeLs));
            System.out.println("Compaction succeeded in " + watch.toString() + " ("
                    + watch.elapsed(TimeUnit.SECONDS) + "s).");
        } else {
            System.out.println("Compaction failed in " + watch.toString() + " ("
                    + watch.elapsed(TimeUnit.SECONDS) + "s).");
            System.exit(1);
        }
    }
}

From source file:org.zephyrsoft.sdb2.service.IndexerServiceImpl.java

@Override
public void index(final IndexType indexType, final Collection<Song> songs) {
    executor.execute(new Runnable() {
        @Override
        public void run() {
            Stopwatch stopwatch = Stopwatch.createStarted();

            Directory directory = new RAMDirectory();
            try {
                LOG.debug("available tokenizers: {}", TokenizerFactory.availableTokenizers());
                LOG.debug("available token filters: {}", TokenFilterFactory.availableTokenFilters());
                Analyzer analyzer = CustomAnalyzer.builder().withTokenizer("standard")
                        .addTokenFilter("lowercase")
                        .addTokenFilter("ngram", "minGramSize", "1", "maxGramSize", "25").build();
                IndexWriterConfig config = new IndexWriterConfig(analyzer);
                try (IndexWriter writer = new IndexWriter(directory, config)) {
                    for (Song song : songs) {
                        Document document = createDocument(song);
                        writer.addDocument(document);
                        songByUuid.put(song.getUUID(), song);
                    }
                } catch (IOException e) {
                    LOG.warn("couldn't index songs", e);
                }
            } catch (IOException e1) {
                LOG.warn("couldn't create analyzer", e1);
            } finally {
                putIndex(indexType, directory);
                stopwatch.stop();
                LOG.info("indexing songs in background thread took {}", stopwatch.toString());
            }
        }
    });
}