List of usage examples for com.google.common.base.Stopwatch#stop()
public Stopwatch stop()
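All of the examples below follow the same basic pattern: create a started Stopwatch, do the timed work, call stop(), then read the elapsed time. A minimal, self-contained sketch of that pattern (the class name and the sleep are illustrative only, not taken from the examples):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchStopSketch {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted(); // factory that starts timing immediately
        Thread.sleep(50);                                // stand-in for the work being measured
        stopwatch.stop();                                // freezes the elapsed time; throws IllegalStateException if not running
        System.out.println("took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}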
From source file:joshelser.LimitAndSumColumnFamilyIterator.java
/**
 * Given the current position in the {@link source}, filter to only the columns specified. Sets topKey and
 * topValue to non-null on success.
 *
 * @throws IOException
 */
protected void aggregate() throws IOException {
    Stopwatch aggrSw = Stopwatch.createStarted();

    // Explicitly track the "startKey" bound on the Range of data we want to observe
    // Would be desirable to just use a Range for this, but you incur a penalty for this
    // due to the repeated creation/deletion of the Range object. I believe this is due to the additional
    // call to Key#compareTo(Key) that a call to Range#contains(Key) would perform when we really only care about
    // one of those comparisons
    Key currentStartKey = currentRange.getStartKey();

    if (!getSource().hasTop()) {
        nextRecordNotFound();
        aggrSw.stop();
        log.trace("Aggregate duration: " + aggrSw.elapsed(TimeUnit.MILLISECONDS));
        return;
    }

    while (getSource().hasTop()) {
        final Key currentKey = getSource().getTopKey();
        final Value currentValue = getSource().getTopValue();

        if (currentRange.afterEndKey(currentKey)) {
            break;
        }

        currentKey.getColumnFamily(colfamHolder);
        final Iterator<Text> remainingColumns = desiredColumns.tailSet(colfamHolder).iterator();

        if (desiredColumns.contains(colfamHolder)) {
            // found a column we wanted
            nextRecordFound(currentKey, currentValue);

            // The tailSet returns elements greater than or equal to the provided element
            // need to consume the equality element
            remainingColumns.next();
        }

        if (remainingColumns.hasNext()) {
            // Get the row avoiding a new Text
            currentKey.getRow(rowHolder);
            Text nextColumn = remainingColumns.next();
            currentStartKey = new Key(rowHolder, nextColumn);
        } else {
            currentStartKey = currentKey.followingKey(PartialKey.ROW);

            // No more data to read, outside of where we wanted to look
            if (currentRange.afterEndKey(currentStartKey)) {
                setReturnValue(aggrSw);
                return;
            }
        }

        log.trace("Moving to " + currentStartKey);

        if (!getSource().hasTop()) {
            setReturnValue(aggrSw);
            return;
        }

        boolean advancedToDesiredPoint = false;

        // Move down to the next Key
        for (int i = 0; i < 10 && !advancedToDesiredPoint; i++) {
            getSource().next();

            if (getSource().hasTop()) {
                // Move to at least currentStartKey
                if (currentStartKey.compareTo(getSource().getTopKey()) <= 0) {
                    advancedToDesiredPoint = true;
                }
            } else {
                setReturnValue(aggrSw);
                return;
            }
        }

        if (!advancedToDesiredPoint) {
            log.debug("Seeking to find next desired key: " + currentStartKey);
            getSource().seek(
                    new Range(currentStartKey, true, currentRange.getEndKey(), currentRange.isEndKeyInclusive()),
                    currentColumnFamilies, currentColumnFamiliesInclusive);
        }
    }

    setReturnValue(aggrSw);
}
From source file:qa.qcri.nadeef.core.pipeline.ViolationRepair.java
/**
 * Execute the operator.
 *
 * @param violations input object.
 * @return output object.
 */
@Override
@SuppressWarnings("unchecked")
public Collection<Collection<Fix>> execute(Collection<Violation> violations) throws Exception {
    Stopwatch stopwatch = Stopwatch.createStarted();
    Rule rule = getCurrentContext().getRule();
    List<Collection<Fix>> result = Lists.newArrayList();
    int count = 0;
    for (Violation violation : violations) {
        try {
            Collection<Fix> fix = (Collection<Fix>) rule.repair(violation);
            result.add(fix);
            count++;
        } catch (Exception ex) {
            Tracer tracer = Tracer.getTracer(ViolationRepair.class);
            tracer.err("Exception in repair method.", ex);
        }
        setPercentage(count / violations.size());
    }

    long elapseTime;
    if (violations.size() != 0) {
        elapseTime = stopwatch.elapsed(TimeUnit.MILLISECONDS) / violations.size();
    } else {
        elapseTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
    }
    PerfReport.appendMetric(PerfReport.Metric.RepairCallTime, elapseTime);
    stopwatch.stop();
    return result;
}
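The example above divides the elapsed milliseconds by the number of violations to record an average repair time per violation, falling back to the total when the collection is empty. A minimal sketch of that per-item averaging, with hypothetical names (items, workOnAllItems):

import java.util.List;
import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

final class PerItemTiming {
    // Hypothetical helper: average elapsed millis per item, using the total when the
    // list is empty so we never divide by zero.
    static <T> long averageMillisPerItem(List<T> items, Runnable workOnAllItems) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        workOnAllItems.run();
        stopwatch.stop();
        long totalMillis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        return items.isEmpty() ? totalMillis : totalMillis / items.size();
    }
}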
From source file:monasca.persister.repository.vertica.VerticaMetricRepo.java
private void updateIdCaches(String id) {
    Stopwatch sw = Stopwatch.createStarted();

    for (Sha1HashId defId : definitionIdSet) {
        definitionsIdCache.put(defId, defId);
    }

    for (Sha1HashId dimId : dimensionIdSet) {
        dimensionsIdCache.put(dimId, dimId);
    }

    for (Sha1HashId defDimsId : definitionDimensionsIdSet) {
        definitionDimensionsIdCache.put(defDimsId, defDimsId);
    }

    clearTempCaches();

    sw.stop();
    logger.debug("[{}]: clearing temp caches took: {}", id, sw);
}
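Note that this example logs the Stopwatch object itself rather than a numeric value; Stopwatch#toString() renders the elapsed time in a human-readable form with an automatically chosen unit (for example "38.1 ms"), which is convenient in log messages. A small sketch of the same idea (doWork is a placeholder):

import com.google.common.base.Stopwatch;

class ToStringLogging {
    static void timeAndLog(Runnable doWork) {
        Stopwatch sw = Stopwatch.createStarted();
        doWork.run();
        sw.stop();
        // toString() picks a readable unit (ns, μs, ms, s, ...) automatically
        System.out.println("work took: " + sw);
    }
}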
From source file:com.github.steveash.jg2p.seq.PhonemeCrfTrainer.java
public void trainForInstances(InstanceList examples) {
    if (state == State.Initializing) {
        initializeFor(examples);
    }
    state = State.Training;
    Stopwatch watch = Stopwatch.createStarted();
    CRFTrainerByThreadedLabelLikelihood trainer = makeNewTrainer(crf);
    // CRFTrainerByLabelLikelihood trainer = makeNewTrainerSingleThreaded(crf);
    this.lastTrainer = trainer;

    trainer.train(examples, opts.maxIterations);
    // trainer.train(examples, 8, 250, new double[]{0.15, 1.0});
    // trainer.train(examples, 8, new double[]{0.15, 1.0});
    trainer.shutdown(); // just closes the pool; next call to train will create a new one

    if (opts.trimFeaturesUnderPercentile > 0) {
        trainer.getCRF().pruneFeaturesBelowPercentile(opts.trimFeaturesUnderPercentile);
        trainer.train(examples);
        trainer.shutdown();
    }

    watch.stop();
    log.info("Training took " + watch);
    if (printEval) {
        log.info("Accuracy on training data: " + accuracyFor(examples));
    }
}
From source file:com.google.api.ads.adwords.jaxws.extensions.processors.ReportProcessor.java
/**
 * Process the local files delegating the call to the concrete implementation.
 *
 * @param reportType the report type.
 * @param localFiles the local files.
 * @param dateStart the start date.
 * @param dateEnd the end date.
 * @param dateRangeType the date range type.
 */
private <R extends Report> void processLocalFiles(ReportDefinitionReportType reportType,
        Collection<File> localFiles, String dateStart, String dateEnd,
        ReportDefinitionDateRangeType dateRangeType, Date reportDownloadDate) {

    Stopwatch stopwatch = Stopwatch.createStarted();

    @SuppressWarnings("unchecked")
    Class<R> reportBeanClass = (Class<R>) this.csvReportEntitiesMapping.getReportBeanClass(reportType);
    this.processFiles(reportBeanClass, localFiles, dateRangeType, dateStart, dateEnd, reportDownloadDate);

    stopwatch.stop();
    LOGGER.info(
            "\n* DB Process finished in " + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000) + " seconds ***");
}
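This example converts elapsed(TimeUnit.MILLISECONDS) / 1000 to whole seconds by hand; calling elapsed(TimeUnit.SECONDS) performs the same truncating conversion directly. A minimal sketch (the timed work is a placeholder):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

class ElapsedSeconds {
    static long timeInWholeSeconds(Runnable work) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        work.run();
        stopwatch.stop();
        // elapsed(...) rounds the fraction down, so this matches elapsed(MILLISECONDS) / 1000
        return stopwatch.elapsed(TimeUnit.SECONDS);
    }
}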
From source file:com.twitter.hraven.datasource.HdfsStatsService.java
/**
 * Scans the hbase table and populates the hdfs stats.
 *
 * @param cluster
 * @param scan
 * @param maxCount
 * @return
 * @throws IOException
 */
private List<HdfsStats> createFromScanResults(String cluster, String path, Scan scan, int maxCount,
        boolean checkPath, long starttime, long endtime) throws IOException {
    Map<HdfsStatsKey, HdfsStats> hdfsStats = new HashMap<HdfsStatsKey, HdfsStats>();
    ResultScanner scanner = null;
    Stopwatch timer = new Stopwatch().start();
    int rowCount = 0;
    long colCount = 0;
    long resultSize = 0;

    try {
        scanner = hdfsUsageTable.getScanner(scan);
        for (Result result : scanner) {
            if (result != null && !result.isEmpty()) {
                colCount += result.size();
                resultSize += result.getWritableSize();
                rowCount = populateHdfsStats(result, hdfsStats, checkPath, path, starttime, endtime, rowCount);
                // return if we've already hit the limit
                if (rowCount >= maxCount) {
                    break;
                }
            }
        }
    } finally {
        timer.stop();
        LOG.info("In createFromScanResults For cluster " + cluster + " Fetched from hbase " + rowCount
                + " rows, " + colCount + " columns, " + resultSize + " bytes ( " + resultSize / (1024 * 1024)
                + ") MB, in total time of " + timer);
        if (scanner != null) {
            scanner.close();
        }
    }

    List<HdfsStats> values = new ArrayList<HdfsStats>(hdfsStats.values());
    // sort so that timestamps are arranged in descending order
    Collections.sort(values);
    return values;
}
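This example (like the MetricsServer and ConfigDatabase examples further down) obtains its timer with new Stopwatch().start(), the public constructor available in older Guava releases; in newer releases the constructors were deprecated and later made non-public in favor of the static factories. A sketch of the modern equivalent:

import com.google.common.base.Stopwatch;

class StopwatchCreation {
    void sketch() {
        // Older Guava: Stopwatch timer = new Stopwatch().start();
        // Current Guava factories:
        Stopwatch started = Stopwatch.createStarted();      // starts timing immediately
        Stopwatch unstarted = Stopwatch.createUnstarted();   // call start() when ready
        unstarted.start();
        started.stop();
        unstarted.stop();
    }
}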
From source file:org.apache.beam.runners.spark.stateful.StateSpecFunctions.java
/**
 * A {@link org.apache.spark.streaming.StateSpec} function to support reading from
 * an {@link UnboundedSource}.
 *
 * <p>This StateSpec function expects the following:
 * <ul>
 * <li>Key: The (partitioned) Source to read from.</li>
 * <li>Value: An optional {@link UnboundedSource.CheckpointMark} to start from.</li>
 * <li>State: A byte representation of the (previously) persisted CheckpointMark.</li>
 * </ul>
 * And returns an iterator over all read values (for the micro-batch).
 *
 * <p>This stateful operation could be described as a flatMap over a single-element stream, which
 * outputs all the elements read from the {@link UnboundedSource} for this micro-batch.
 * Since micro-batches are bounded, the provided UnboundedSource is wrapped by a
 * {@link MicrobatchSource} that applies bounds in the form of duration and max records
 * (per micro-batch).
 *
 * <p>In order to avoid using Spark Guava's classes which pollute the
 * classpath, we use the {@link StateSpec#function(scala.Function3)} signature which employs
 * scala's native {@link scala.Option}, instead of the
 * {@link StateSpec#function(org.apache.spark.api.java.function.Function3)} signature,
 * which employs Guava's {@link com.google.common.base.Optional}.
 *
 * <p>See also <a href="https://issues.apache.org/jira/browse/SPARK-4819">SPARK-4819</a>.</p>
 *
 * @param runtimeContext A serializable {@link SparkRuntimeContext}.
 * @param <T> The type of the input stream elements.
 * @param <CheckpointMarkT> The type of the {@link UnboundedSource.CheckpointMark}.
 * @return The appropriate {@link org.apache.spark.streaming.StateSpec} function.
 */
public static <T, CheckpointMarkT extends UnboundedSource.CheckpointMark>
        scala.Function3<Source<T>, scala.Option<CheckpointMarkT>, State<Tuple2<byte[], Instant>>,
            Tuple2<Iterable<byte[]>, Metadata>> mapSourceFunction(
        final SparkRuntimeContext runtimeContext, final String stepName) {

    return new SerializableFunction3<Source<T>, Option<CheckpointMarkT>, State<Tuple2<byte[], Instant>>,
            Tuple2<Iterable<byte[]>, Metadata>>() {

        @Override
        public Tuple2<Iterable<byte[]>, Metadata> apply(Source<T> source,
                scala.Option<CheckpointMarkT> startCheckpointMark, State<Tuple2<byte[], Instant>> state) {

            MetricsContainerStepMap metricsContainers = new MetricsContainerStepMap();
            MetricsContainer metricsContainer = metricsContainers.getContainer(stepName);

            // Add metrics container to the scope of org.apache.beam.sdk.io.Source.Reader methods
            // since they may report metrics.
            try (Closeable ignored = MetricsEnvironment.scopedMetricsContainer(metricsContainer)) {
                // source as MicrobatchSource
                MicrobatchSource<T, CheckpointMarkT> microbatchSource =
                        (MicrobatchSource<T, CheckpointMarkT>) source;

                // Initial high/low watermarks.
                Instant lowWatermark = BoundedWindow.TIMESTAMP_MIN_VALUE;
                final Instant highWatermark;

                // if state exists, use it, otherwise it's first time so use the startCheckpointMark.
                // startCheckpointMark may be EmptyCheckpointMark (the Spark Java API tries to apply
                // Optional(null)), which is handled by the UnboundedSource implementation.
                Coder<CheckpointMarkT> checkpointCoder = microbatchSource.getCheckpointMarkCoder();
                CheckpointMarkT checkpointMark;
                if (state.exists()) {
                    // previous (output) watermark is now the low watermark.
                    lowWatermark = state.get()._2();
                    checkpointMark = CoderHelpers.fromByteArray(state.get()._1(), checkpointCoder);
                    LOG.info("Continue reading from an existing CheckpointMark.");
                } else if (startCheckpointMark.isDefined()
                        && !startCheckpointMark.get().equals(EmptyCheckpointMark.get())) {
                    checkpointMark = startCheckpointMark.get();
                    LOG.info("Start reading from a provided CheckpointMark.");
                } else {
                    checkpointMark = null;
                    LOG.info("No CheckpointMark provided, start reading from default.");
                }

                // create reader.
                final MicrobatchSource.Reader/*<T>*/ microbatchReader;
                final Stopwatch stopwatch = Stopwatch.createStarted();
                long readDurationMillis = 0;

                try {
                    microbatchReader = (MicrobatchSource.Reader) microbatchSource
                            .getOrCreateReader(runtimeContext.getPipelineOptions(), checkpointMark);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }

                // read microbatch as a serialized collection.
                final List<byte[]> readValues = new ArrayList<>();
                WindowedValue.FullWindowedValueCoder<T> coder = WindowedValue.FullWindowedValueCoder
                        .of(source.getDefaultOutputCoder(), GlobalWindow.Coder.INSTANCE);
                try {
                    // measure how long a read takes per-partition.
                    boolean finished = !microbatchReader.start();
                    while (!finished) {
                        final WindowedValue<T> wv = WindowedValue.of((T) microbatchReader.getCurrent(),
                                microbatchReader.getCurrentTimestamp(), GlobalWindow.INSTANCE,
                                PaneInfo.NO_FIRING);
                        readValues.add(CoderHelpers.toByteArray(wv, coder));
                        finished = !microbatchReader.advance();
                    }

                    // end-of-read watermark is the high watermark, but don't allow decrease.
                    final Instant sourceWatermark = microbatchReader.getWatermark();
                    highWatermark = sourceWatermark.isAfter(lowWatermark) ? sourceWatermark : lowWatermark;

                    readDurationMillis = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
                    LOG.info("Source id {} spent {} millis on reading.", microbatchSource.getId(),
                            readDurationMillis);

                    // if the Source does not supply a CheckpointMark skip updating the state.
                    @SuppressWarnings("unchecked")
                    final CheckpointMarkT finishedReadCheckpointMark =
                            (CheckpointMarkT) microbatchReader.getCheckpointMark();
                    byte[] codedCheckpoint = new byte[0];
                    if (finishedReadCheckpointMark != null) {
                        codedCheckpoint = CoderHelpers.toByteArray(finishedReadCheckpointMark, checkpointCoder);
                    } else {
                        LOG.info("Skipping checkpoint marking because the reader failed to supply one.");
                    }

                    // persist the end-of-read (high) watermark for following read, where it will become
                    // the next low watermark.
                    state.update(new Tuple2<>(codedCheckpoint, highWatermark));
                } catch (IOException e) {
                    throw new RuntimeException("Failed to read from reader.", e);
                }

                final ArrayList<byte[]> payload =
                        Lists.newArrayList(Iterators.unmodifiableIterator(readValues.iterator()));

                return new Tuple2<>((Iterable<byte[]>) payload,
                        new Metadata(readValues.size(), lowWatermark, highWatermark, readDurationMillis,
                                metricsContainers));

            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
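As the signature at the top of this page shows, stop() returns the Stopwatch itself, which is what allows the chained stopwatch.stop().elapsed(TimeUnit.MILLISECONDS) call in the example above. A minimal sketch of that chaining (the timed work is a placeholder):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

class ChainedStop {
    static long timeMillis(Runnable work) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        work.run();
        // stop() returns this, so the elapsed read can be chained onto it
        return stopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
    }
}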
From source file:org.rhq.server.metrics.MetricsServer.java
public AggregateNumericMetric getSummaryAggregate(int scheduleId, long beginTime, long endTime) {
    Stopwatch stopwatch = new Stopwatch().start();
    try {
        DateTime begin = new DateTime(beginTime);

        if (dateTimeService.isInRawDataRange(begin)) {
            Iterable<RawNumericMetric> metrics = dao.findRawMetrics(scheduleId, beginTime, endTime);
            return calculateAggregatedRaw(metrics, beginTime);
        }

        Bucket bucket = getBucket(begin);
        List<AggregateNumericMetric> metrics = dao.findAggregateMetrics(scheduleId, bucket, beginTime, endTime);
        return calculateAggregate(metrics, beginTime, bucket);
    } finally {
        stopwatch.stop();
        if (log.isDebugEnabled()) {
            log.debug("Finished calculating resource summary aggregate for [scheduleId: " + scheduleId
                    + ", beginTime: " + beginTime + ", endTime: " + endTime + "] in "
                    + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }
}
From source file:org.rhq.server.metrics.MetricsServer.java
public AggregateNumericMetric getSummaryAggregate(List<Integer> scheduleIds, long beginTime, long endTime) {
    Stopwatch stopwatch = new Stopwatch().start();
    try {
        DateTime begin = new DateTime(beginTime);

        if (dateTimeService.isInRawDataRange(new DateTime(beginTime))) {
            Iterable<RawNumericMetric> metrics = dao.findRawMetrics(scheduleIds, beginTime, endTime);
            return calculateAggregatedRaw(metrics, beginTime);
        }

        Bucket bucket = getBucket(begin);
        List<AggregateNumericMetric> metrics = loadMetrics(scheduleIds, beginTime, endTime, bucket);
        return calculateAggregate(metrics, beginTime, bucket);
    } finally {
        stopwatch.stop();
        if (log.isDebugEnabled()) {
            log.debug("Finished calculating group summary aggregate for [scheduleIds: " + scheduleIds
                    + ", beginTime: " + beginTime + ", endTime: " + endTime + "] in "
                    + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }
}
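Both MetricsServer examples above wrap the timed work in try/finally so the stopwatch is stopped and the duration logged even when the method returns early or throws. A minimal sketch of that pattern (the label and the Supplier-based work are placeholders):

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

import com.google.common.base.Stopwatch;

class TryFinallyTiming {
    static <T> T timed(String label, Supplier<T> work) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            return work.get();
        } finally {
            stopwatch.stop();
            // runs on both normal return and exception
            System.out.println(label + " took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }
}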
From source file:org.geoserver.jdbcconfig.internal.ConfigDatabase.java
public <T extends Info> CloseableIterator<T> query(final Class<T> of, final Filter filter,
        @Nullable Integer offset, @Nullable Integer limit, @Nullable SortBy... sortOrder) {

    checkNotNull(of);
    checkNotNull(filter);
    checkArgument(offset == null || offset.intValue() >= 0);
    checkArgument(limit == null || limit.intValue() >= 0);

    QueryBuilder<T> sqlBuilder = QueryBuilder.forIds(of, dbMappings).filter(filter).offset(offset).limit(limit)
            .sortOrder(sortOrder);

    final StringBuilder sql = sqlBuilder.build();
    final Map<String, Object> namedParameters = sqlBuilder.getNamedParameters();
    final Filter unsupportedFilter = sqlBuilder.getUnsupportedFilter();
    final boolean fullySupported = Filter.INCLUDE.equals(unsupportedFilter);

    if (LOGGER.isLoggable(Level.FINER)) {
        LOGGER.finer("Original filter: " + filter);
        LOGGER.finer("Supported filter: " + sqlBuilder.getSupportedFilter());
        LOGGER.finer("Unsupported filter: " + sqlBuilder.getUnsupportedFilter());
    }
    logStatement(sql, namedParameters);

    Stopwatch sw = new Stopwatch().start();
    List<String> ids = template.queryForList(sql.toString(), namedParameters, String.class);
    sw.stop();
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine(Joiner.on("").join("query returned ", ids.size(), " records in ", sw.toString()));
    }

    List<T> lazyTransformed = Lists.transform(ids, new Function<String, T>() {
        @Override
        public T apply(String id) {
            return getById(id, of);
        }
    });

    CloseableIterator<T> result;
    if (fullySupported) {
        Iterator<T> iterator = lazyTransformed.iterator();
        result = new CloseableIteratorAdapter<T>(iterator);
    } else {
        Iterator<T> iterator = lazyTransformed.iterator();
        if (offset != null) {
            Iterators.skip(iterator, offset.intValue());
        }
        if (limit != null) {
            iterator = Iterators.limit(iterator, limit.intValue());
        }
        result = CloseableIteratorAdapter.filter(iterator, filter);
    }
    return result;
}