List of usage examples for com.google.common.base.Stopwatch.elapsed(TimeUnit)
@CheckReturnValue public long elapsed(TimeUnit desiredUnit)
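For orientation before the examples: elapsed(TimeUnit) returns the total time the stopwatch has been running, converted to the requested unit with any remainder truncated. A minimal, self-contained sketch (class and variable names are illustrative only):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchElapsedDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(250); // stand-in for the work being timed
        stopwatch.stop();
        // elapsed(...) truncates toward zero: 250 ms reads as 250 MILLISECONDS but 0 SECONDS
        System.out.println(stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println(stopwatch.elapsed(TimeUnit.SECONDS) + " s");
    }
}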
From source file:com.google.cloud.genomics.dataflow.readers.bam.Reader.java
/**
 * To compare how sharded reading works vs. plain HTSJDK sequential iteration,
 * this method implements such iteration.
 * This makes it easier to discover errors such as reads that are somehow
 * skipped by a sharded approach.
 */
public static Iterable<Read> readSequentiallyForTesting(Objects storageClient, String storagePath,
        Contig contig, ReaderOptions options) throws IOException {
    Stopwatch timer = Stopwatch.createStarted();
    SamReader samReader = BAMIO.openBAM(storageClient, storagePath, options.getStringency());
    SAMRecordIterator iterator = samReader.queryOverlapping(contig.referenceName,
            (int) contig.start + 1, (int) contig.end);
    List<Read> reads = new ArrayList<Read>();

    int recordsBeforeStart = 0;
    int recordsAfterEnd = 0;
    int mismatchedSequence = 0;
    int recordsProcessed = 0;
    Filter filter = setupFilter(options, contig.referenceName);
    while (iterator.hasNext()) {
        SAMRecord record = iterator.next();
        final boolean passesFilter = passesFilter(record, filter, contig.referenceName);

        if (!passesFilter) {
            mismatchedSequence++;
            continue;
        }
        if (record.getAlignmentStart() < contig.start) {
            recordsBeforeStart++;
            continue;
        }
        if (record.getAlignmentStart() > contig.end) {
            recordsAfterEnd++;
            continue;
        }
        reads.add(ReadUtils.makeReadGrpc(record));
        recordsProcessed++;
    }
    timer.stop();
    LOG.info("NON SHARDED: Processed " + recordsProcessed + " in " + timer
            + ". Speed: " + (recordsProcessed * 1000) / timer.elapsed(TimeUnit.MILLISECONDS) + " reads/sec"
            + ", skipped other sequences " + mismatchedSequence
            + ", skippedBefore " + recordsBeforeStart
            + ", skipped after " + recordsAfterEnd);
    return reads;
}
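A caveat on the reads/sec arithmetic above: if the loop completes in under a millisecond, timer.elapsed(TimeUnit.MILLISECONDS) returns 0 and the division throws ArithmeticException; the multiplication can also overflow int for very large record counts. A defensive variant of the final log line (a sketch, not the original source):

long elapsedMs = timer.elapsed(TimeUnit.MILLISECONDS);
// Clamp the denominator to 1 ms and use long arithmetic to avoid int overflow.
long readsPerSec = (recordsProcessed * 1000L) / Math.max(elapsedMs, 1L);
LOG.info("NON SHARDED: Processed " + recordsProcessed + " reads. Speed: " + readsPerSec + " reads/sec");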
From source file:uk.ac.ebi.atlas.search.diffanalytics.DiffAnalyticsDao.java
public int fetchResultCount(Optional<? extends Collection<IndexedAssayGroup>> indexedContrasts,
        Optional<? extends Collection<String>> geneIds, String specie) {
    Optional<ImmutableSet<IndexedAssayGroup>> uniqueIndexedContrasts = uniqueIndexedContrasts(indexedContrasts);

    log("fetchResultCount", uniqueIndexedContrasts, geneIds);
    Stopwatch stopwatch = Stopwatch.createStarted();

    DatabaseQuery databaseQuery = buildCount(uniqueIndexedContrasts, geneIds, specie);
    int count = jdbcTemplate.queryForObject(databaseQuery.getQuery(), Integer.class,
            databaseQuery.getParameters().toArray());

    LOGGER.debug(String.format("fetchResultCount returned %s in %.2f seconds", count,
            stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000D));

    return count;
}
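Where only a log message is needed, Stopwatch.toString() already renders a human-readable duration with an automatically chosen unit (for example "38.21 ms" or "1.234 s"), so a hedged alternative to the manual division by 1000D would be:

// Stopwatch.toString() picks a sensible unit and precision on its own.
LOGGER.debug("fetchResultCount returned " + count + " in " + stopwatch);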
From source file:org.ow2.proactive.scheduler.task.TaskLauncher.java
private TaskResultImpl getTaskResult(Stopwatch taskStopwatchForFailures, SchedulerException exception) {
    taskLogger.getErrorSink().println(exception.getMessage());
    return new TaskResultImpl(taskId, exception, taskLogger.getLogs(),
            taskStopwatchForFailures.elapsed(TimeUnit.MILLISECONDS));
}
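One detail worth knowing when a Stopwatch is handed in from elsewhere, as here: reading elapsed(...) never throws, even on a stopwatch that was created but never started; it simply reports 0. A tiny sketch:

Stopwatch sw = Stopwatch.createUnstarted();
long ms = sw.elapsed(TimeUnit.MILLISECONDS); // 0; querying never throws IllegalStateException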
From source file:com.google.api.ads.adwords.awalerting.processor.AlertProcessor.java
/**
 * Process reports for the given account IDs under the manager account.
 *
 * @param reports the downloaded reports
 * @param rulesConfig the JSON config of current alert rules
 * @param alertMessage the current alert message template
 * @param actionsConfig the JSON config of current alert actions
 */
protected void processReports(List<ReportData> reports, JsonArray rulesConfig, String alertMessage,
        JsonArray actionsConfig) throws AlertProcessingException {
    if (reports == null || reports.isEmpty()) {
        LOGGER.info("No reports to process!");
        return;
    }

    LOGGER.info("*** Start processing reports...");
    Stopwatch stopwatch = Stopwatch.createStarted();

    applyAlertRulesAndMessages(reports, rulesConfig, alertMessage);
    printReports(reports, "*** Reports after processing alert rules and messages:");
    applyAlertActions(reports, actionsConfig);

    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in {} seconds.",
            stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000);
}
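The division by 1000 above is integer division, so the log reports whole seconds; elapsed(TimeUnit.SECONDS) produces the same truncated value directly. An equivalent sketch of that final log line:

// Same result as elapsed(MILLISECONDS) / 1000, since elapsed(...) truncates.
LOGGER.info("*** Finished processing all reports in {} seconds.", stopwatch.elapsed(TimeUnit.SECONDS));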
From source file:ezbake.frack.common.workers.WarehausBatchWorker.java
/**
 * Flushes the entries accumulated thus far and writes to the warehaus.
 *
 * @throws IOException
 *             throws if an error occurs during warehaus update
 */
private IngestStatus flushQueue() throws IOException {
    WarehausService.Client warehaus = null;
    IngestStatus status;
    synchronized (this) {
        try {
            warehaus = pool.getClient(WarehausServiceConstants.SERVICE_NAME, WarehausService.Client.class);
            Stopwatch stopwatch = Stopwatch.createStarted();
            status = warehaus.put(putRequest, getWarehausToken());
            if (status.getStatus() != IngestStatusEnum.SUCCESS) {
                return status;
            }
            logger.info("Indexed {} documents | {} ms", putRequest.getEntriesSize(),
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
            logger.debug("Sending successfully ingested data to pipes");
            for (int i = 0; i < objects.size(); i++) {
                T obj = objects.get(i);
                Visibility v = visibilities.get(i);
                outputToPipes(v, obj);
            }
            clearArtifacts();
            pool.returnToPool(warehaus);
        } catch (TException e) {
            logger.error("Error during batch indexing of documents.", e);
            pool.returnBrokenToPool(warehaus);
            throw new IOException(e);
        }
    }
    return status;
}
From source file:jobs.ComputeStratifiedFrequencies2.java
@Override
public void doJob() throws Exception {
    Logger.info("trends computation started...");
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    stopwatch.start();
    int now = Integer.parseInt((String) play.Play.configuration.get("analysis.year"));
    int y5 = now - 5;
    // iterate over all the years and save the values
    Logger.info("Reading index...");
    Directory directory = FSDirectory.open(VirtualFile.fromRelativePath("/indexes/index-" + y5).getRealFile());
    DirectoryReader ireader = DirectoryReader.open(directory);
    Terms terms = SlowCompositeReaderWrapper.wrap(ireader).terms("contents");
    TermsEnum iterator = terms.iterator(null);
    BytesRef byteRef;
    Map<Long, Double> frequencies = new HashMap<Long, Double>();
    while ((byteRef = iterator.next()) != null) {
        String term = new String(byteRef.bytes, byteRef.offset, byteRef.length);
        if (!term.contains("_")) {
            Logger.info("Term: " + term);
            Stopwatch time = Stopwatch.createUnstarted();
            time.start();
            Phrase phrase = Phrase.find("byValue", term).first();
            if (phrase != null) {
                Logger.info("Term: " + phrase.value + " (" + term + ")");
                int frequency = iterator.docFreq();
                frequencies.put(phrase.id, (double) frequency);
            }
            time.stop();
            Logger.info("- Query time: " + time.elapsed(TimeUnit.MILLISECONDS));
        }
    }
    ireader.close();
    directory.close();
    Phrase.em().flush();
    Phrase.em().clear();
    int counter = 0;
    for (Long id : frequencies.keySet()) {
        Phrase phrase = Phrase.findById(id);
        phrase.frequency5y = frequencies.get(id);
        phrase.save();
        counter++;
        Logger.info("Counter: " + counter);
        if (counter % 1000 == 0) {
            Phrase.em().flush();
            Phrase.em().clear();
        }
    }
    stopwatch.stop();
    Utils.emailAdmin("Yearly frequency calculated. ",
            "Job finished in " + stopwatch.elapsed(TimeUnit.MINUTES) + " minutes.");
    Logger.info("Job done.");
}
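Both stopwatches above use createUnstarted() followed immediately by start(); that pair is equivalent to the single factory call createStarted(), and the two-step form is only needed when construction and starting happen at different points. A minimal sketch:

// These two stopwatches end up in the same state:
Stopwatch a = Stopwatch.createUnstarted();
a.start();
Stopwatch b = Stopwatch.createStarted();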
From source file:com.github.joshelser.YcsbBatchScanner.java
private void _run() throws Exception {
    log.info("Computing ranges");
    // numRanges
    List<Range> ranges = computeRanges();
    log.info("All ranges calculated: {} ranges found", ranges.size());
    for (int i = 0; i < numIterations; i++) {
        List<List<Range>> partitionedRanges = Lists.partition(ranges, numRangesPerPartition);
        log.info("Executing {} range partitions using a pool of {} threads", partitionedRanges.size(),
                threadPoolSize);
        List<Future<Integer>> results = new ArrayList<>();
        Stopwatch sw = new Stopwatch();
        sw.start();
        for (List<Range> partition : partitionedRanges) {
            // results.add(this.svc.submit(new BatchScannerQueryTask(conn, partition)));
            results.add(this.svc.submit(new ScannerQueryTask(conn, partition)));
        }
        for (Future<Integer> result : results) {
            log.debug("Found {} results", result.get());
        }
        sw.stop();
        log.info("Queries executed in {} ms", sw.elapsed(TimeUnit.MILLISECONDS));
    }
}
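Note that new Stopwatch() compiles only against old Guava releases: the constructors were deprecated once createStarted()/createUnstarted() were introduced (Guava 15) and later removed. Against a current Guava, the timing section would be written as in this sketch:

Stopwatch sw = Stopwatch.createStarted(); // replaces: Stopwatch sw = new Stopwatch(); sw.start();
// ... submit the partitions and await the futures as above ...
sw.stop();
log.info("Queries executed in {} ms", sw.elapsed(TimeUnit.MILLISECONDS));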
From source file:org.apache.drill.exec.client.QuerySubmitter.java
public int submitQuery(DrillClient client, String plan, String type, String format, int width) throws Exception {
    PrintingResultsListener listener;
    String[] queries;
    QueryType queryType;
    type = type.toLowerCase();
    switch (type) {
    case "sql":
        queryType = QueryType.SQL;
        queries = plan.trim().split(";");
        break;
    case "logical":
        queryType = QueryType.LOGICAL;
        queries = new String[] { plan };
        break;
    case "physical":
        queryType = QueryType.PHYSICAL;
        queries = new String[] { plan };
        break;
    default:
        System.out.println("Invalid query type: " + type);
        return -1;
    }
    Format outputFormat;
    format = format.toLowerCase();
    switch (format) {
    case "csv":
        outputFormat = Format.CSV;
        break;
    case "tsv":
        outputFormat = Format.TSV;
        break;
    case "table":
        outputFormat = Format.TABLE;
        break;
    default:
        System.out.println("Invalid format type: " + format);
        return -1;
    }
    Stopwatch watch = new Stopwatch();
    for (String query : queries) {
        listener = new PrintingResultsListener(client.getConfig(), outputFormat, width);
        watch.start();
        client.runQuery(queryType, query, listener);
        int rows = listener.await();
        System.out.println(String.format("%d record%s selected (%f seconds)", rows, rows > 1 ? "s" : "",
                (float) watch.elapsed(TimeUnit.MILLISECONDS) / (float) 1000));
        if (query != queries[queries.length - 1]) {
            System.out.println();
        }
        watch.stop();
        watch.reset();
    }
    return 0;
}
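The stop()/reset() pair at the end of each iteration returns the watch to zero before the next query. Since reset() both zeroes the elapsed time and stops the watch, and the mutator methods return the Stopwatch itself, the per-query timing can also be expressed as one chained call at the top of the loop. A sketch, where runQuery is a hypothetical stand-in for the timed call and await:

Stopwatch watch = Stopwatch.createUnstarted();
for (String query : queries) {
    watch.reset().start(); // reset() zeroes and stops; start() restarts
    runQuery(query);       // hypothetical stand-in for client.runQuery + listener.await
    System.out.printf("(%f seconds)%n", watch.elapsed(TimeUnit.MILLISECONDS) / 1000f);
}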
From source file:org.activityinfo.server.endpoint.gwtrpc.AdvisoryLock.java
@Override
public void close() throws Exception {
    Stopwatch stopwatch = Stopwatch.createStarted();

    String sql = String.format("SELECT RELEASE_LOCK('%s')", ADVISORY_LOCK_NAME);
    Query query = entityManager.getSession().createSQLQuery(sql);
    Object result = query.uniqueResult();
    int resultCode = ((Number) result).intValue();
    if (resultCode != SUCCESS_CODE) {
        throw new RuntimeException("Failed to release lock, result code: " + resultCode);
    }

    stopwatch.stop();
    LOGGER.finest("Release lock takes: " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + "ms");
}
From source file:com.amazon.kinesis.streaming.agent.tailing.PublishingQueue.java
public RecordBuffer<R> take(boolean block) {
    lock.lock();
    try {
        if (block) {
            Stopwatch timer = Stopwatch.createStarted();
            waitNotEmpty();
            return tryTake(timer.elapsed(TimeUnit.MILLISECONDS));
        } else {
            return tryTake(0);
        }
    } finally {
        lock.unlock();
    }
}
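Here elapsed is read while the stopwatch is still running, which is explicitly allowed; the watch keeps counting afterwards, so successive reads are monotonically non-decreasing. A sketch with hypothetical doSomeWork/doMoreWork workloads:

Stopwatch timer = Stopwatch.createStarted();
doSomeWork();                                      // hypothetical workload
long soFar = timer.elapsed(TimeUnit.MILLISECONDS); // legal on a running stopwatch
doMoreWork();                                      // hypothetical workload
long total = timer.elapsed(TimeUnit.MILLISECONDS); // >= soFar; still running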