Usage examples for the com.google.common.base.Stopwatch constructor, Stopwatch()
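All of the examples below use the public Stopwatch() constructor, which older Guava releases exposed. Newer Guava replaced it with static factory methods (the constructors were deprecated in favor of createStarted()/createUnstarted() around Guava 15; exact version numbers here are from memory). A minimal sketch of the modern equivalent:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchCreation {
    public static void main(String[] args) throws InterruptedException {
        // Old style, as in the examples below: construct, then start.
        // Stopwatch watch = new Stopwatch().start();

        // Newer style: the factory method creates and starts in one call.
        Stopwatch watch = Stopwatch.createStarted();
        Thread.sleep(50); // stand-in for the work being timed
        watch.stop();
        System.out.println("took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // createUnstarted() mirrors the bare constructor for deferred starts.
        Stopwatch idle = Stopwatch.createUnstarted();
        idle.start();
    }
}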
From source file:org.apache.drill.exec.store.TimedRunnable.java
/**
 * Execute the list of runnables with the given parallelization. At the end, return values and report completion
 * time stats to the provided logger. Each runnable is allowed a certain timeout. If the timeout is exceeded,
 * existing/pending tasks are cancelled and a {@link UserException} is thrown.
 *
 * @param activity Name of activity for reporting in logger.
 * @param logger The logger to use to report results.
 * @param runnables List of runnables that should be executed and timed. If this list has one item, the task
 *          will be completed in-thread. Runnables must handle {@link InterruptedException}s.
 * @param parallelism The number of threads that should be run to complete this task.
 * @return The list of outcome objects.
 * @throws IOException All exceptions are coerced to IOException since this was built for storage system tasks
 *           initially.
 */
public static <V> List<V> run(final String activity, final Logger logger, final List<TimedRunnable<V>> runnables,
    int parallelism) throws IOException {
  Stopwatch watch = new Stopwatch().start();
  long timedRunnableStart = System.nanoTime();
  if (runnables.size() == 1) {
    parallelism = 1;
    runnables.get(0).run();
  } else {
    parallelism = Math.min(parallelism, runnables.size());
    final ExtendedLatch latch = new ExtendedLatch(runnables.size());
    final ExecutorService threadPool = Executors.newFixedThreadPool(parallelism);
    try {
      for (TimedRunnable<V> runnable : runnables) {
        threadPool.submit(new LatchedRunnable(latch, runnable));
      }
      final long timeout = (long) Math.ceil((TIMEOUT_PER_RUNNABLE_IN_MSECS * runnables.size()) / parallelism);
      if (!latch.awaitUninterruptibly(timeout)) {
        // Issue a shutdown request. This will cause existing threads to interrupt and pending threads to cancel.
        // It is highly important that the task Runnables are handling interrupts correctly.
        threadPool.shutdownNow();
        try {
          // Wait for 5s for currently running threads to terminate. The above call (threadPool.shutdownNow())
          // interrupts any running threads. If the runnables are handling the interrupts properly they should
          // be able to wrap up and terminate. If not, waiting for 5s here gives a chance to identify and log
          // any potential thread leaks.
          threadPool.awaitTermination(5, TimeUnit.SECONDS);
        } catch (final InterruptedException e) {
          logger.warn("Interrupted while waiting for pending threads in activity '{}' to terminate.", activity);
        }
        final String errMsg = String.format(
            "Waited for %dms, but tasks for '%s' are not complete. " + "Total runnable size %d, parallelism %d.",
            timeout, activity, runnables.size(), parallelism);
        logger.error(errMsg);
        throw UserException.resourceError().message(errMsg).build(logger);
      }
    } finally {
      if (!threadPool.isShutdown()) {
        threadPool.shutdown();
      }
    }
  }
  List<V> values = Lists.newArrayList();
  long sum = 0;
  long max = 0;
  long count = 0;
  // measure thread creation times
  long earliestStart = Long.MAX_VALUE;
  long latestStart = 0;
  long totalStart = 0;
  IOException excep = null;
  for (final TimedRunnable<V> reader : runnables) {
    try {
      values.add(reader.getValue());
      sum += reader.getTimeSpentNanos();
      count++;
      max = Math.max(max, reader.getTimeSpentNanos());
      earliestStart = Math.min(earliestStart, reader.getThreadStart() - timedRunnableStart);
      latestStart = Math.max(latestStart, reader.getThreadStart() - timedRunnableStart);
      totalStart += reader.getThreadStart() - timedRunnableStart;
    } catch (IOException e) {
      if (excep == null) {
        excep = e;
      } else {
        excep.addSuppressed(e);
      }
    }
  }
  if (logger.isInfoEnabled()) {
    double avg = (sum / 1000.0 / 1000.0) / (count * 1.0d);
    double avgStart = (totalStart / 1000.0) / (count * 1.0d);
    logger.info(String.format(
        "%s: Executed %d out of %d using %d threads. " + "Time: %dms total, %fms avg, %dms max.",
        activity, count, runnables.size(), parallelism, watch.elapsed(TimeUnit.MILLISECONDS), avg,
        max / 1000 / 1000));
    logger.info(String.format(
        "%s: Executed %d out of %d using %d threads. "
            + "Earliest start: %f \u03BCs, Latest start: %f \u03BCs, Average start: %f \u03BCs.",
        activity, count, runnables.size(), parallelism, earliestStart / 1000.0, latestStart / 1000.0,
        avgStart));
  }
  if (excep != null) {
    throw excep;
  }
  return values;
}
From source file:org.jnbis.imageio.WSQImageReader.java
private void processInput(final int imageIndex) {
  try {
    if (imageIndex != 0) {
      throw new IndexOutOfBoundsException("imageIndex " + imageIndex);
    }
    /* Already processed */
    if (image != null) {
      return;
    }
    final Object input = getInput();
    if (input == null) {
      this.image = null;
      return;
    }
    if (!(input instanceof ImageInputStream)) {
      throw new IllegalArgumentException("bad input: " + input.getClass().getCanonicalName());
    }
    final Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    log.debug("Input:{}", getInput());
    final BitmapWithMetadata bitmap = WSQDecoder.decode((ImageInputStream) getInput());
    stopwatch.stop();
    //log.debug("Decode took: {}", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    metadata = new WSQMetadata();
    for (final Map.Entry<String, String> entry : bitmap.getMetadata().entrySet()) {
      //System.out.println(entry.getKey() + ": " + entry.getValue());
      metadata.setProperty(entry.getKey(), entry.getValue());
    }
    for (final String s : bitmap.getComments()) {
      //System.out.println("//" + s);
      metadata.addComment(s);
    }
    image = new BufferedImage(bitmap.getWidth(), bitmap.getHeight(), BufferedImage.TYPE_BYTE_GRAY);
    final byte[] imageData = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(bitmap.getPixels(), 0, imageData, 0, bitmap.getLength());
  } catch (final IOException ioe) {
    ioe.printStackTrace();
    this.image = null;
  }
}
From source file:org.apache.drill.exec.store.AffinityCalculator.java
/**
 * For a given RowGroup, calculate how many bytes are available on each drillbit endpoint.
 *
 * @param rowGroup the RowGroup to calculate endpoint bytes for
 */
public void setEndpointBytes(ParquetGroupScan.RowGroupInfo rowGroup) {
  Stopwatch watch = new Stopwatch();
  watch.start();
  String fileName = rowGroup.getPath();
  if (!blockMapMap.containsKey(fileName)) {
    buildBlockMap(fileName);
  }
  ImmutableRangeMap<Long, BlockLocation> blockMap = blockMapMap.get(fileName);
  HashMap<DrillbitEndpoint, Long> endpointByteMap = new HashMap<>();
  long start = rowGroup.getStart();
  long end = start + rowGroup.getLength();
  Range<Long> rowGroupRange = Range.closedOpen(start, end);
  // Find the submap of ranges that intersect with the rowGroup
  ImmutableRangeMap<Long, BlockLocation> subRangeMap = blockMap.subRangeMap(rowGroupRange);
  // Iterate through each block in this submap and get the hosts for the block location
  for (Map.Entry<Range<Long>, BlockLocation> block : subRangeMap.asMapOfRanges().entrySet()) {
    String[] hosts;
    Range<Long> blockRange = block.getKey();
    try {
      hosts = block.getValue().getHosts();
    } catch (IOException ioe) {
      throw new RuntimeException("Failed to get hosts for block location", ioe);
    }
    Range<Long> intersection = rowGroupRange.intersection(blockRange);
    long bytes = intersection.upperEndpoint() - intersection.lowerEndpoint();
    // For each host in the current block location, add the intersecting bytes to the corresponding endpoint
    for (String host : hosts) {
      DrillbitEndpoint endpoint = getDrillBitEndpoint(host);
      if (endpointByteMap.containsKey(endpoint)) {
        endpointByteMap.put(endpoint, endpointByteMap.get(endpoint) + bytes);
      } else if (endpoint != null) {
        endpointByteMap.put(endpoint, bytes);
      }
    }
  }
  rowGroup.setEndpointBytes(endpointByteMap);
  rowGroup.setMaxBytes(endpointByteMap.size() > 0 ? Collections.max(endpointByteMap.values()) : 0);
  logger.debug("Row group ({},{}) max bytes {}", rowGroup.getPath(), rowGroup.getStart(),
      rowGroup.getMaxBytes());
  watch.stop();
  logger.debug("Took {} ms to set endpoint bytes", watch.elapsed(TimeUnit.MILLISECONDS));
}
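The containsKey/put accumulation in the host loop above predates Java 8; on Java 8+ the same per-endpoint byte tally can be written with Map.merge. A minimal sketch under that assumption (String stands in for DrillbitEndpoint to keep it self-contained):

import java.util.HashMap;
import java.util.Map;

public class EndpointByteTally {
    public static void main(String[] args) {
        Map<String, Long> endpointByteMap = new HashMap<>();

        // Each call adds 'bytes' to the endpoint's running total, creating the entry on first sight.
        add(endpointByteMap, "node1", 512L);
        add(endpointByteMap, "node2", 128L);
        add(endpointByteMap, "node1", 256L);

        System.out.println(endpointByteMap); // {node1=768, node2=128}
    }

    static void add(Map<String, Long> map, String endpoint, long bytes) {
        if (endpoint == null) {
            return; // mirrors the null-endpoint guard in the example above
        }
        map.merge(endpoint, bytes, Long::sum);
    }
}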
From source file:edu.umkc.sce.App.java
public int run(String[] args) throws Exception {
  Configuration conf = getConf();
  GenericOptionsParser parser = new GenericOptionsParser(conf, args);
  args = parser.getRemainingArgs();
  if (args.length != 1) {
    GenericOptionsParser.printGenericCommandUsage(System.out);
    truncate(getAdmin().listTableNamesByNamespace(MY_NAMESPACE));
    System.exit(2);
  }
  String importFile = args[0];
  FileSystem fs = null;
  BufferedReader br = null;
  Model m = null;
  try {
    fs = FileSystem.get(conf);
    Path path = new Path(importFile);
    br = new BufferedReader(new InputStreamReader(fs.open(path)));
    m = createModel();
    Stopwatch sw = new Stopwatch();
    sw.start();
    m.read(br, null, RDFLanguages.strLangNTriples);
    sw.stop();
    System.out.printf("Loading '%s' took %d.\n", importFile, sw.elapsedTime(TimeUnit.MILLISECONDS));
    sw.reset();
    sw.start();
    runTestQuery(m);
    sw.stop();
    System.out.printf("Query '%s' took %d.\n", query, sw.elapsedTime(TimeUnit.MILLISECONDS));
    sw.reset();
    sw.start();
    createStore(m);
    sw.stop();
    System.out.printf("loadHbase took %d.\n", sw.elapsedTime(TimeUnit.MILLISECONDS));
  } finally {
    if (m != null)
      m.close();
    if (br != null)
      br.close();
    if (fs != null)
      fs.close();
  }
  return 0;
}
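This block reuses one Stopwatch across three phases via reset()/start(). It also calls elapsedTime(TimeUnit), an accessor that later Guava releases renamed to elapsed(TimeUnit) and eventually removed, if memory serves. A minimal sketch of the same reuse pattern against the newer accessor, with the phases stubbed out:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class PhaseTiming {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch sw = Stopwatch.createStarted();
        Thread.sleep(30); // phase 1: e.g. loading input
        sw.stop();
        System.out.printf("Loading took %d ms.%n", sw.elapsed(TimeUnit.MILLISECONDS));

        // reset() zeroes the elapsed time so the next phase starts from scratch.
        sw.reset().start();
        Thread.sleep(20); // phase 2: e.g. running a query
        sw.stop();
        System.out.printf("Query took %d ms.%n", sw.elapsed(TimeUnit.MILLISECONDS));
    }
}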
From source file:org.apache.drill.exec.store.couchbase.CouchbaseRecordReader.java
@Override
public int next() {
  Stopwatch watch = new Stopwatch();
  watch.start();
  keyVector.clear();
  keyVector.allocateNew();
  valueVector.clear();
  valueVector.allocateNew();
  int rowCount = 0;
  done: for (; rowCount < TARGET_RECORD_COUNT && tapClient.hasMoreMessages();) {
    ResponseMessage message = null;
    if (leftOver != null) {
      message = leftOver;
      leftOver = null;
    } else {
      if ((message = tapClient.getNextMessage()) == null) {
        continue;
      }
    }
    if (!keyVector.getMutator().setSafe(rowCount, message.getKey().getBytes())) {
      setOutputRowCount(rowCount);
      leftOver = message;
      break done;
    }
    if (!valueVector.getMutator().setSafe(rowCount, message.getValue())) {
      setOutputRowCount(rowCount);
      leftOver = message;
      break done;
    }
    rowCount++;
  }
  setOutputRowCount(rowCount);
  logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), rowCount);
  return rowCount;
}
From source file:org.caleydo.view.tourguide.internal.compute.ComputeScoreJob.java
@Override
public IStatus run(IProgressMonitor monitor) {
  if (data.isEmpty()
      || (groupMetrics.isEmpty() && groupScores.isEmpty() && stratScores.isEmpty() && stratMetrics.isEmpty()))
    return Status.OK_STATUS;
  final int total = data.keySet().size() + 1;
  monitor.beginTask("Compute LineUp Scores", total);
  log.info(
      "computing group similarity of %d against %d group scores, %d group metrics, %d stratification scores and %d stratification metrics",
      data.size(), groupScores.size(), groupMetrics.size(), stratScores.size(), stratMetrics.size());
  Stopwatch w = new Stopwatch().start();
  progress(0, "Initializing...");
  for (IComputedStratificationScore score : Iterables.concat(stratMetrics, stratScores)) {
    score.getAlgorithm().init(monitor);
    if (Thread.interrupted() || monitor.isCanceled())
      return Status.CANCEL_STATUS;
  }
  for (IComputedGroupScore score : Iterables.concat(groupMetrics, groupScores)) {
    score.getAlgorithm().init(monitor);
    if (Thread.interrupted() || monitor.isCanceled())
      return Status.CANCEL_STATUS;
  }
  int c = 0;
  monitor.worked(1);
  progress(c++ / (float) total, "Computing...");
  Iterator<IComputeElement> it = this.data.keySet().iterator();
  // run the first element once to estimate the progress update interval
  {
    IComputeElement as = it.next();
    if (!run(monitor, as))
      return Status.CANCEL_STATUS;
    monitor.worked(1);
    c++;
  }
  final int fireEvery = fireEvery(w.elapsedMillis());
  int f = fireEvery - 1;
  while (it.hasNext()) {
    IComputeElement as = it.next();
    if (f == 0) {
      progress(c / (float) total, "Computing " + as.getLabel());
      f = fireEvery;
    }
    f--;
    if (!run(monitor, as))
      return Status.CANCEL_STATUS;
    monitor.worked(1);
    c++;
  }
  System.out.println("done in " + w);
  monitor.done();
  return Status.OK_STATUS;
}
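The final println relies on Stopwatch.toString(), which renders the elapsed time in a human-friendly unit chosen by magnitude (e.g. "45.12 ms" or "1.234 s"). A minimal sketch:

import com.google.common.base.Stopwatch;

public class StopwatchToString {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch w = Stopwatch.createStarted();
        Thread.sleep(45); // stand-in for the scoring loop
        // toString() picks a readable unit automatically, e.g. "45.12 ms".
        System.out.println("done in " + w);
    }
}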
From source file:org.apache.tez.common.TezUtilsInternal.java
public static byte[] compressBytes(byte[] inBytes) throws IOException {
  Stopwatch sw = new Stopwatch().start();
  byte[] compressed = compressBytesInflateDeflate(inBytes);
  sw.stop();
  if (LOG.isDebugEnabled()) {
    LOG.debug("UncompressedSize: " + inBytes.length + ", CompressedSize: " + compressed.length
        + ", CompressTime: " + sw.elapsedMillis());
  }
  return compressed;
}
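The isDebugEnabled() guard above exists because the message is assembled by string concatenation; with a parameterized logging API (slf4j is assumed here) the guard becomes optional, since the message is only formatted when the level check passes. A minimal sketch, with elapsedMillis() swapped for the newer elapsed(TimeUnit); the compress helper is a hypothetical stand-in:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CompressTiming {
    private static final Logger LOG = LoggerFactory.getLogger(CompressTiming.class);

    static byte[] timedCompress(byte[] inBytes) {
        Stopwatch sw = Stopwatch.createStarted();
        byte[] compressed = compress(inBytes); // hypothetical stand-in for compressBytesInflateDeflate
        sw.stop();
        // Placeholders defer formatting until the level check passes, so no explicit guard is needed.
        LOG.debug("UncompressedSize: {}, CompressedSize: {}, CompressTime: {} ms",
            inBytes.length, compressed.length, sw.elapsed(TimeUnit.MILLISECONDS));
        return compressed;
    }

    private static byte[] compress(byte[] in) {
        return in; // placeholder; a real implementation would deflate
    }
}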
From source file:com.github.joshelser.YcsbBatchScanner.java
private void _run() throws Exception {
  log.info("Computing ranges");
  // numRanges
  List<Range> ranges = computeRanges();
  log.info("All ranges calculated: {} ranges found", ranges.size());
  for (int i = 0; i < numIterations; i++) {
    List<List<Range>> partitionedRanges = Lists.partition(ranges, numRangesPerPartition);
    log.info("Executing {} range partitions using a pool of {} threads", partitionedRanges.size(),
        threadPoolSize);
    List<Future<Integer>> results = new ArrayList<>();
    Stopwatch sw = new Stopwatch();
    sw.start();
    for (List<Range> partition : partitionedRanges) {
      // results.add(this.svc.submit(new BatchScannerQueryTask(conn, partition)));
      results.add(this.svc.submit(new ScannerQueryTask(conn, partition)));
    }
    for (Future<Integer> result : results) {
      log.debug("Found {} results", result.get());
    }
    sw.stop();
    log.info("Queries executed in {} ms", sw.elapsed(TimeUnit.MILLISECONDS));
  }
}
From source file:org.caleydo.view.tourguide.impl.GSEAAlgorithm.java
private Map<Integer, Float> rankedSet(RankedSet inA) {
  Stopwatch w = new Stopwatch().start();
  ATableBasedDataDomain dataDomain = (ATableBasedDataDomain) perspective.getDataDomain();
  Table table = dataDomain.getTable();
  List<Integer> rows = perspective.getVirtualArray().getIDs();
  List<Integer> cols = table.getDefaultDimensionPerspective(false).getVirtualArray().getIDs();
  // # Compute observed and random permutation gene rankings
  //
  // obs.s2n <- vector(length=N, mode="numeric")
  // signal.strength <- vector(length=Ng, mode="numeric")
  // tag.frac <- vector(length=Ng, mode="numeric")
  // gene.frac <- vector(length=Ng, mode="numeric")
  // coherence.ratio <- vector(length=Ng, mode="numeric")
  // obs.phi.norm <- matrix(nrow = Ng, ncol = nperm)
  // correl.matrix <- matrix(nrow = N, ncol = nperm)
  // obs.correl.matrix <- matrix(nrow = N, ncol = nperm)
  // order.matrix <- matrix(nrow = N, ncol = nperm)
  // obs.order.matrix <- matrix(nrow = N, ncol = nperm)
  //
  // nperm.per.call <- 100
  // n.groups <- nperm %/% nperm.per.call
  // n.rem <- nperm %% nperm.per.call
  // n.perms <- c(rep(nperm.per.call, n.groups), n.rem)
  // n.ends <- cumsum(n.perms)
  // n.starts <- n.ends - n.perms + 1
  //
  // if (n.rem == 0) {
  //   n.tot <- n.groups
  // } else {
  //   n.tot <- n.groups + 1
  // }
  //
  // for (nk in 1:n.tot) {
  //   call.nperm <- n.perms[nk]
  //
  //   print(paste("Computing ranked list for actual and permuted phenotypes.......permutations: ",
  //     n.starts[nk], "--", n.ends[nk], sep=" "))
  //
  //   O <- GSEA.GeneRanking(A, class.labels, gene.labels, call.nperm, permutation.type = perm.type,
  //     sigma.correction = "GeneCluster", fraction=fraction, replace=replace, reverse.sign = reverse.sign)
  //   gc()
  //
  //   order.matrix[,n.starts[nk]:n.ends[nk]] <- O$order.matrix
  //   obs.order.matrix[,n.starts[nk]:n.ends[nk]] <- O$obs.order.matrix
  //   correl.matrix[,n.starts[nk]:n.ends[nk]] <- O$s2n.matrix
  //   obs.correl.matrix[,n.starts[nk]:n.ends[nk]] <- O$obs.s2n.matrix
  //   rm(O)
  // }
  //
  // obs.s2n <- apply(obs.correl.matrix, 1, median)  # using median to assign enrichment scores
  // obs.index <- order(obs.s2n, decreasing=T)
  // obs.s2n <- sort(obs.s2n, decreasing=T)
  for (Integer col : cols) {
    // String gene = table.getDataDomain().getDimensionLabel(col);
    // Set<Integer> prim = dim2primary.apply(col);
    // if (prim == null || prim.isEmpty()) {
    //   // System.out.println(gene.trim());
    // } else if (seen.contains(gene)) {
    //   // System.out.println("dup: " + gene);
    // } else {
    //   seen.add(gene);
    //   // System.out.println("good: " + gene);
    // }
    for (Integer row : rows) {
      Float v = table.getRaw(col, row);
      if (v == null || v.isNaN() || v.isInfinite())
        continue;
      // boolean neg = v < 0;
      // v = (float) Math.log(abs(v));
      // if (neg)
      //   v = -v;
      inA.add(row, v);
    }
    inA.flush(col);
    // System.out.println(table.getDataDomain().getDimensionLabel(col) + " " + inA.correlation.get(col));
  }
  System.out.println(w);
  return inA.toSignal2Noise(dim2primary);
}
From source file:org.apache.accumulo.master.replication.RemoveCompleteReplicationRecords.java
@Override
public void run() {
  BatchScanner bs;
  BatchWriter bw;
  try {
    bs = ReplicationTable.getBatchScanner(conn, 4);
    bw = ReplicationTable.getBatchWriter(conn);
    if (bs == null || bw == null)
      throw new AssertionError(
          "Inconceivable; an exception should have been thrown, but 'bs' or 'bw' was null instead");
  } catch (ReplicationTableOfflineException e) {
    log.debug("Not attempting to remove complete replication records as the table ({}) isn't yet online",
        ReplicationTable.NAME);
    return;
  }
  bs.setRanges(Collections.singleton(new Range()));
  IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
  StatusSection.limit(bs);
  WorkSection.limit(bs);
  bs.addScanIterator(cfg);
  Stopwatch sw = new Stopwatch();
  long recordsRemoved = 0;
  try {
    sw.start();
    recordsRemoved = removeCompleteRecords(conn, bs, bw);
  } finally {
    if (null != bs) {
      bs.close();
    }
    if (null != bw) {
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        log.error("Error writing mutations to {}, will retry", ReplicationTable.NAME, e);
      }
    }
    sw.stop();
  }
  log.info("Removed {} complete replication entries from the table {}", recordsRemoved, ReplicationTable.NAME);
}
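The start()/stop() split across try/finally above guarantees the stopwatch stops even when removeCompleteRecords throws. The same guarantee can be packaged as a small helper; a minimal sketch (the helper and its names are hypothetical):

import com.google.common.base.Stopwatch;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public class TimedCall {
    // Runs the task and reports elapsed milliseconds whether it succeeds or throws.
    static <T> T timed(String label, Callable<T> task) throws Exception {
        Stopwatch sw = Stopwatch.createStarted();
        try {
            return task.call();
        } finally {
            sw.stop();
            System.out.printf("%s took %d ms%n", label, sw.elapsed(TimeUnit.MILLISECONDS));
        }
    }

    public static void main(String[] args) throws Exception {
        long removed = timed("record cleanup", () -> 42L); // stand-in for removeCompleteRecords
        System.out.println("Removed " + removed + " entries");
    }
}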