List of usage examples for com.google.common.base.Stopwatch
Constructor: Stopwatch()
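All of the snippets on this page target older Guava releases, in which Stopwatch still exposed a public no-argument constructor and the elapsedMillis()/elapsedTime(TimeUnit) accessors. The following is a minimal sketch of the shared pattern, not taken from any of the projects below; in later Guava releases (around 15.0 onward) the constructor is deprecated and then removed in favor of Stopwatch.createStarted()/createUnstarted(), and elapsed(TimeUnit) replaces the older accessors.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchBasics {
  public static void main(String[] args) throws InterruptedException {
    // Older Guava, as used in the examples on this page:
    //   Stopwatch watch = new Stopwatch().start();
    // Later Guava replaces the constructor with static factories:
    Stopwatch watch = Stopwatch.createStarted();

    Thread.sleep(50);  // stand-in for the work being timed

    watch.stop();
    // elapsed(TimeUnit) is the current accessor; elapsedMillis()/elapsedTime(TimeUnit),
    // which appear in several examples below, were its older equivalents.
    System.out.println("took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");

    // reset() clears the elapsed time so the same instance can time another phase.
    watch.reset().start();
  }
}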
From source file: org.apache.drill.exec.server.Drillbit.java
public void run() throws Exception {
  final Stopwatch w = new Stopwatch().start();
  logger.debug("Startup begun.");
  coord.start(10000);
  storeProvider.start();
  final DrillbitEndpoint md = engine.start();
  manager.start(md, engine.getController(), engine.getDataConnectionCreator(), coord, storeProvider);
  final DrillbitContext drillbitContext = manager.getContext();
  drillbitContext.getStorage().init();
  drillbitContext.getOptionManager().init();
  javaPropertiesToSystemOptions();
  registrationHandle = coord.register(md);
  startJetty();

  Runtime.getRuntime().addShutdownHook(new ShutdownThread(this, new StackTrace()));
  logger.info("Startup completed ({} ms).", w.elapsed(TimeUnit.MILLISECONDS));
}
From source file: org.apache.drill.exec.physical.impl.xsort.ExternalSortBatch.java
@Override
public IterOutcome innerNext() {
  if (schema != null) {
    if (spillCount == 0) {
      return (getSelectionVector4().next()) ? IterOutcome.OK : IterOutcome.NONE;
    } else {
      Stopwatch w = new Stopwatch();
      w.start();
      int count = copier.next(targetRecordCount);
      if (count > 0) {
        long t = w.elapsed(TimeUnit.MICROSECONDS);
        logger.debug("Took {} us to merge {} records", t, count);
        container.setRecordCount(count);
        return IterOutcome.OK;
      } else {
        logger.debug("copier returned 0 records");
        return IterOutcome.NONE;
      }
    }
  }

  int totalCount = 0;

  try {
    container.clear();
    outer:
    while (true) {
      Stopwatch watch = new Stopwatch();
      watch.start();
      IterOutcome upstream;
      if (first) {
        upstream = IterOutcome.OK_NEW_SCHEMA;
      } else {
        upstream = next(incoming);
      }
      if (upstream == IterOutcome.OK && sorter == null) {
        upstream = IterOutcome.OK_NEW_SCHEMA;
      }
      // logger.debug("Took {} us to get next", watch.elapsed(TimeUnit.MICROSECONDS));
      switch (upstream) {
      case NONE:
        if (first) {
          return upstream;
        }
        break outer;
      case NOT_YET:
        throw new UnsupportedOperationException();
      case STOP:
        return upstream;
      case OK_NEW_SCHEMA:
        // only change in the case that the schema truly changes. Artificial schema changes are ignored.
        if (!incoming.getSchema().equals(schema)) {
          if (schema != null) {
            throw new SchemaChangeException();
          }
          this.schema = incoming.getSchema();
          this.sorter = createNewSorter(context, incoming);
        }
        // fall through.
      case OK:
        if (first) {
          first = false;
        }
        if (incoming.getRecordCount() == 0) {
          for (VectorWrapper w : incoming) {
            w.clear();
          }
          break;
        }
        totalSizeInMemory += getBufferSize(incoming);
        SelectionVector2 sv2;
        if (incoming.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.TWO_BYTE) {
          sv2 = incoming.getSelectionVector2();
          if (sv2.getBuffer(false).isRootBuffer()) {
            oContext.getAllocator().takeOwnership(sv2.getBuffer(false));
          }
        } else {
          try {
            sv2 = newSV2();
          } catch (InterruptedException e) {
            return IterOutcome.STOP;
          } catch (OutOfMemoryException e) {
            throw new OutOfMemoryRuntimeException(e);
          }
        }
        int count = sv2.getCount();
        totalCount += count;
        sorter.setup(context, sv2, incoming);
        Stopwatch w = new Stopwatch();
        w.start();
        sorter.sort(sv2);
        // logger.debug("Took {} us to sort {} records", w.elapsed(TimeUnit.MICROSECONDS), count);
        RecordBatchData rbd = new RecordBatchData(incoming);
        boolean success = false;
        try {
          if (incoming.getSchema().getSelectionVectorMode() == SelectionVectorMode.NONE) {
            rbd.setSv2(sv2);
          }
          batchGroups.add(new BatchGroup(rbd.getContainer(), rbd.getSv2()));
          batchesSinceLastSpill++;
          if (
              // We have spilled at least once and the current memory used is more than the 75% of peak memory used.
              (spillCount > 0 && totalSizeInMemory > .75 * highWaterMark) ||
              // If we haven't spilled so far, do we have enough memory for MSorter if this turns out to be the last incoming batch?
              (spillCount == 0 && !hasMemoryForInMemorySort(totalCount)) ||
              // current memory used is more than 95% of memory usage limit of this operator
              (totalSizeInMemory > .95 * popConfig.getMaxAllocation()) ||
              // current memory used is more than 95% of memory usage limit of this fragment
              (totalSizeInMemory > .95 * oContext.getAllocator().getFragmentLimit()) ||
              // Number of incoming batches (BatchGroups) exceed the limit and number of incoming batches accumulated
              // since the last spill exceed the defined limit
              (batchGroups.size() > SPILL_THRESHOLD && batchesSinceLastSpill >= SPILL_BATCH_GROUP_SIZE)) {
            if (firstSpillBatchCount == 0) {
              firstSpillBatchCount = batchGroups.size();
            }
            if (spilledBatchGroups.size() > firstSpillBatchCount / 2) {
              logger.info("Merging spills");
              spilledBatchGroups.addFirst(mergeAndSpill(spilledBatchGroups));
            }
            spilledBatchGroups.add(mergeAndSpill(batchGroups));
            batchesSinceLastSpill = 0;
          }
          long t = w.elapsed(TimeUnit.MICROSECONDS);
          // logger.debug("Took {} us to sort {} records", t, count);
          success = true;
        } finally {
          if (!success) {
            rbd.clear();
          }
        }
        break;
      case OUT_OF_MEMORY:
        logger.debug("received OUT_OF_MEMORY, trying to spill");
        highWaterMark = totalSizeInMemory;
        if (batchesSinceLastSpill > 2) {
          spilledBatchGroups.add(mergeAndSpill(batchGroups));
          batchesSinceLastSpill = 0;
        } else {
          logger.debug("not enough batches to spill, sending OUT_OF_MEMORY downstream");
          return IterOutcome.OUT_OF_MEMORY;
        }
        break;
      default:
        throw new UnsupportedOperationException();
      }
    }

    if (totalCount == 0) {
      return IterOutcome.NONE;
    }
    if (spillCount == 0) {
      Stopwatch watch = new Stopwatch();
      watch.start();

      if (builder != null) {
        builder.clear();
        builder.close();
      }
      builder = new SortRecordBatchBuilder(oContext.getAllocator(), MAX_SORT_BYTES);

      for (BatchGroup group : batchGroups) {
        RecordBatchData rbd = new RecordBatchData(group.getContainer());
        rbd.setSv2(group.getSv2());
        builder.add(rbd);
      }

      builder.build(context, container);
      sv4 = builder.getSv4();
      mSorter = createNewMSorter();
      mSorter.setup(context, oContext.getAllocator(), getSelectionVector4(), this.container);

      // For testing memory-leak purpose, inject exception after mSorter finishes setup
      injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SETUP);
      mSorter.sort(this.container);

      // sort may have prematurely exited due to should continue returning false.
      if (!context.shouldContinue()) {
        return IterOutcome.STOP;
      }
      // For testing memory-leak purpose, inject exception after mSorter finishes sorting
      injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_AFTER_SORT);
      sv4 = mSorter.getSV4();

      long t = watch.elapsed(TimeUnit.MICROSECONDS);
      // logger.debug("Took {} us to sort {} records", t, sv4.getTotalCount());
      container.buildSchema(SelectionVectorMode.FOUR_BYTE);
    } else {
      spilledBatchGroups.add(mergeAndSpill(batchGroups));
      batchGroups.addAll(spilledBatchGroups);
      logger.warn("Starting to merge. {} batch groups. Current allocated memory: {}",
          batchGroups.size(), oContext.getAllocator().getAllocatedMemory());
      VectorContainer hyperBatch = constructHyperBatch(batchGroups);
      createCopier(hyperBatch, batchGroups, container, false);

      int estimatedRecordSize = 0;
      for (VectorWrapper w : batchGroups.get(0)) {
        try {
          estimatedRecordSize += TypeHelper.getSize(w.getField().getType());
        } catch (UnsupportedOperationException e) {
          estimatedRecordSize += 50;
        }
      }
      targetRecordCount = Math.min(MAX_BATCH_SIZE, Math.max(1, 250 * 1000 / estimatedRecordSize));
      int count = copier.next(targetRecordCount);
      container.buildSchema(SelectionVectorMode.NONE);
      container.setRecordCount(count);
    }

    return IterOutcome.OK_NEW_SCHEMA;
  } catch (SchemaChangeException ex) {
    kill(false);
    context.fail(UserException.unsupportedError(ex)
        .message("Sort doesn't currently support sorts with changing schemas").build(logger));
    return IterOutcome.STOP;
  } catch (ClassTransformationException | IOException ex) {
    kill(false);
    context.fail(ex);
    return IterOutcome.STOP;
  } catch (UnsupportedOperationException e) {
    throw new RuntimeException(e);
  }
}
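The recurring Stopwatch idiom in this operator is per-phase timing inside a loop: each unit of work gets its own started watch, and the elapsed microseconds go to the debug log. A stripped-down sketch of just that idiom follows; PhaseTimingSketch, the Runnable batches, and the logger parameter are stand-ins for illustration, not Drill APIs.

import com.google.common.base.Stopwatch;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;

class PhaseTimingSketch {
  static void processBatches(List<Runnable> batches, Logger logger) {
    for (Runnable batch : batches) {
      Stopwatch w = Stopwatch.createStarted();  // one watch per unit of work
      batch.run();                              // stand-in for sorter.sort(sv2), copier.next(...), etc.
      logger.debug("Took {} us to process batch", w.elapsed(TimeUnit.MICROSECONDS));
    }
  }
}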
From source file: org.apache.hadoop.hbase.ScanPerformanceEvaluation.java
public void testScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
  Stopwatch scanOpenTimer = new Stopwatch();
  Stopwatch scanTimer = new Stopwatch();

  Scan scan = getScan();

  String jobName = "testScanMapReduce";

  Job job = new Job(conf);
  job.setJobName(jobName);
  job.setJarByClass(getClass());

  TableMapReduceUtil.initTableMapperJob(this.tablename, scan, MyMapper.class, NullWritable.class,
      NullWritable.class, job);

  job.setNumReduceTasks(0);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);
  job.setOutputFormatClass(NullOutputFormat.class);

  scanTimer.start();
  job.waitForCompletion(true);
  scanTimer.stop();

  Counters counters = job.getCounters();
  long numRows = counters.findCounter(ScanCounter.NUM_ROWS).getValue();
  long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue();
  long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue();

  double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
  double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
  double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

  System.out.println("HBase scan mapreduce: ");
  System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
  System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");
  System.out.println("total bytes: " + totalBytes + " bytes ("
      + StringUtils.humanReadableInt(totalBytes) + ")");
  System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
  System.out.println("total rows : " + numRows);
  System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
  System.out.println("total cells : " + numCells);
  System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");
}
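This example divides the counters by scanTimer.elapsedTime(TimeUnit.SECONDS), an accessor from older Guava; on a sub-second run the elapsed seconds are 0 and the double division yields Infinity. Below is a small sketch, not HBase code, of the same throughput calculation against the current elapsed(TimeUnit) API with the elapsed time clamped to at least one millisecond; simulateScan() is a hypothetical stand-in for the MapReduce job.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

class ThroughputSketch {
  public static void main(String[] args) {
    Stopwatch scanTimer = Stopwatch.createStarted();
    long totalBytes = simulateScan();  // stand-in for running the job and reading its counters
    scanTimer.stop();

    // Clamp to 1 ms so a very fast run does not divide by zero elapsed time.
    long elapsedMs = Math.max(1, scanTimer.elapsed(TimeUnit.MILLISECONDS));
    double bytesPerSecond = totalBytes * 1000.0 / elapsedMs;
    System.out.println("total time to scan: " + elapsedMs + " ms");
    System.out.println("throughput: " + bytesPerSecond + " B/s");
  }

  private static long simulateScan() {
    long bytes = 0;
    for (int i = 0; i < 1_000_000; i++) {
      bytes += 100;  // pretend each row contributes 100 bytes
    }
    return bytes;
  }
}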
From source file: co.cask.cdap.data2.transaction.stream.AbstractStreamFileConsumer.java
@Override
public final DequeueResult<StreamEvent> poll(int maxEvents, long timeout, TimeUnit timeoutUnit)
    throws IOException, InterruptedException {

  // Only need the CLAIMED state for FIFO with group size > 1.
  byte[] fifoStateContent = null;
  if (consumerConfig.getDequeueStrategy() == DequeueStrategy.FIFO && consumerConfig.getGroupSize() > 1) {
    fifoStateContent = encodeStateColumn(ConsumerEntryState.CLAIMED);
  }

  // Try to read from cache if any
  if (!eventCache.isEmpty()) {
    getEvents(eventCache, polledEvents, maxEvents, fifoStateContent);
  }

  if (polledEvents.size() == maxEvents) {
    return new SimpleDequeueResult(polledEvents);
  }

  // Number of events it tries to read by multiply the maxEvents with the group size. It doesn't have to be exact,
  // just a rough estimate for better read throughput.
  // Also, this maxRead is used throughout the read loop below, hence some extra events might be read and cached
  // for next poll call.
  int maxRead = maxEvents * consumerConfig.getGroupSize();

  long timeoutNano = timeoutUnit.toNanos(timeout);
  Stopwatch stopwatch = new Stopwatch();
  stopwatch.start();

  // Save the reader position.
  // It's a conservative approach to save the reader position before reading so that no
  // event will be missed upon restart.
  consumerState.setState(reader.getPosition());

  // Read from the underlying file reader
  while (polledEvents.size() < maxEvents) {
    int readCount = reader.read(eventCache, maxRead, timeoutNano, TimeUnit.NANOSECONDS, readFilter);
    long elapsedNano = stopwatch.elapsedTime(TimeUnit.NANOSECONDS);
    timeoutNano -= elapsedNano;

    if (readCount > 0) {
      int eventsClaimed = getEvents(eventCache, polledEvents, maxEvents - polledEvents.size(),
          fifoStateContent);

      // TODO: This is a quick fix for preventing backoff logic in flowlet drive kicks in too early.
      // But it doesn't entirely prevent backoff. A proper fix would have a special state in the dequeue result
      // to let flowlet driver knows it shouldn't have backoff.

      // If able to read some events but nothing is claimed, don't check for normal timeout.
      // Only do short transaction timeout checks.
      if (eventsClaimed == 0 && polledEvents.isEmpty()) {
        if (elapsedNano < (txTimeoutNano / 2)) {
          // If still last than half of tx timeout, continue polling without checking normal timeout.
          continue;
        }
      }
    }
    if (timeoutNano <= 0) {
      break;
    }
  }

  if (polledEvents.isEmpty()) {
    return EMPTY_RESULT;
  } else {
    return new SimpleDequeueResult(polledEvents);
  }
}
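The loop above budgets a timeout in nanoseconds and checks it against a running Stopwatch on every pass. A minimal sketch of that deadline-loop shape follows; the Reader interface, the PollWithDeadline class, and the String event list are assumptions for illustration, not CDAP types.

import com.google.common.base.Stopwatch;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

class PollWithDeadline {
  interface Reader {
    int read(List<String> sink, long timeout, TimeUnit unit);
  }

  static List<String> poll(Reader reader, int maxEvents, long timeout, TimeUnit unit) {
    List<String> events = new ArrayList<>();
    Stopwatch stopwatch = Stopwatch.createStarted();
    long budgetNanos = unit.toNanos(timeout);
    while (events.size() < maxEvents) {
      // Remaining budget is the original budget minus total elapsed time so far.
      long remaining = budgetNanos - stopwatch.elapsed(TimeUnit.NANOSECONDS);
      if (remaining <= 0) {
        break;  // time budget spent
      }
      reader.read(events, remaining, TimeUnit.NANOSECONDS);
    }
    return events;
  }
}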
From source file: com.springer.omelet.mail.Email.java
@Override
public List<Message> filerEmailsBySubject(List<Message> message, String emailSubject) {
  Stopwatch sw = new Stopwatch();
  sw.start();
  List<Message> returnMessage = new ArrayList<Message>();
  LOGGER.info("Count of the message for filter by Subject" + message.size());
  for (Message msg : message) {
    try {
      if (msg.getSubject().equalsIgnoreCase(emailSubject)) {
        returnMessage.add(msg);
      }
    } catch (MessagingException e) {
      // TODO Auto-generated catch block
      LOGGER.error(e);
    }
  }
  sw.stop();
  LOGGER.info("Time Taken by Filter EmailBy Subjects is:" + sw.elapsedTime(TimeUnit.SECONDS));
  return returnMessage;
}
From source file: itemrecommendations.CFResourceCalculator.java
private static List<Map<Integer, Double>> startBM25CreationForResourcesPrediction(BookmarkReader reader,
    int sampleSize, boolean userBased, boolean resBased, boolean allResources, boolean bll,
    Features features) {
  int size = reader.getBookmarks().size();
  int trainSize = size - sampleSize;

  Stopwatch timer = new Stopwatch();
  timer.start();
  CFResourceCalculator calculator = new CFResourceCalculator(reader, trainSize, false, userBased, resBased, 5,
      Similarity.COSINE, features);
  timer.stop();
  long trainingTime = timer.elapsed(TimeUnit.MILLISECONDS);

  timer.reset();
  timer.start();
  List<Map<Integer, Double>> results = new ArrayList<Map<Integer, Double>>();
  for (Integer userID : reader.getUniqueUserListFromTestSet(trainSize)) {
    Map<Integer, Double> map = null;
    map = calculator.getRankedResourcesList(userID, -1, true, allResources, bll, true, false); // TODO
    results.add(map);
  }
  timer.stop();
  long testTime = timer.elapsed(TimeUnit.MILLISECONDS);

  timeString = PerformanceMeasurement.addTimeMeasurement(timeString, true, trainingTime, testTime, sampleSize);
  return results;
}
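This example times two phases (building the calculator, then ranking for every test user) with a single Stopwatch by calling stop(), reading elapsed(TimeUnit.MILLISECONDS), and then reset() before restarting. A self-contained sketch of that stop/reset/start pattern is below; TwoPhaseTiming, train(), test(), and busyWork() are hypothetical stand-ins for the real work.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

class TwoPhaseTiming {
  public static void main(String[] args) {
    Stopwatch timer = Stopwatch.createStarted();
    train();                 // phase 1 (stand-in)
    timer.stop();
    long trainingMs = timer.elapsed(TimeUnit.MILLISECONDS);

    timer.reset().start();   // clear the accumulated time before phase 2
    test();                  // phase 2 (stand-in)
    timer.stop();
    long testMs = timer.elapsed(TimeUnit.MILLISECONDS);

    System.out.println("training=" + trainingMs + " ms, test=" + testMs + " ms");
  }

  private static void train() { busyWork(200_000); }
  private static void test() { busyWork(100_000); }

  private static void busyWork(int n) {
    double acc = 0;
    for (int i = 0; i < n; i++) {
      acc += Math.sqrt(i);
    }
    if (acc < 0) {
      System.out.println(acc);  // keep the loop from being optimized away
    }
  }
}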
From source file: org.apache.drill.exec.physical.impl.TopN.TopNBatch.java
private void purge() throws SchemaChangeException {
  Stopwatch watch = new Stopwatch();
  watch.start();
  VectorContainer c = priorityQueue.getHyperBatch();
  VectorContainer newContainer = new VectorContainer(oContext);
  SelectionVector4 selectionVector4 = priorityQueue.getHeapSv4();
  SimpleRecordBatch batch = new SimpleRecordBatch(c, selectionVector4, context);
  SimpleRecordBatch newBatch = new SimpleRecordBatch(newContainer, null, context);
  if (copier == null) {
    copier = RemovingRecordBatch.getGenerated4Copier(batch, context, oContext.getAllocator(), newContainer,
        newBatch, null);
  } else {
    for (VectorWrapper<?> i : batch) {
      ValueVector v = TypeHelper.getNewVector(i.getField(), oContext.getAllocator());
      newContainer.add(v);
    }
    copier.setupRemover(context, batch, newBatch);
  }
  SortRecordBatchBuilder builder = new SortRecordBatchBuilder(oContext.getAllocator(), MAX_SORT_BYTES);
  try {
    do {
      int count = selectionVector4.getCount();
      int copiedRecords = copier.copyRecords(0, count);
      assert copiedRecords == count;
      for (VectorWrapper<?> v : newContainer) {
        ValueVector.Mutator m = v.getValueVector().getMutator();
        m.setValueCount(count);
      }
      newContainer.buildSchema(BatchSchema.SelectionVectorMode.NONE);
      newContainer.setRecordCount(count);
      builder.add(newBatch);
    } while (selectionVector4.next());
    selectionVector4.clear();
    c.clear();
    VectorContainer newQueue = new VectorContainer();
    builder.canonicalize();
    builder.build(context, newQueue);
    priorityQueue.resetQueue(newQueue, builder.getSv4().createNewWrapperCurrent());
    builder.getSv4().clear();
    selectionVector4.clear();
  } finally {
    builder.close();
  }
  logger.debug("Took {} us to purge", watch.elapsed(TimeUnit.MICROSECONDS));
}
From source file: com.twitter.hraven.rest.RestJSONResource.java
@GET @Path("flow/{cluster}/{user}/{appId}/{version}") @Produces(MediaType.APPLICATION_JSON)//www. j ava 2s . c o m public List<Flow> getJobFlowById(@PathParam("cluster") String cluster, @PathParam("user") String user, @PathParam("appId") String appId, @PathParam("version") String version, @QueryParam("limit") int limit, @QueryParam("startTime") long startTime, @QueryParam("endTime") long endTime, @QueryParam("include") List<String> include, @QueryParam("includeConf") List<String> includeConfig, @QueryParam("includeConfRegex") List<String> includeConfigRegex, @QueryParam("includeJobField") List<String> includeJobFields) throws IOException { Stopwatch timer = new Stopwatch().start(); if (startTime == 0) { // look back one month startTime = System.currentTimeMillis() - Constants.THIRTY_DAYS_MILLIS; } if (endTime == 0) { // default to now endTime = System.currentTimeMillis(); } Predicate<String> configFilter = null; if (includeConfig != null && !includeConfig.isEmpty()) { configFilter = new SerializationContext.FieldNameFilter(includeConfig); } else if (includeConfigRegex != null && !includeConfigRegex.isEmpty()) { configFilter = new SerializationContext.RegexConfigurationFilter(includeConfigRegex); } Predicate<String> jobFilter = null; if (includeJobFields != null && !includeJobFields.isEmpty()) { jobFilter = new SerializationContext.FieldNameFilter(includeJobFields); } Predicate<String> flowFilter = null; if (include != null && !include.isEmpty()) { flowFilter = new SerializationContext.FieldNameFilter(include); } serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING, configFilter, flowFilter, jobFilter, null)); List<Flow> flows = getFlowList(cluster, user, appId, version, startTime, endTime, limit); timer.stop(); StringBuilder builderIncludeConfigs = new StringBuilder(); for (String s : includeConfig) { builderIncludeConfigs.append(s); } StringBuilder builderIncludeConfigRegex = new StringBuilder(); for (String s : includeConfig) { builderIncludeConfigRegex.append(s); } if (flows != null) { LOG.info("For flow/{cluster}/{user}/{appId}/{version} with input query: " + "flow/" + cluster + SLASH + user + SLASH + appId + SLASH + version + "?limit=" + limit + " startTime=" + startTime + " endTime=" + endTime + " &includeConf=" + builderIncludeConfigs + " &includeConfRegex=" + builderIncludeConfigRegex + StringUtil.buildParam("includeJobField", includeJobFields) + "&" + StringUtil.buildParam("include", include) + " fetched " + flows.size() + " flows " + " in " + timer); } else { LOG.info("For flow/{cluster}/{user}/{appId}/{version} with input query: " + "flow/" + cluster + SLASH + user + SLASH + appId + SLASH + version + "?limit=" + limit + " startTime=" + startTime + " endTime=" + endTime + " &includeConf=" + builderIncludeConfigs + "&includeConfRegex=" + builderIncludeConfigRegex + StringUtil.buildParam("includeJobField", includeJobFields) + "&" + StringUtil.buildParam("include", include) + " No flows fetched, spent " + timer); } // export latency metrics HravenResponseMetrics.FLOW_VERSION_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS)); return flows; }
From source file: co.cask.cdap.client.MetricsClient.java
/**
 * Gets the {@link RuntimeMetrics} for a particular metrics context.
 *
 * @param tags the metrics tags
 * @param inputName the metrics key for input counter
 * @param processedName the metrics key for processed counter
 * @param exceptionName the metrics key for exception counter
 * @return the {@link RuntimeMetrics}
 */
private RuntimeMetrics getMetrics(final Map<String, String> tags, final String inputName,
    final String processedName, final String exceptionName) {
  return new RuntimeMetrics() {
    @Override
    public long getInput() {
      return getTotalCounter(tags, inputName);
    }

    @Override
    public long getProcessed() {
      return getTotalCounter(tags, processedName);
    }

    @Override
    public long getException() {
      return getTotalCounter(tags, exceptionName);
    }

    @Override
    public void waitForinput(long count, long timeout, TimeUnit timeoutUnit)
        throws TimeoutException, InterruptedException {
      doWaitFor(inputName, count, timeout, timeoutUnit);
    }

    @Override
    public void waitForProcessed(long count, long timeout, TimeUnit timeoutUnit)
        throws TimeoutException, InterruptedException {
      doWaitFor(processedName, count, timeout, timeoutUnit);
    }

    @Override
    public void waitForException(long count, long timeout, TimeUnit timeoutUnit)
        throws TimeoutException, InterruptedException {
      doWaitFor(exceptionName, count, timeout, timeoutUnit);
    }

    @Override
    public void waitFor(String name, long count, long timeout, TimeUnit timeoutUnit)
        throws TimeoutException, InterruptedException {
      doWaitFor(name, count, timeout, timeoutUnit);
    }

    private void doWaitFor(String name, long count, long timeout, TimeUnit timeoutUnit)
        throws TimeoutException, InterruptedException {
      long value = getTotalCounter(tags, name);

      // Min sleep time is 10ms, max sleep time is 1 seconds
      long sleepMillis = Math.max(10, Math.min(timeoutUnit.toMillis(timeout) / 10, TimeUnit.SECONDS.toMillis(1)));
      Stopwatch stopwatch = new Stopwatch().start();
      while (value < count && stopwatch.elapsedTime(timeoutUnit) < timeout) {
        TimeUnit.MILLISECONDS.sleep(sleepMillis);
        value = getTotalCounter(tags, name);
      }

      if (value < count) {
        throw new TimeoutException("Time limit reached. Got '" + value + "' instead of '" + count + "'");
      }
    }

    @Override
    public String toString() {
      return String.format("%s; tags=%d, processed=%d, exception=%d",
          Joiner.on(",").withKeyValueSeparator(":").join(tags), getInput(), getProcessed(), getException());
    }
  };
}
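doWaitFor() above is a poll-until-value-or-timeout loop: sleep a bounded interval, re-read the counter, and compare stopwatch.elapsedTime(timeoutUnit) against the timeout. A generic sketch of the same loop is shown below using the newer elapsed(TimeUnit) accessor; WaitForCounter and the LongSupplier counter are illustrative assumptions, not part of the CDAP client.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.LongSupplier;

class WaitForCounter {
  static long waitFor(LongSupplier counter, long expected, long timeout, TimeUnit unit)
      throws InterruptedException, TimeoutException {
    // Sleep between 10 ms and 1 s, scaled to roughly a tenth of the timeout.
    long sleepMillis = Math.max(10, Math.min(unit.toMillis(timeout) / 10, TimeUnit.SECONDS.toMillis(1)));
    Stopwatch stopwatch = Stopwatch.createStarted();
    long value = counter.getAsLong();
    while (value < expected && stopwatch.elapsed(unit) < timeout) {
      TimeUnit.MILLISECONDS.sleep(sleepMillis);
      value = counter.getAsLong();
    }
    if (value < expected) {
      throw new TimeoutException("Time limit reached. Got '" + value + "' instead of '" + expected + "'");
    }
    return value;
  }
}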
From source file: uk.ac.open.kmi.iserve.discovery.disco.impl.SparqlLogicConceptMatcher.java
/**
 * Obtains all the matching resources that have a MatchType with the URIs of {@code origin} of the type
 * provided (inclusive) or more.
 *
 * @param origins URIs to match
 * @param minType the minimum MatchType we want to obtain
 * @return a {@link com.google.common.collect.Table} with the result of the matching indexed by origin URI
 *         and then destination URI.
 */
@Override
public Table<URI, URI, MatchResult> listMatchesAtLeastOfType(Set<URI> origins, MatchType minType) {
  Table<URI, URI, MatchResult> matchTable = HashBasedTable.create();
  Stopwatch w = new Stopwatch();
  for (URI origin : origins) {
    w.start();
    Map<URI, MatchResult> result = listMatchesAtLeastOfType(origin, minType);
    for (Map.Entry<URI, MatchResult> dest : result.entrySet()) {
      matchTable.put(origin, dest.getKey(), dest.getValue());
    }
    log.debug("Computed matched types for {} in {}. {} total matches.", origin, w.stop().toString(),
        result.size());
    w.reset();
  }
  return matchTable;

  // return obtainMatchResults(origins, minType, this.getMatchTypesSupported().getHighest());
  // TODO: Use the proper implementation for this
}
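Here a single Stopwatch is started per origin, stopped and logged via its toString() (which renders a human-readable duration such as "38.1 ms"), then reset for the next iteration. The sketch below isolates that start/stop/reset-per-item pattern; PerItemTiming and process() are made-up names for illustration.

import com.google.common.base.Stopwatch;
import java.util.Arrays;
import java.util.List;

class PerItemTiming {
  public static void main(String[] args) {
    List<String> items = Arrays.asList("a", "b", "c");
    Stopwatch w = Stopwatch.createUnstarted();
    for (String item : items) {
      w.start();
      process(item);
      System.out.println("Computed " + item + " in " + w.stop());  // concatenation calls toString()
      w.reset();  // clear the elapsed time so the next iteration is measured on its own
    }
  }

  private static void process(String item) {
    double acc = 0;
    for (int i = 0; i < 100_000; i++) {
      acc += Math.sqrt(i);  // stand-in for the real per-item work
    }
  }
}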