List of usage examples for com.google.common.base Stopwatch start
public Stopwatch start()
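Stopwatch.start() starts the stopwatch, returns the same Stopwatch instance for chaining, and throws IllegalStateException if the stopwatch is already running. Before the real-world examples below, here is a minimal sketch of typical usage, assuming a Guava version that provides the static factory methods (15+); the Thread.sleep calls are placeholders for whatever work is being timed:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchStartExample {
    public static void main(String[] args) throws InterruptedException {
        // Create a stopwatch in the stopped state, then start it explicitly.
        // (Some older code below uses the deprecated `new Stopwatch()` constructor instead.)
        Stopwatch stopwatch = Stopwatch.createUnstarted();
        stopwatch.start();

        Thread.sleep(100); // placeholder for the work being timed

        stopwatch.stop();
        System.out.println("First run took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // start() also resumes a stopped stopwatch: elapsed time accumulates
        // across start/stop cycles until reset() is called.
        stopwatch.start();
        Thread.sleep(50);
        stopwatch.stop();
        System.out.println("Cumulative: " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}

Several of the examples below rely on exactly this resume behavior to time only selected phases of a loop, stopping the watch while processing each record and restarting it before the next fetch.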
From source file:cosmos.mapred.MediawikiQueries.java
public long columnFetch(Store id, Column colToFetch, Map<Column, Long> counts, long totalResults) throws Exception {
    Stopwatch sw = new Stopwatch();
    String prev = null;
    String lastDocId = null;
    long resultCount = 0l;
    sw.start();

    final CloseableIterable<MultimapRecord> results = this.sorts.fetch(id, Index.define(colToFetch));
    Iterator<MultimapRecord> resultsIter = results.iterator();

    for (; resultsIter.hasNext();) {
        MultimapRecord r = resultsIter.next();
        sw.stop();

        resultCount++;

        Collection<RecordValue<?>> values = r.get(colToFetch);
        TreeSet<RecordValue<?>> sortedValues = Sets.newTreeSet(values);

        if (null == prev) {
            prev = sortedValues.first().value().toString();
        } else {
            boolean plausible = false;
            Iterator<RecordValue<?>> iter = sortedValues.iterator();
            for (; !plausible && iter.hasNext();) {
                String val = iter.next().value().toString();
                if (prev.compareTo(val) <= 0) {
                    plausible = true;
                }
            }

            if (!plausible) {
                System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - " + lastDocId
                        + " shouldn't have come before " + r.docId());
                System.out.println(prev + " compared to " + sortedValues);
                results.close();
                System.exit(1);
            }
        }

        lastDocId = r.docId();
        sw.start();
    }

    sw.stop();

    System.out.println(Thread.currentThread().getName() + ": " + colToFetch + " - Took " + sw.toString()
            + " to fetch results");
    logTiming(totalResults, sw.elapsed(TimeUnit.MILLISECONDS), "fetch:" + colToFetch);

    results.close();

    long expected = counts.containsKey(colToFetch) ? counts.get(colToFetch) : -1;

    if (resultCount != expected) {
        System.out.println(Thread.currentThread().getName() + " " + colToFetch + ": Expected to get "
                + expected + " records but got " + resultCount);
        System.exit(1);
    }

    return resultCount;
}
From source file:co.cask.cdap.data.stream.StreamDataFileReader.java
@Override
public int read(Collection<? super PositionStreamEvent> events, int maxEvents, long timeout, TimeUnit unit,
        ReadFilter readFilter) throws IOException, InterruptedException {
    if (closed) {
        throw new IOException("Reader already closed.");
    }

    int eventCount = 0;
    long sleepNano = computeSleepNano(timeout, unit);
    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        // Keep reading events until max events.
        while (!eof && eventCount < maxEvents) {
            try {
                if (eventInput == null) {
                    doOpen();
                }

                PositionStreamEvent event = nextStreamEvent(readFilter);
                if (event != null) {
                    events.add(event);
                    eventCount++;
                } else if (eof) {
                    break;
                }
            } catch (IOException e) {
                if (eventInput != null) {
                    eventInput.close();
                    eventInput = null;
                }

                if (!(e instanceof EOFException || e instanceof FileNotFoundException)) {
                    throw e;
                }

                // If end of stream file or no timeout is allowed, break the loop.
                if (eof || timeout <= 0) {
                    break;
                }

                if (stopwatch.elapsedTime(unit) >= timeout) {
                    break;
                }

                TimeUnit.NANOSECONDS.sleep(sleepNano);

                if (stopwatch.elapsedTime(unit) >= timeout) {
                    break;
                }
            }
        }

        return (eventCount == 0 && eof) ? -1 : eventCount;
    } catch (IOException e) {
        close();
        throw e;
    }
}
From source file:org.n52.lod.csw.CSWLoDEnabler.java
public void asyncRunStartingFrom(final int startPos) throws IOException {
    log.info("STARTING CSW to LOD..");

    if (!(addToServer || saveToFile)) {
        log.warn("Neither triple store nor file output are activated.");
        return;
    }

    final Stopwatch overallTimer = new Stopwatch();
    overallTimer.start();

    final Stopwatch retrievingTimer = new Stopwatch();
    final Stopwatch mappingTimer = new Stopwatch();
    final Stopwatch otherTimer = new Stopwatch();
    otherTimer.start();

    XmlToRdfMapper mapper = new GluesMapper(config);

    TripleSink serverSink = null;
    if (addToServer) {
        try {
            serverSink = new VirtuosoServer(config, mapper);
        } catch (RuntimeException e) {
            log.error("Could not connect to graph", e);
        }
    }

    TripleSink fileSink = null;
    if (saveToFile) {
        fileSink = new FileTripleSink(mapper);
    }

    long recordsInTotal;
    try {
        recordsInTotal = csw.getNumberOfRecords();
        log.debug("Retrieved number of records from server: {}", recordsInTotal);
    } catch (IllegalStateException | HttpClientException | XmlException e) {
        log.error("Could not retrieve number of records from catalog {}, falling back to {}", csw,
                FALLBACK_RECORDS_TOTAL, e);
        recordsInTotal = FALLBACK_RECORDS_TOTAL;
    }

    report.startIndex = startPos;
    report.recordNumber = recordsInTotal;

    otherTimer.stop();
    async(startPos, recordsInTotal, overallTimer, retrievingTimer, mappingTimer, serverSink, fileSink);
    otherTimer.start();

    if (fileSink != null)
        try {
            fileSink.close();
        } catch (Exception e) {
            log.error("Could not close file sink {}", fileSink, e);
        }

    if (serverSink != null)
        try {
            serverSink.close();
        } catch (Exception e) {
            log.error("Could not close server sink {}", serverSink, e);
        }

    if (!report.issues.isEmpty())
        log.error(report.extendedToString());

    overallTimer.stop();
    otherTimer.stop();

    log.info("DONE with CSW to LOD.. duration = {} (retrieving: {}, mapping = {}, other = {})", overallTimer,
            retrievingTimer, mappingTimer, otherTimer);
    log.info("Results: {}", report);
    log.info("Sinks: server = {}, file = {}", addToServer, saveToFile);
    log.info("Server: {} | File: {}", serverSink, fileSink);
}
From source file:com.palantir.common.base.PrefetchingBatchingVisitable.java
@Override
public <K extends Exception> boolean batchAccept(final int batchSize, AbortingVisitor<? super List<T>, K> v)
        throws K {
    final Queue<List<T>> queue = Queues.newArrayDeque();
    final Lock lock = new ReentrantLock();
    final Condition itemAvailable = lock.newCondition();
    final Condition spaceAvailable = lock.newCondition();
    final AtomicBoolean futureIsDone = new AtomicBoolean(false);
    final AtomicReference<Throwable> exception = new AtomicReference<Throwable>();
    final Stopwatch fetchTime = Stopwatch.createUnstarted();
    final Stopwatch fetchBlockedTime = Stopwatch.createUnstarted();
    final Stopwatch visitTime = Stopwatch.createUnstarted();
    final Stopwatch visitBlockedTime = Stopwatch.createUnstarted();

    Future<?> future = exec.submit(new Runnable() {
        @Override
        public void run() {
            try {
                fetchTime.start();
                delegate.batchAccept(batchSize, new AbortingVisitor<List<T>, InterruptedException>() {
                    @Override
                    public boolean visit(List<T> item) throws InterruptedException {
                        fetchTime.stop();
                        fetchBlockedTime.start();
                        lock.lock();
                        try {
                            while (queue.size() >= capacity) {
                                spaceAvailable.await();
                            }
                            fetchBlockedTime.stop();
                            queue.add(item);
                            itemAvailable.signalAll();
                        } finally {
                            lock.unlock();
                        }
                        fetchTime.start();
                        return true;
                    }
                });
                fetchTime.stop();
            } catch (InterruptedException e) {
                // shutting down
            } catch (Throwable t) {
                exception.set(t);
            } finally {
                if (fetchTime.isRunning()) {
                    fetchTime.stop();
                }
                if (fetchBlockedTime.isRunning()) {
                    fetchBlockedTime.stop();
                }
                lock.lock();
                try {
                    futureIsDone.set(true);
                    itemAvailable.signalAll();
                } finally {
                    lock.unlock();
                }
            }
        }
    });

    try {
        while (true) {
            List<T> batch;
            visitBlockedTime.start();
            lock.lock();
            try {
                while (queue.isEmpty()) {
                    if (futureIsDone.get()) {
                        if (exception.get() != null) {
                            throw Throwables.rewrapAndThrowUncheckedException(exception.get());
                        }
                        return true;
                    }
                    itemAvailable.await();
                }
                batch = queue.poll();
                spaceAvailable.signalAll();
            } finally {
                lock.unlock();
            }
            visitBlockedTime.stop();

            visitTime.start();
            boolean proceed = v.visit(batch);
            visitTime.stop();
            if (!proceed) {
                return false;
            }
        }
    } catch (InterruptedException e) {
        throw Throwables.rewrapAndThrowUncheckedException(e);
    } finally {
        log.debug("{} timings: fetch {}, fetchBlocked {}, visit {}, visitBlocked {}", name, fetchTime,
                fetchBlockedTime, visitTime, visitBlockedTime);
        future.cancel(true);
    }
}
From source file:org.n52.lod.csw.CSWLoDEnabler.java
/**
 * Executes the program: 1.) retrieves the record descriptions from the CSW,
 * 2.) transforms the descriptions to RDF, 3.) inserts the produced RDF into
 * the triplestore.
 *
 * @param startPos
 *
 * @throws IOException
 */
public void runStartingFrom(int startPos) throws IOException {
    log.info("STARTING CSW to LOD..");

    if (!(addToServer || saveToFile)) {
        log.warn("Neither triple store nor file output are activated.");
        return;
    }

    final Stopwatch overallTimer = new Stopwatch();
    overallTimer.start();

    final Stopwatch retrievingTimer = new Stopwatch();
    final Stopwatch mappingTimer = new Stopwatch();
    final Stopwatch otherTimer = new Stopwatch();
    otherTimer.start();

    XmlToRdfMapper mapper = new GluesMapper(config);

    TripleSink serverSink = null;
    if (addToServer) {
        try {
            serverSink = new VirtuosoServer(config, mapper);
        } catch (RuntimeException e) {
            log.error("Could not connect to graph", e);
        }
    }

    TripleSink fileSink = null;
    if (saveToFile) {
        fileSink = new FileTripleSink(mapper);
    }

    long recordsInTotal = FALLBACK_RECORDS_TOTAL;
    try {
        recordsInTotal = csw.getNumberOfRecords();
        log.debug("Retrieved number of records from server: {}", recordsInTotal);
    } catch (IllegalStateException | HttpClientException | XmlException e) {
        log.error("Could not retrieve number of records from catalog {}, falling back to {}", csw,
                FALLBACK_RECORDS_TOTAL, e);
    }

    report.startIndex = startPos;
    report.recordNumber = recordsInTotal;

    otherTimer.stop();

    // main loop
    while (startPos < recordsInTotal) {
        retrievingTimer.start();
        Map<String, GetRecordByIdResponseDocument> records = retrieveRecords(startPos,
                NUMBER_OF_RECORDS_PER_ITERATION, recordsInTotal);
        retrievingTimer.stop();

        mappingTimer.start();
        if (addToServer && serverSink != null)
            serverSink.addRecords(records, report);
        if (saveToFile && fileSink != null)
            fileSink.addRecords(records, report);
        mappingTimer.stop();

        startPos = startPos + NUMBER_OF_RECORDS_PER_ITERATION;
        log.debug("Finished intermediate run at {}", overallTimer.toString());
    } // end of main loop

    otherTimer.start();

    if (fileSink != null)
        try {
            fileSink.close();
        } catch (Exception e) {
            log.error("Could not close file sink {}", fileSink, e);
        }

    if (serverSink != null)
        try {
            serverSink.close();
        } catch (Exception e) {
            log.error("Could not close server sink {}", serverSink, e);
        }

    if (!report.issues.isEmpty())
        log.error(report.extendedToString());

    overallTimer.stop();
    otherTimer.stop();

    log.info("DONE with CSW to LOD.. duration = {} (retrieving: {}, mapping = {}, other = {})", overallTimer,
            retrievingTimer, mappingTimer, otherTimer);
    log.info("Results: {}", report);
    log.info("Sinks: server = {}, file = {}", addToServer, saveToFile);
    log.info("Server: {} | File: {}", serverSink, fileSink);
}
From source file:es.usc.citius.composit.core.composition.optimization.FunctionalDominanceOptimizer.java
public ServiceMatchNetwork<E, T> optimize(ServiceMatchNetwork<E, T> network) {
    // Analyze functional dominance between services. This optimization
    // identifies all dominant services using the semantic inputs and outputs
    // and the existing matches between the concepts in the graph.
    Stopwatch globalWatch = Stopwatch.createStarted();
    Stopwatch localWatch = Stopwatch.createUnstarted();
    List<Set<Operation<E>>> optimized = new ArrayList<Set<Operation<E>>>(network.numberOfLevels());
    log.debug("Starting functional dominance optimization...");

    for (int i = 0; i < network.numberOfLevels(); i++) {
        // Analyze input dominance
        log.debug(" > Analyzing functional dominance on {} (network level {})",
                network.getOperationsAtLevel(i), i);
        localWatch.start();
        Collection<Collection<Operation<E>>> groups = functionalInputEquivalence(network, i);
        localWatch.stop();
        log.debug("\t\tInput equivalence groups: {} (computed in {})", groups, localWatch.toString());
        localWatch.reset();

        // For each equivalent group in this level, check the output dominance
        Set<Operation<E>> nonDominatedServices = new HashSet<Operation<E>>();
        for (Collection<Operation<E>> group : groups) {
            log.debug("\t\tAnalyzing output dominance for group {}", group);
            localWatch.start();
            Collection<Collection<Operation<E>>> nonDominatedGroups = functionalOutputDominance(group, network, i);
            localWatch.stop();
            log.debug("\t\t\t+ Non-dominated groups detected: {} (computed in {})", nonDominatedGroups,
                    localWatch.toString());
            log.debug("\t\t\t+ Size before / after output dominance {}/{}", group.size(),
                    nonDominatedGroups.size());

            // Pick one non-dominated service for each group randomly.
            for (Collection<Operation<E>> ndGroup : nonDominatedGroups) {
                Operation<E> representant = ndGroup.iterator().next();
                log.debug("\t\t\t\t- {} has been selected as the representative service of the group {}",
                        representant, ndGroup);
                nonDominatedServices.add(representant);
            }
        }
        optimized.add(nonDominatedServices);
    }

    localWatch.reset().start();
    DirectedAcyclicSMN<E, T> optimizedNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(optimized), network);
    localWatch.stop();
    log.debug(" > Functional optimized match network computed in {}", localWatch.toString());

    log.debug("Functional Dominance Optimization done in {}. Size before/after {}/{}.",
            globalWatch.stop().toString(), network.listOperations().size(),
            optimizedNetwork.listOperations().size());
    return optimizedNetwork;
}
From source file:yaphyre.raytracer.RayTracer.java
/**
 * Render the image. This is done in a single thread and is very useful for debugging purposes. The rendering itself
 * is implemented in the #renderWindow method.
 *
 * @param imageWidth     The width of the image to create.
 * @param imageHeight    The height of the image.
 * @param rasterToCamera The camera transformation.
 * @param overallTime    The Stopwatch instance used to time the rendering progress
 *
 * @return The elapsed time.
 */
private long renderSingleThreaded(final int imageWidth, final int imageHeight,
        final Transformation rasterToCamera, final Stopwatch overallTime) {
    LOGGER.info("Using single threaded rendering");
    RenderWindow renderWindow = new RenderWindow(0, imageWidth, 0, imageHeight);

    overallTime.start();
    renderWindow(sampler, renderWindow, rasterToCamera);
    overallTime.stop();

    return overallTime.elapsedMillis();
}
From source file:cosmos.mapred.MediawikiQueries.java
public void run(int numIterations) throws Exception {
    final Random offsetR = new Random(), cardinalityR = new Random();

    int iters = 0;

    while (iters < numIterations) {
        Store id = Store.create(this.con,
                this.con.securityOperations().getUserAuthorizations(this.con.whoami()),
                IdentitySet.<Index>create());

        int offset = offsetR.nextInt(MAX_OFFSET);
        int numRecords = cardinalityR.nextInt(MAX_SIZE) + 1;

        BatchScanner bs = this.con.createBatchScanner("sortswiki", new Authorizations(), 4);
        bs.setRanges(Collections.singleton(new Range(Integer.toString(offset), Integer.toString(MAX_ROW))));

        Iterable<Entry<Key, Value>> inputIterable = Iterables.limit(bs, numRecords);

        this.sorts.register(id);

        System.out.println(Thread.currentThread().getName() + ": " + id.uuid() + " - Iteration " + iters);
        long recordsReturned = 0l;
        Function<Entry<Key, Value>, MultimapRecord> func = new Function<Entry<Key, Value>, MultimapRecord>() {
            @Override
            public MultimapRecord apply(Entry<Key, Value> input) {
                Page p;
                try {
                    p = Page.parseFrom(input.getValue().get());
                } catch (InvalidProtocolBufferException e) {
                    throw new RuntimeException(e);
                }
                return pagesToQueryResult(p);
            }
        };

        Map<Column, Long> counts = Maps.newHashMap();
        ArrayList<MultimapRecord> tformSource = Lists.newArrayListWithCapacity(20000);

        Stopwatch sw = new Stopwatch();
        Stopwatch tformSw = new Stopwatch();

        for (Entry<Key, Value> input : inputIterable) {
            tformSw.start();

            MultimapRecord r = func.apply(input);
            tformSource.add(r);

            tformSw.stop();

            loadCountsForRecord(counts, r);
            recordsReturned++;
        }

        sw.start();
        this.sorts.addResults(id, tformSource);
        sw.stop();

        long actualNumResults = tformSource.size();

        System.out.println(Thread.currentThread().getName() + ": Took " + tformSw + " transforming and " + sw
                + " to store " + recordsReturned + " records");
        logTiming(actualNumResults, tformSw.elapsed(TimeUnit.MILLISECONDS), "transformInput");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "ingest");

        bs.close();

        Random r = new Random();
        int max = r.nextInt(10) + 1;

        // Run a bunch of queries
        for (int count = 0; count < max; count++) {
            long resultCount;
            String name;

            int i = r.nextInt(9);
            if (0 == i) {
                resultCount = docIdFetch(id, counts, actualNumResults);
                name = "docIdFetch";
            } else if (1 == i) {
                resultCount = columnFetch(id, REVISION_ID, counts, actualNumResults);
                name = "revisionIdFetch";
            } else if (2 == i) {
                resultCount = columnFetch(id, PAGE_ID, counts, actualNumResults);
                name = "pageIdFetch";
            } else if (3 == i) {
                groupBy(id, REVISION_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByRevisionId";
            } else if (4 == i) {
                groupBy(id, PAGE_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByRevisionId";
            } else if (5 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                name = "contributorUsernameFetch";
            } else if (6 == i) {
                groupBy(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByContributorUsername";
            } else if (7 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_ID, counts, actualNumResults);
                name = "contributorIdFetch";
            } else { //if (8 == i) {
                groupBy(id, CONTRIBUTOR_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByContributorID";
            }
        }

        System.out.println(Thread.currentThread().getName() + ": not deleting " + id);

        // Delete the results
        sw = new Stopwatch();
        sw.start();
        this.sorts.delete(id);
        sw.stop();

        System.out.println(Thread.currentThread().getName() + ": Took " + sw.toString() + " to delete results");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "deleteResults");

        iters++;
    }

    this.sorts.close();
}
From source file:org.sonatype.sisu.bl.support.DefaultBundle.java
/**
 * Starts application and waits for it to boot. If successfully started, sets the state to running.
 * <p/>
 * {@inheritDoc}
 *
 * @throws Exception if a problem occurred during startup of application, wait period or it could not determine if
 *                   application is started in specified timeout
 * @see Bundle#start()
 */
@Override
public void doStart() {
    bootingTime = Time.millis(0);
    final Stopwatch bootingWatch = Stopwatch.createUnstarted();
    try {
        startApplication();
        running = true;
        getRunningBundles().add(this);
        bootingWatch.start();
        waitForBoot();
    } catch (RuntimeException e) {
        doStop();
        throw e;
    } finally {
        if (bootingWatch.isRunning()) {
            bootingWatch.stop();
        }
        bootingTime = Time.millis(bootingWatch.elapsed(TimeUnit.MILLISECONDS));
    }
}
From source file:org.apache.drill.exec.store.parquet.columnreaders.AsyncPageReader.java
private DrillBuf decompress(PageHeader pageHeader, DrillBuf compressedData) {
    DrillBuf pageDataBuf = null;
    Stopwatch timer = Stopwatch.createUnstarted();
    long timeToRead;
    int compressedSize = pageHeader.getCompressed_page_size();
    int uncompressedSize = pageHeader.getUncompressed_page_size();
    pageDataBuf = allocateTemporaryBuffer(uncompressedSize);

    try {
        timer.start();
        CompressionCodecName codecName = parentColumnReader.columnChunkMetaData.getCodec();
        ByteBuffer input = compressedData.nioBuffer(0, compressedSize);
        ByteBuffer output = pageDataBuf.nioBuffer(0, uncompressedSize);
        DecompressionHelper decompressionHelper = new DecompressionHelper(codecName);
        decompressionHelper.decompress(input, compressedSize, output, uncompressedSize);
        pageDataBuf.writerIndex(uncompressedSize);
        timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
        this.updateStats(pageHeader, "Decompress", 0, timeToRead, compressedSize, uncompressedSize);
    } catch (IOException e) {
        handleAndThrowException(e, "Error decompressing data.");
    }

    return pageDataBuf;
}