List of usage examples for com.google.common.base Stopwatch stop
public Stopwatch stop()
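Stops the stopwatch; subsequent reads of the elapsed time are fixed at the duration measured up to that point. The method returns the Stopwatch instance itself, so the call can be chained. A minimal self-contained sketch of the typical lifecycle (StopwatchDemo and doWork are illustrative names, not part of Guava); note that the no-arg constructor seen in several examples below was deprecated in Guava 15 in favor of the createStarted()/createUnstarted() factory methods:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch watch = Stopwatch.createStarted(); // factory method; replaces the deprecated new Stopwatch()
        doWork();
        watch.stop(); // freezes the elapsed time; returns the same Stopwatch
        System.out.println("took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println("took " + watch); // toString() picks readable units, e.g. "21.97 ms"
    }

    private static void doWork() throws InterruptedException {
        Thread.sleep(20); // placeholder for the work being timed
    }
}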
From source file: com.github.joshelser.YcsbBatchScanner.java

private void _run() throws Exception {
    log.info("Computing ranges");
    // numRanges
    List<Range> ranges = computeRanges();
    log.info("All ranges calculated: {} ranges found", ranges.size());
    for (int i = 0; i < numIterations; i++) {
        List<List<Range>> partitionedRanges = Lists.partition(ranges, numRangesPerPartition);
        log.info("Executing {} range partitions using a pool of {} threads", partitionedRanges.size(),
                threadPoolSize);
        List<Future<Integer>> results = new ArrayList<>();
        // new Stopwatch() was deprecated in Guava 15; Stopwatch.createStarted() is the current equivalent
        Stopwatch sw = new Stopwatch();
        sw.start();
        for (List<Range> partition : partitionedRanges) {
            // results.add(this.svc.submit(new BatchScannerQueryTask(conn, partition)));
            results.add(this.svc.submit(new ScannerQueryTask(conn, partition)));
        }
        for (Future<Integer> result : results) {
            log.debug("Found {} results", result.get());
        }
        sw.stop();
        log.info("Queries executed in {} ms", sw.elapsed(TimeUnit.MILLISECONDS));
    }
}
From source file: es.usc.citius.composit.core.composition.search.ForwardServiceDiscoverer.java

public ServiceMatchNetwork<E, T> search(Signature<E> signature) {
    Set<E> availableInputs = new HashSet<E>(signature.getInputs());
    Set<E> newOutputs = new HashSet<E>(signature.getInputs());
    Set<E> unmatchedOutputs = new HashSet<E>(signature.getOutputs());
    Set<Operation<E>> usedServices = new HashSet<Operation<E>>();
    Map<Operation<E>, Set<E>> unmatchedInputMap = new HashMap<Operation<E>, Set<E>>();
    List<Set<Operation<E>>> leveledOps = new LinkedList<Set<Operation<E>>>();
    boolean checkExpectedOutputs = !signature.getOutputs().isEmpty();
    boolean stop;
    Stopwatch timer = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    int level = 0;
    do {
        HashSet<Operation<E>> candidates = new HashSet<Operation<E>>();
        levelTimer.start();
        candidates.addAll(discovery.findOperationsConsumingSome(newOutputs));
        log.info("(Level {}) {} potential candidates selected in {}", level++, candidates.size(),
                levelTimer.toString());
        // Remove services that cannot be invoked with the available inputs
        for (Iterator<Operation<E>> it = candidates.iterator(); it.hasNext();) {
            Operation<E> candidate = it.next();
            // Retrieve the unmatched inputs for this operation
            Set<E> unmatchedInputs = unmatchedInputMap.get(candidate);
            if (unmatchedInputs == null) {
                unmatchedInputs = candidate.getSignature().getInputs();
            }
            // Check if the new concepts match some unmatched inputs
            Set<E> matched = matcher.partialMatch(newOutputs, unmatchedInputs).getTargetElements();
            // Don't check invokability
            if (relaxedMatchCondition) {
                // Remove only if there is no match at all
                if (matched.isEmpty()) {
                    it.remove();
                } else {
                    boolean isNew = usedServices.add(candidate);
                    if (!isNew) it.remove();
                }
            } else {
                // Update the unmatchedInputs
                unmatchedInputs = Sets.newHashSet(Sets.difference(unmatchedInputs, matched));
                unmatchedInputMap.put(candidate, unmatchedInputs);
                // If there are no unmatched inputs, the service is invokable!
                if (!unmatchedInputs.isEmpty()) {
                    it.remove();
                } else {
                    // Invokable operation, check if it was used previously
                    boolean isNew = usedServices.add(candidate);
                    if (!isNew) it.remove();
                }
            }
        }
        log.info("\t + [{}] operations selected for this level in {}", candidates.size(),
                levelTimer.toString());
        log.debug("\t\t Candidates: {}", candidates);
        // Collect the new outputs of the new candidates
        Set<E> nextOutputs = Operations.outputs(candidates);
        // Check unmatched outputs
        Set<E> matchedOutputs = matcher.partialMatch(Sets.union(newOutputs, nextOutputs), unmatchedOutputs)
                .getTargetElements();
        //Set<Resource> matchedOutputs = matcher.matched(newOutputs, unmatchedOutputs);
        // Update the unmatched outputs
        unmatchedOutputs = Sets.newHashSet(Sets.difference(unmatchedOutputs, matchedOutputs));
        // Update for the next iteration
        availableInputs.addAll(newOutputs);
        newOutputs = nextOutputs;
        // Add the discovered ops
        if (!candidates.isEmpty()) leveledOps.add(candidates);
        log.debug("\t + Available inputs: {}, new outputs: {}", availableInputs.size(), newOutputs.size());
        // Stop condition. Stop if there are no more candidates and/or expected outputs are satisfied.
        stop = (checkExpectedOutputs) ? candidates.isEmpty() || unmatchedOutputs.isEmpty()
                : candidates.isEmpty();
        levelTimer.reset();
    } while (!stop);
    // Add the source and sink operations
    Source<E> sourceOp = new Source<E>(signature.getInputs());
    Sink<E> sinkOp = new Sink<E>(signature.getOutputs());
    leveledOps.add(0, Collections.<Operation<E>>singleton(sourceOp));
    leveledOps.add(leveledOps.size(), Collections.<Operation<E>>singleton(sinkOp));
    Stopwatch networkWatch = Stopwatch.createStarted();
    // Create a service match network with the discovered services
    DirectedAcyclicSMN<E, T> matchNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(leveledOps), this.matcher);
    log.info(" > Service match network computed in {}", networkWatch.stop().toString());
    log.info("Service Match Network created with {} levels (including source and sink) and {} operations.",
            leveledOps.size(), matchNetwork.listOperations().size());
    log.info("Forward Discovery done in {}", timer.toString());
    this.unmatchedInputMap = unmatchedInputMap;
    return matchNetwork;
}
From source file: de.schildbach.wallet.WalletApplication.java

private void protobufSerializeWallet(final Wallet wallet) throws IOException {
    final Stopwatch watch = Stopwatch.createStarted();
    wallet.saveToFile(walletFile);
    watch.stop();
    log.info("wallet saved to: '{}', took {}", walletFile, watch);
}
From source file: com.vmware.photon.controller.apife.backends.DiskSqlBackend.java

@Transactional
public void tombstone(String kind, String diskId) throws ExternalException {
    Stopwatch tombstoneWatch = Stopwatch.createStarted();
    BaseDiskEntity disk = getDiskDao(kind).findById(diskId).orNull();
    checkNotNull(disk);
    String resourceTicketId = projectBackend.findById(disk.getProjectId()).getResourceTicketId();
    Stopwatch resourceTicketWatch = Stopwatch.createStarted();
    resourceTicketBackend.returnQuota(resourceTicketId, new QuotaCost(disk.getCost()));
    resourceTicketWatch.stop();
    logger.info(
            "DiskSqlBackend.tombstone for Disk Id: {}, Kind:{}, resourceTicket {}, returnQuota in {} milliseconds",
            diskId, kind, resourceTicketId, resourceTicketWatch.elapsed(TimeUnit.MILLISECONDS));
    tombstoneBackend.create(kind, disk.getId());
    getDiskDao(kind).delete(disk);
    tombstoneWatch.stop();
    logger.info("DiskSqlBackend.tombstone for Disk Id: {}, Kind:{} took {} milliseconds", diskId, kind,
            tombstoneWatch.elapsed(TimeUnit.MILLISECONDS));
}
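The example above nests a second stopwatch inside the overall one to attribute a slice of the total time to a single sub-operation. A trimmed sketch of that idiom, assuming Guava's Stopwatch and an SLF4J-style logger (returnQuota and deleteEntities stand in for the sub-operation and the remaining work):

Stopwatch total = Stopwatch.createStarted();

Stopwatch sub = Stopwatch.createStarted();
returnQuota();                       // the sub-operation being singled out (illustrative)
sub.stop();
logger.info("returnQuota in {} ms", sub.elapsed(TimeUnit.MILLISECONDS));

deleteEntities();                    // the remaining work (illustrative)
total.stop();
logger.info("whole operation took {} ms", total.elapsed(TimeUnit.MILLISECONDS));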
From source file: org.apache.geode_examples.cq.Example.java

private void startPuttingData(Region region) throws InterruptedException {
    // Example will run for 20 seconds
    Stopwatch stopWatch = Stopwatch.createStarted();
    while (stopWatch.elapsed(TimeUnit.SECONDS) < 20) {
        // 500ms delay to make this easier to follow
        Thread.sleep(500);
        int randomKey = ThreadLocalRandom.current().nextInt(0, 99 + 1);
        int randomValue = ThreadLocalRandom.current().nextInt(0, 100 + 1);
        region.put(randomKey, randomValue);
        System.out.println("Key: " + randomKey + " Value: " + randomValue);
    }
    stopWatch.stop();
}
From source file: org.eclipse.tracecompass.totalads.algorithms.AlgorithmUtility.java

/**
 * This function trains and validates models.
 *
 * @param trainDirectory
 *            Train Directory
 * @param validationDirectory
 *            Validation Directory
 * @param traceReader
 *            Trace Reader
 * @param modelsNames
 *            Names of models as an array
 * @param outStream
 *            Output stream where the algorithm would display its output
 * @throws TotalADSGeneralException
 *             An exception related to validation of parameters
 * @throws TotalADSDBMSException
 *             An exception related to DBMS
 * @throws TotalADSReaderException
 *             An exception related to the trace reader
 */
public static void trainAndValidateModels(String trainDirectory, String validationDirectory,
        ITraceTypeReader traceReader, String[] modelsNames, IAlgorithmOutStream outStream)
        throws TotalADSGeneralException, TotalADSDBMSException, TotalADSReaderException {
    if (trainDirectory == null || validationDirectory == null || traceReader == null
            || modelsNames == null || outStream == null) {
        throw new TotalADSGeneralException(Messages.AlgorithmUtility_NullArguments);
    }
    if (trainDirectory.isEmpty() || validationDirectory.isEmpty()) {
        throw new TotalADSGeneralException(Messages.AlgorithmUtility_EmptyDirectories);
    }
    IDataAccessObject dataAcessObject = DBMSFactory.INSTANCE.getDataAccessObject();
    if (!dataAcessObject.isConnected()) {
        throw new TotalADSDBMSException(Messages.AlgorithmUtility_NoDB);
    }
    for (int i = 0; i < modelsNames.length; i++) {
        if (dataAcessObject.datbaseExists(modelsNames[i]) == false) {
            throw new TotalADSDBMSException(
                    NLS.bind(Messages.AlgorithmUtility_NoDBofTypeFound, modelsNames[i]));
        }
    }
    Stopwatch stopwatch = Stopwatch.createStarted();
    for (int i = 0; i < modelsNames.length; i++) {
        Boolean isLastTrace = false;
        String modelName = modelsNames[i];
        outStream.addOutputEvent(NLS.bind(Messages.AlgorithmUtility_ModelingOn, modelName));
        outStream.addNewLine();
        // File verification of traces:
        // check for a valid trace type reader and training traces before creating a database
        File fileList[] = getDirectoryHandler(trainDirectory, traceReader);
        try (ITraceIterator it = traceReader.getTraceIterator(fileList[0])) {
        } catch (TotalADSReaderException ex) {
            stopwatch.stop();
            String message = Messages.AlgorithmUtility_InvalidTrainingTraces + ex.getMessage();
            throw new TotalADSGeneralException(message);
        }
        // Check for a valid trace type reader and validation traces before creating a database
        File validationFileList[] = getDirectoryHandler(validationDirectory, traceReader);
        try (ITraceIterator it = traceReader.getTraceIterator(validationFileList[0])) {
        } catch (TotalADSReaderException ex) {
            stopwatch.stop();
            String message = Messages.AlgorithmUtility_InvalidValidationTraces + ex.getMessage();
            throw new TotalADSGeneralException(message);
        }
        // Start training
        outStream.addOutputEvent(Messages.AlgorithmUtility_ModelTraining);
        outStream.addNewLine();
        IDetectionAlgorithm algorithm = getAlgorithmFromModelName(modelName);
        for (int trcCnt = 0; trcCnt < fileList.length; trcCnt++) {
            if (trcCnt == fileList.length - 1) {
                isLastTrace = true;
            }
            // Get the trace
            try (ITraceIterator trace = traceReader.getTraceIterator(fileList[trcCnt])) {
                outStream.addOutputEvent(NLS.bind(Messages.AlgorithmUtility_CurrentTrainingTrace,
                        (trcCnt + 1), fileList[trcCnt].getName()));
                outStream.addNewLine();
                algorithm.train(trace, isLastTrace, modelName, dataAcessObject, outStream);
            }
            // Check if user has asked to stop modeling
            if (Thread.currentThread().isInterrupted()) {
                break;
            }
        }
        // Start validation
        validateModels(validationFileList, traceReader, algorithm, modelName, outStream, dataAcessObject);
        // Check if user has asked to stop modeling
        if (Thread.currentThread().isInterrupted()) {
            break;
        }
    }
    stopwatch.stop();
    Long elapsedMins = stopwatch.elapsed(TimeUnit.MINUTES);
    Long elapsedSecs = stopwatch.elapsed(TimeUnit.SECONDS);
    String msg = NLS.bind(Messages.AlgorithmUtility_TotalTime, elapsedMins, elapsedSecs);
    outStream.addOutputEvent(msg);
    outStream.addNewLine();
}
From source file: uk.ac.open.kmi.iserve.sal.manager.impl.ServiceManagerIndexRdf.java

/**
 * This method will be called when the server is initialised.
 * If necessary it should take care of updating any indexes on boot time.
 */
private void initialise() {
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    populateCache();
    stopwatch.stop();
    log.info("Cache populated. Time taken {}", stopwatch);
}
From source file: annis.gui.exporter.GeneralTextExporter.java

@Override
public boolean convertText(String queryAnnisQL, int contextLeft, int contextRight, Set<String> corpora,
        List<String> keys, String argsAsString, WebResource annisResource, Writer out, EventBus eventBus) {
    try {
        // int count = service.getCount(corpusIdList, queryAnnisQL);
        if (keys == null || keys.isEmpty()) {
            // auto set
            keys = new LinkedList<>();
            keys.add("tok");
            List<AnnisAttribute> attributes = new LinkedList<>();
            for (String corpus : corpora) {
                attributes.addAll(annisResource.path("corpora").path(urlPathEscape.escape(corpus))
                        .path("annotations").queryParam("fetchvalues", "false")
                        .queryParam("onlymostfrequentvalues", "false").get(new AnnisAttributeListType()));
            }
            for (AnnisAttribute a : attributes) {
                if (a.getName() != null) {
                    String[] namespaceAndName = a.getName().split(":", 2);
                    if (namespaceAndName.length > 1) {
                        keys.add(namespaceAndName[1]);
                    } else {
                        keys.add(namespaceAndName[0]);
                    }
                }
            }
        }
        Map<String, String> args = new HashMap<>();
        for (String s : argsAsString.split("&|;")) {
            String[] splitted = s.split("=", 2);
            String key = splitted[0];
            String val = "";
            if (splitted.length > 1) {
                val = splitted[1];
            }
            args.put(key, val);
        }
        int stepSize = 10;
        // 1. Get all the matches as Salt IDs
        InputStream matchStream = annisResource.path("search/find/")
                .queryParam("q", Helper.encodeJersey(queryAnnisQL))
                .queryParam("corpora", StringUtils.join(corpora, ",")).accept(MediaType.TEXT_PLAIN_TYPE)
                .get(InputStream.class);
        try (BufferedReader inReader = new BufferedReader(new InputStreamReader(matchStream, "UTF-8"))) {
            WebResource subgraphRes = annisResource.path("search/subgraph");
            MatchGroup currentMatches = new MatchGroup();
            String currentLine;
            int offset = 0;
            // 2. Iterate over all matches and get the sub-graph for a group of matches
            while (!Thread.currentThread().isInterrupted() && (currentLine = inReader.readLine()) != null) {
                Match match = Match.parseFromString(currentLine);
                currentMatches.getMatches().add(match);
                if (currentMatches.getMatches().size() >= stepSize) {
                    WebResource res = subgraphRes.queryParam("left", "" + contextLeft).queryParam("right",
                            "" + contextRight);
                    if (args.containsKey("segmentation")) {
                        res = res.queryParam("segmentation", args.get("segmentation"));
                    }
                    SubgraphFilter filter = getSubgraphFilter();
                    if (filter != null) {
                        res = res.queryParam("filter", filter.name());
                    }
                    Stopwatch stopwatch = new Stopwatch();
                    stopwatch.start();
                    SaltProject p = res.post(SaltProject.class, currentMatches);
                    stopwatch.stop();
                    // dynamically adjust the number of items to fetch if single subgraph
                    // export was fast enough
                    if (stopwatch.elapsed(TimeUnit.MILLISECONDS) < 500 && stepSize < 50) {
                        stepSize += 10;
                    }
                    convertText(LegacyGraphConverter.convertToResultSet(p), keys, args, out,
                            offset - currentMatches.getMatches().size());
                    currentMatches.getMatches().clear();
                    if (eventBus != null) {
                        eventBus.post(offset + 1);
                    }
                }
                offset++;
            } // end for each line
            if (Thread.interrupted()) {
                // return from loop and abort export
                log.info("Exporter job was interrupted");
                return false;
            }
            // query the left-over matches
            if (!currentMatches.getMatches().isEmpty()) {
                WebResource res = subgraphRes.queryParam("left", "" + contextLeft).queryParam("right",
                        "" + contextRight);
                if (args.containsKey("segmentation")) {
                    res = res.queryParam("segmentation", args.get("segmentation"));
                }
                SubgraphFilter filter = getSubgraphFilter();
                if (filter != null) {
                    res = res.queryParam("filter", filter.name());
                }
                SaltProject p = res.post(SaltProject.class, currentMatches);
                convertText(LegacyGraphConverter.convertToResultSet(p), keys, args, out,
                        offset - currentMatches.getMatches().size() - 1);
            }
            offset = 0;
        }
        out.append("\n");
        out.append("\n");
        out.append("finished");
        return true;
    } catch (AnnisQLSemanticsException | AnnisQLSyntaxException | AnnisCorpusAccessException
            | RemoteException ex) {
        log.error(null, ex);
    } catch (IOException ex) {
        log.error(null, ex);
    }
    return false;
}
From source file: de.metas.ui.web.config.ServletLoggingFilter.java

@Override
public void doFilter(final ServletRequest request, final ServletResponse response, final FilterChain chain)
        throws IOException, ServletException {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        updateMDC(request);
        chain.doFilter(request, response);
    } finally {
        //
        // Log the request
        if (logger.isInfoEnabled()) {
            final String requestInfo = extractRequestInfo(request);
            logger.info("Executed in {}: {}", stopwatch.stop(), requestInfo);
        }
        //
        // Cleanup MDC (keep it last)
        cleanupMDC();
    }
}
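Because stop() returns the Stopwatch itself (see the signature at the top), the filter above can stop and log in a single expression; the logger then relies on Stopwatch.toString(), which renders the elapsed time in human-readable units. The same chaining appears in the composit and geogit examples. In condensed form (sw and the surrounding calls mirror the example above):

Stopwatch sw = Stopwatch.createStarted();
chain.doFilter(request, response);           // the work being timed
logger.info("Executed in {}", sw.stop());    // stop() returns this; toString() -> e.g. "12.34 ms"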
From source file: org.geogit.repository.RevTreeBuilder2.java

/**
 * @return the new tree, not saved to the object database. Any bucket tree though is saved when
 *         this method returns.
 */
public RevTree build() {
    if (nodeIndex == null) {
        return original.builder(db).build();
    }
    Stopwatch sw = new Stopwatch().start();
    RevTreeBuilder builder;
    try {
        builder = new RevTreeBuilder(db, original);
        Iterator<Node> nodes = nodeIndex.nodes();
        while (nodes.hasNext()) {
            Node node = nodes.next();
            builder.put(node);
        }
    } finally {
        nodeIndex.close();
    }
    LOGGER.debug("Index traversed in {}", sw.stop());
    sw.reset().start();
    RevTree namedTree = builder.build();
    saveExtraFeatureTypes();
    LOGGER.debug("RevTreeBuilder.build() in {}", sw.stop());
    return namedTree;
}