List of usage examples for com.google.common.base Stopwatch createStarted
@CheckReturnValue public static Stopwatch createStarted()
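createStarted() creates and immediately starts a new Stopwatch backed by System.nanoTime(). Before the project-specific examples below, here is a minimal, self-contained sketch of the usual pattern: create a started stopwatch, run the work to be measured, stop it, and read the elapsed time in the desired unit. The StopwatchExample class and the timeSomething helper are illustrative placeholders, not code taken from any of the projects listed on this page.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchExample {

    // Illustrative helper: measures how long an arbitrary piece of work takes.
    static long timeSomething(Runnable work) {
        // createStarted() returns a Stopwatch that is already running.
        Stopwatch stopwatch = Stopwatch.createStarted();
        work.run();
        stopwatch.stop();
        // elapsed(...) converts the measured duration to the requested unit.
        return stopwatch.elapsed(TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) {
        long millis = timeSomething(() -> {
            try {
                Thread.sleep(100); // placeholder workload
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        System.out.println("Work took " + millis + " ms");
    }
}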
From source file:org.apache.brooklyn.core.test.qa.longevity.EntityCleanupLongevityTestFixture.java
protected void doTestManyTimesAndAssertNoMemoryLeak(String testName, Runnable iterationBody) {
    int iterations = numIterations();
    Stopwatch timer = Stopwatch.createStarted();
    long last = timer.elapsed(TimeUnit.MILLISECONDS);

    long memUsedNearStart = -1;

    for (int i = 0; i < iterations; i++) {
        if (i % 100 == 0 || i < 5) {
            long now = timer.elapsed(TimeUnit.MILLISECONDS);
            System.gc();
            System.gc();
            String msg = testName + " iteration " + i + " at " + Time.makeTimeStringRounded(now) + " (delta "
                    + Time.makeTimeStringRounded(now - last) + "), using "
                    + ((AbstractManagementContext) managementContext).getGarbageCollector().getUsageString();
            LOG.info(msg);
            if (i >= 100 && memUsedNearStart < 0) {
                // set this the first time we've run 100 times (let that create a baseline with classes loaded etc)
                memUsedNearStart = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
            }
            last = timer.elapsed(TimeUnit.MILLISECONDS);
        }
        iterationBody.run();
    }

    BrooklynStorage storage = ((ManagementContextInternal) managementContext).getStorage();
    Assert.assertTrue(storage.isMostlyEmpty(), "Not empty storage: " + storage);

    DataGrid dg = ((BrooklynStorageImpl) storage).getDataGrid();
    Set<String> keys = dg.getKeys();
    for (String key : keys) {
        ConcurrentMap<Object, Object> v = dg.getMap(key);
        if (v.isEmpty())
            continue;
        // TODO currently we remember ApplicationUsage
        if (key.contains("usage-application")) {
            Assert.assertTrue(v.size() <= iterations, "Too many usage-application entries: " + v.size());
            continue;
        }
        Assert.fail("Non-empty key in datagrid: " + key + " (" + v + ")");
    }

    ConcurrentMap<Object, TaskScheduler> schedulers = ((BasicExecutionManager) managementContext
            .getExecutionManager()).getSchedulerByTag();
    // TODO would like to assert this
    // Assert.assertTrue( schedulers.isEmpty(), "Not empty schedulers: "+schedulers);
    // but weaker form for now
    Assert.assertTrue(schedulers.size() <= 3 * iterations,
            "Not empty schedulers: " + schedulers.size() + " after " + iterations + ", " + schedulers);

    // memory leak detection only applies to subclasses who run lots of iterations
    if (checkMemoryLeaks())
        assertNoMemoryLeak(memUsedNearStart);
}
From source file:com.hortonworks.streamline.streams.service.CatalogResourceUtil.java
static TopologyDetailedResponse enrichTopology(Topology topology, String asUser, Integer latencyTopN,
        EnvironmentService environmentService, TopologyActionsService actionsService,
        TopologyMetricsService metricsService, StreamCatalogService catalogService) {
    LOG.debug("[START] enrichTopology - topology id: {}", topology.getId());
    Stopwatch stopwatch = Stopwatch.createStarted();

    try {
        if (latencyTopN == null) {
            latencyTopN = DEFAULT_N_OF_TOP_N_LATENCY;
        }

        TopologyDetailedResponse detailedResponse;

        String namespaceName = null;
        Namespace namespace = environmentService.getNamespace(topology.getNamespaceId());
        if (namespace != null) {
            namespaceName = namespace.getName();
        }

        try {
            String runtimeTopologyId = actionsService.getRuntimeTopologyId(topology, asUser);
            TopologyMetrics.TopologyMetric topologyMetric = metricsService.getTopologyMetric(topology, asUser);
            List<Pair<String, Double>> latenciesTopN = metricsService.getTopNAndOtherComponentsLatency(topology,
                    asUser, latencyTopN);

            detailedResponse = new TopologyDetailedResponse(topology, TopologyRunningStatus.RUNNING, namespaceName);
            detailedResponse
                    .setRuntime(new TopologyRuntimeResponse(runtimeTopologyId, topologyMetric, latenciesTopN));
        } catch (TopologyNotAliveException e) {
            LOG.debug("Topology {} is not alive", topology.getId());
            detailedResponse = new TopologyDetailedResponse(topology, TopologyRunningStatus.NOT_RUNNING,
                    namespaceName);
            catalogService.getTopologyState(topology.getId()).ifPresent(state -> {
                if (TopologyStateFactory.getInstance()
                        .getTopologyState(state.getName()) == TopologyStates.TOPOLOGY_STATE_DEPLOYED) {
                    try {
                        LOG.info("Force killing streamline topology since its not alive in the cluster");
                        actionsService.killTopology(topology, asUser);
                    } catch (Exception ex) {
                        LOG.error("Error trying to kill topology", ex);
                    }
                }
            });
        } catch (StormNotReachableException | IOException e) {
            LOG.error("Storm is not reachable or fail to operate", e);
            detailedResponse = new TopologyDetailedResponse(topology, TopologyRunningStatus.UNKNOWN, namespaceName);
        } catch (Exception e) {
            LOG.error("Unhandled exception occurs while operate with Storm", e);
            detailedResponse = new TopologyDetailedResponse(topology, TopologyRunningStatus.UNKNOWN, namespaceName);
        }

        LOG.debug("[END] enrichTopology - topology id: {}, elapsed: {} ms", topology.getId(),
                stopwatch.elapsed(TimeUnit.MILLISECONDS));

        return detailedResponse;
    } finally {
        stopwatch.stop();
    }
}
From source file:org.lenskit.eval.traintest.ExperimentJob.java
@Override
protected void compute() {
    ExperimentOutputLayout layout = experiment.getOutputLayout();
    TableWriter globalOutput = layout.prefixTable(experiment.getGlobalOutput(), dataSet, algorithm);
    TableWriter userOutput = layout.prefixTable(experiment.getUserOutput(), dataSet, algorithm);
    RowBuilder outputRow = globalOutput.getLayout().newRowBuilder();

    logger.info("Building {} on {}", algorithm, dataSet);
    Stopwatch buildTimer = Stopwatch.createStarted();
    try (LenskitRecommender rec = buildRecommender()) {
        buildTimer.stop();
        logger.info("Built {} in {}", algorithm.getName(), buildTimer);

        logger.info("Measuring {} on {}", algorithm.getName(), dataSet.getName());

        RowBuilder userRow = userOutput != null ? userOutput.getLayout().newRowBuilder() : null;

        Stopwatch testTimer = Stopwatch.createStarted();

        List<ConditionEvaluator> accumulators = Lists.newArrayList();

        for (EvalTask task : experiment.getTasks()) {
            ConditionEvaluator ce = task.createConditionEvaluator(algorithm, dataSet, rec);
            if (ce != null) {
                accumulators.add(ce);
            } else {
                logger.warn("Could not instantiate task {} for algorithm {} on data set {}", task, algorithm,
                        dataSet);
            }
        }

        LongSet testUsers = dataSet.getTestData().getUserDAO().getUserIds();
        UserEventDAO trainEvents = dataSet.getTrainingData().getUserEventDAO();
        UserEventDAO userEvents = dataSet.getTestData().getUserEventDAO();
        final NumberFormat pctFormat = NumberFormat.getPercentInstance();
        pctFormat.setMaximumFractionDigits(2);
        pctFormat.setMinimumFractionDigits(2);
        final int nusers = testUsers.size();
        logger.info("Testing {} on {} ({} users)", algorithm, dataSet, nusers);
        ProgressLogger progress = ProgressLogger.create(logger).setCount(nusers).setLabel("testing users")
                .start();
        for (LongIterator iter = testUsers.iterator(); iter.hasNext();) {
            if (Thread.interrupted()) {
                throw new EvaluationException("eval job interrupted");
            }
            long uid = iter.nextLong();
            if (userRow != null) {
                userRow.add("User", uid);
            }
            UserHistory<Event> trainData = trainEvents.getEventsForUser(uid);
            if (trainData == null) {
                trainData = History.forUser(uid);
            }
            UserHistory<Event> userData = userEvents.getEventsForUser(uid);
            TestUser user = new TestUser(trainData, userData);

            Stopwatch userTimer = Stopwatch.createStarted();
            for (ConditionEvaluator eval : accumulators) {
                Map<String, Object> ures = eval.measureUser(user);
                if (userRow != null) {
                    userRow.addAll(ures);
                }
            }
            userTimer.stop();

            if (userRow != null) {
                userRow.add("TestTime", userTimer.elapsed(TimeUnit.MILLISECONDS) * 0.001);
                assert userOutput != null;
                try {
                    userOutput.writeRow(userRow.buildList());
                } catch (IOException e) {
                    throw new EvaluationException("error writing user row", e);
                }
                userRow.clear();
            }
            progress.advance();
        }
        progress.finish();

        testTimer.stop();
        logger.info("Tested {} in {}", algorithm.getName(), testTimer);
        outputRow.add("BuildTime", buildTimer.elapsed(TimeUnit.MILLISECONDS) * 0.001);
        outputRow.add("TestTime", testTimer.elapsed(TimeUnit.MILLISECONDS) * 0.001);
        for (ConditionEvaluator eval : accumulators) {
            outputRow.addAll(eval.finish());
        }
    } catch (UncheckedInterruptException ex) {
        logger.info("evaluation interrupted");
        throw ex;
    } catch (Throwable th) {
        logger.error("Error evaluating " + algorithm + " on " + dataSet, th);
        throw th;
    }

    try {
        globalOutput.writeRow(outputRow.buildList());
    } catch (IOException e) {
        throw new EvaluationException("error writing output row", e);
    }
}
From source file:com.minestellar.moon.MinestellarMoon.java
@EventHandler
public void postInit(FMLPostInitializationEvent event) {
    Stopwatch stopwatch = Stopwatch.createStarted();
    MinestellarMoon.proxy.postInit(event);
    log.info("PostInitialization Completed in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms.");
}
From source file:benchmarkio.producer.activemq.ActiveMQMessageProducer.java
private void produce(final String topic, final TextMessage message) {
    for (int i = 0; i < numberOfMessagesToProduce; i++) {
        try {
            log.debug("Publishing message to ActiveMQ topic {}\n{}", topic, message);

            // Start
            final Stopwatch stopwatch = Stopwatch.createStarted();

            producer.send(message);

            // End
            stopwatch.stop();
            histogram.recordValue(stopwatch.elapsed(Consts.TIME_UNIT_FOR_REPORTING));
        } catch (final Exception e) {
            log.error("Error publishing message to ActiveMQ topic {}\n{}", topic, message.toString());
        }
    }

    log.info("Finished production of {} messages", numberOfMessagesToProduce);
}
From source file:org.lenskit.knn.item.model.NormalizingItemItemModelProvider.java
@Override
public SimilarityMatrixModel get() {
    logger.debug("building item-item model");

    LongSortedSet itemUniverse = buildContext.getItems();

    final int nitems = itemUniverse.size();

    SortedKeyIndex itemDomain = SortedKeyIndex.fromCollection(itemUniverse);
    assert itemDomain.size() == nitems;

    List<Long2DoubleMap> matrix = Lists.newArrayListWithCapacity(itemDomain.size());

    // working space for accumulating each row (reuse between rows)
    Stopwatch timer = Stopwatch.createStarted();

    for (int i = 0; i < nitems; i++) {
        assert matrix.size() == i;
        final long rowItem = itemDomain.getKey(i);
        final Long2DoubleSortedMap vec1 = buildContext.itemVector(rowItem);

        // Take advantage of sparsity if we can
        LongIterator neighbors = iterationStrategy.neighborIterator(buildContext, rowItem, false);
        Long2DoubleMap row = new Long2DoubleOpenHashMap(itemDomain.size());

        // Compute similarities and populate the vector
        while (neighbors.hasNext()) {
            final long colItem = neighbors.nextLong();
            if (colItem == rowItem) {
                continue;
            }
            final Long2DoubleSortedMap vec2 = buildContext.itemVector(colItem);
            row.put(colItem, similarity.similarity(rowItem, vec1, colItem, vec2));
        }

        // Normalize and truncate the row
        row = rowNormalizer.makeTransformation(rowItem, row).apply(row);
        row = truncator.truncate(row);

        matrix.add(LongUtils.frozenMap(row));
    }

    timer.stop();
    logger.info("built model for {} items in {}", nitems, timer);

    return new SimilarityMatrixModel(itemDomain, matrix);
}
From source file:org.smartdeveloperhub.curator.connector.LoggedConnectorFuture.java
@Override
public Enrichment get(final long timeout, final TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException {
    final Stopwatch waiting = Stopwatch.createStarted();
    LOGGER.trace("Waiting for acknowledgment...");
    try {
        final Enrichment replyOrNull = this.delegate.get(timeout, unit);
        logAcknowledgeReception(waiting);
        return replyOrNull;
    } catch (final Exception e) {
        LOGGER.trace("Did not receive acknowledgment after {} milliseconds",
                waiting.elapsed(TimeUnit.MILLISECONDS));
        throw e;
    }
}
From source file:org.opendaylight.controller.cluster.datastore.MemberNode.java
public static void verifyRaftState(DistributedDataStore datastore, String shardName, RaftStateVerifier verifier)
        throws Exception {
    ActorContext actorContext = datastore.getActorContext();

    Future<ActorRef> future = actorContext.findLocalShardAsync(shardName);
    ActorRef shardActor = Await.result(future, Duration.create(10, TimeUnit.SECONDS));

    AssertionError lastError = null;
    Stopwatch sw = Stopwatch.createStarted();
    while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
        OnDemandRaftState raftState = (OnDemandRaftState) actorContext.executeOperation(shardActor,
                GetOnDemandRaftState.INSTANCE);

        try {
            verifier.verify(raftState);
            return;
        } catch (AssertionError e) {
            lastError = e;
            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        }
    }

    throw lastError;
}
From source file:io.silverware.microservices.providers.cluster.ClusterMicroserviceProvider.java
@Override
public void run() {
    try {
        final Stopwatch stopwatch = Stopwatch.createStarted();
        // get jgroups configuration
        final String clusterGroup = (String) this.context.getProperties().get(CLUSTER_GROUP);
        final String clusterConfiguration = (String) this.context.getProperties().get(CLUSTER_CONFIGURATION);
        this.timeout = Long.valueOf(this.context.getProperties().get(CLUSTER_LOOKUP_TIMEOUT).toString());
        log.info("Hello from Cluster microservice provider!");
        log.info("Loading cluster configuration from: {} ", clusterConfiguration);
        channel = new JChannel(clusterConfiguration);
        JgroupsMessageReceiver receiver = new JgroupsMessageReceiver(
                KnownImplementation.initializeReponders(context), remoteServiceHandlesStore);
        this.messageDispatcher = new MessageDispatcher(channel, receiver, receiver, receiver);
        this.sender = new JgroupsMessageSender(this.messageDispatcher);
        channel.setDiscardOwnMessages(true);
        log.info("Setting cluster group: {} ", clusterGroup);
        Utils.waitForCDIProvider(context);
        channel.connect(clusterGroup);
        receiver.setMyAddress(channel.getAddress());
        stopwatch.stop();
        log.info("Initialization of ClusterMicroserviceProvider took {} ms. ",
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (Exception e) {
        log.error("Cluster microservice initialization failed.", e);
        throw new SilverWareClusteringException(INITIALIZATION_ERROR, e);
    }
    try {
        while (!Thread.currentThread().isInterrupted()) {
            Thread.sleep(2000);
        }
    } catch (final Exception e) {
        // expected end of platform
    } finally {
        log.info("Bye from Cluster microservice provider!");
        try {
            this.messageDispatcher.close();
            this.channel.close();
        } catch (IOException e) {
            throw new SilverWareClusteringException(JGROUPS_ERROR,
                    "Unexpected error while closing MessageDispatcher", e);
        }
    }
}
From source file:eu.numberfour.n4js.ui.editor.syntaxcoloring.HighlightingParser.java
private List<Token> doParse(CharStream in) {
    TokenSource tokenSource = createLexer(in);
    LazyTokenStream tokenStream = createTokenStream(tokenSource);
    setInitialHiddenTokens(tokenStream);
    InternalN4JSParser parser = createParser(tokenStream);
    IUnorderedGroupHelper helper = unorderedGroupHelper.get();
    if (!(helper instanceof IUnorderedGroupHelper.Null)) {
        throw new IllegalStateException("Unexpected usage of unordered groups.");
    }
    Stopwatch stopwatch = null;
    boolean debug = LOGGER.isDebugEnabled();
    // boolean debug = true;
    if (debug) {
        stopwatch = Stopwatch.createStarted();
    }
    try {
        parser.entryRuleScript();
        while (tokenStream.LT(1) != Token.EOF_TOKEN) {
            tokenStream.consume();
        }
        @SuppressWarnings("unchecked")
        List<Token> result = tokenStream.getTokens();
        return result;
    } catch (Exception re) {
        throw new ParseException(re.getMessage(), re);
    } finally {
        if (debug) {
            assert stopwatch != null;
            long elapsed = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
            if (elapsed > 5) {
                LOGGER.warn("Coloring parser took: " + elapsed);
            }
        }
    }
}