List of usage examples for java.util.concurrent.atomic.AtomicLong.get()
public final long get()
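AtomicLong.get() performs an atomic (volatile) read and returns the current value. A minimal, self-contained sketch, separate from the project sources below:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetDemo {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(5L);
        System.out.println(counter.get()); // prints 5: an atomic read of the current value
        counter.incrementAndGet();
        System.out.println(counter.get()); // prints 6
    }
}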
From source file:org.deeplearning4j.models.word2vec.Word2Vec.java
private void doIteration(Collection<List<VocabWord>> batch2, final AtomicLong numWordsSoFar,
        final AtomicLong nextRandom, ActorSystem actorSystem) {
    final AtomicLong lastReported = new AtomicLong(System.currentTimeMillis());
    Parallelization.iterateInParallel(batch2, new Parallelization.RunnableWithParams<List<VocabWord>>() {
        @Override
        public void run(List<VocabWord> sentence, Object[] args) {
            double alpha = Math.max(minLearningRate,
                    Word2Vec.this.alpha.get() * (1 - (1.0 * numWordsSoFar.get() / (double) totalWords)));
            long now = System.currentTimeMillis();
            long diff = Math.abs(now - lastReported.get());
            if (numWordsSoFar.get() > 0 && diff > 1000) {
                lastReported.set(now);
                log.info("Words so far " + numWordsSoFar.get() + " with alpha at " + alpha);
            }
            trainSentence(sentence, nextRandom, alpha);
            numWordsSoFar.set(numWordsSoFar.get() + sentence.size());
        }
    }, actorSystem);
}
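Two AtomicLong idioms appear above: a shared progress counter read with get(), and a last-reported timestamp used to rate-limit logging. Note that numWordsSoFar.set(numWordsSoFar.get() + sentence.size()) is a non-atomic read-modify-write, so concurrent workers can lose updates; numWordsSoFar.addAndGet(sentence.size()) would be the atomic equivalent. A standalone sketch of the rate-limiting idiom, using compareAndSet so only one thread logs per interval (all names here are illustrative, not from the Word2Vec source):

import java.util.concurrent.atomic.AtomicLong;

public class ThrottledReporter {
    private final AtomicLong lastReported = new AtomicLong(System.currentTimeMillis());

    // Logs at most roughly once per second, no matter how many threads call it.
    void maybeReport(long wordsSoFar) {
        long now = System.currentTimeMillis();
        long last = lastReported.get();
        // compareAndSet lets exactly one caller win the right to log for this interval,
        // avoiding the duplicate logs a plain get()/set() pair can allow.
        if (now - last > 1000 && lastReported.compareAndSet(last, now)) {
            System.out.println("Words so far " + wordsSoFar);
        }
    }
}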
From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java
@Test
public void testScheduledStats() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.start();
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageProducer producer = session.createProducer(session.createQueue(defaultQueueName));
    producer.setDeliveryDelay(2000);
    producer.send(session.createTextMessage("test"));

    verifyPendingStats(defaultQueueName, 1, publishedMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 1, publishedMessageSize.get());
    verifyScheduledStats(defaultQueueName, 1, publishedMessageSize.get());

    consumeTestQueueMessages(1);

    verifyPendingStats(defaultQueueName, 0, 0);
    verifyPendingDurableStats(defaultQueueName, 0, 0);
    verifyScheduledStats(defaultQueueName, 0, 0);

    connection.close();
}
From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java
@Test
public void testTopicMessageSize() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.setClientID("clientId");
    connection.start();
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageConsumer consumer = session.createConsumer(session.createTopic(defaultTopicName));

    publishTestTopicMessages(200, publishedMessageSize);

    verifyPendingStats(defaultTopicName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 0, 0);

    // consume all messages
    consumeTestMessages(consumer, 200);

    // All messages should now be gone
    verifyPendingStats(defaultTopicName, 0, 0);
    verifyPendingDurableStats(defaultQueueName, 0, 0);

    connection.close();
}
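Here publishTestTopicMessages receives the publishedMessageSize counter and, presumably via addAndGet, accumulates each message's size into it; the assertions then read the running total once with get(). A reduced sketch of that accumulate-then-assert pattern (the loop body and per-message size are hypothetical):

import java.util.concurrent.atomic.AtomicLong;

public class PublishedSizeTracker {
    public static void main(String[] args) {
        AtomicLong publishedMessageSize = new AtomicLong();
        for (int i = 0; i < 200; i++) {
            long bodySize = 100; // hypothetical size of one published message, in bytes
            publishedMessageSize.addAndGet(bodySize); // accumulate as each message is sent
        }
        // The verification step reads the accumulated total once:
        System.out.println("expected pending size = " + publishedMessageSize.get());
    }
}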
From source file:org.deeplearning4j.models.word2vec.Word2Vec.java
protected void addWords(List<VocabWord> sentence, AtomicLong nextRandom, List<VocabWord> currMiniBatch) {
    for (VocabWord word : sentence) {
        if (word == null)
            continue;

        // Subsampling randomly discards frequent words while keeping the ranking the same
        if (sample > 0) {
            double numDocs = vectorizer.index().numDocuments();
            double ran = (Math.sqrt(word.getWordFrequency() / (sample * numDocs)) + 1) * (sample * numDocs)
                    / word.getWordFrequency();
            if (ran < (nextRandom.get() & 0xFFFF) / (double) 65536) {
                continue;
            }
            currMiniBatch.add(word);
        } else {
            currMiniBatch.add(word);
        }
    }
}
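The subsampling test above converts the low 16 bits of the shared nextRandom value into a number in [0, 1). A tiny sketch of that step in isolation; the linear congruential update shown is the word2vec-style rule, an assumption here since the original class advances nextRandom elsewhere:

import java.util.concurrent.atomic.AtomicLong;

public class SubsampleCoin {
    public static void main(String[] args) {
        AtomicLong nextRandom = new AtomicLong(5L);
        for (int i = 0; i < 5; i++) {
            // word2vec-style linear congruential update (assumed; not part of the snippet above)
            nextRandom.set(nextRandom.get() * 25214903917L + 11);
            // low 16 bits of the current value, scaled into [0, 1)
            double ran = (nextRandom.get() & 0xFFFF) / (double) 65536;
            System.out.println(ran);
        }
    }
}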
From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java
@Test
public void testMessageSizeTwoDurables() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.setClientID("clientId");
    connection.start();

    publishTestMessagesDurable(connection, new String[] { "sub1", "sub2" }, 200, publishedMessageSize,
            DeliveryMode.PERSISTENT, false);

    // verify the count and size - doubled because two durables mean two queue bindings
    verifyPendingStats(defaultTopicName, 400, 2 * publishedMessageSize.get());
    verifyPendingDurableStats(defaultTopicName, 400, 2 * publishedMessageSize.get());

    // consume messages just for sub1
    consumeDurableTestMessages(connection, "sub1", 200, publishedMessageSize);

    // There is still a durable that hasn't consumed, so the messages should exist
    verifyPendingStats(defaultTopicName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultTopicName, 200, publishedMessageSize.get());

    connection.close();

    // restart and verify load
    this.killServer();
    this.restartServer();

    verifyPendingStats(defaultTopicName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultTopicName, 200, publishedMessageSize.get());
}
From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java
@Test
public void testTopicNonPersistentMessageSize() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.setClientID("clientId");
    connection.start();
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageConsumer consumer = session.createConsumer(session.createTopic(defaultTopicName));

    publishTestTopicMessages(200, DeliveryMode.NON_PERSISTENT, publishedMessageSize);

    verifyPendingStats(defaultTopicName, 200, publishedMessageSize.get());

    // consume all messages
    consumeTestMessages(consumer, 200);

    // All messages should now be gone
    verifyPendingStats(defaultTopicName, 0, 0);

    connection.close();
}
From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java
@Test
public void testMessageSizeOneDurable() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.setClientID("clientId");
    connection.start();

    publishTestMessagesDurable(connection, new String[] { "sub1" }, 200, publishedMessageSize,
            DeliveryMode.PERSISTENT, false);

    // verify the count and size - the durable is offline, so all 200 should be pending since none are in prefetch
    verifyPendingStats(defaultTopicName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultTopicName, 200, publishedMessageSize.get());

    // consume all messages
    consumeDurableTestMessages(connection, "sub1", 200, publishedMessageSize);

    // All messages should now be gone
    verifyPendingStats(defaultTopicName, 0, 0);
    verifyPendingDurableStats(defaultTopicName, 0, 0);

    connection.close();
}
From source file:com.linkedin.pinot.perf.QueryRunner.java
/**
 * Use multiple threads to run queries as fast as possible.
 *
 * Starts {numThreads} worker threads that send queries back to back (blocking calls), and uses the main thread
 * to collect statistics and log them periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void multiThreadedsQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        final int numThreads) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int reportIntervalMillis = 3000;

    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    final int numQueries = queries.size();
    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    final DescriptiveStatistics stats = new DescriptiveStatistics();
    final CountDownLatch latch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; j < numQueries; j++) {
                    String query = queries.get(random.nextInt(numQueries));
                    long startTime = System.currentTimeMillis();
                    try {
                        driver.postQuery(query);
                        long clientTime = System.currentTimeMillis() - startTime;
                        synchronized (stats) {
                            stats.addValue(clientTime);
                        }
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(clientTime);
                    } catch (Exception e) {
                        LOGGER.error("Caught exception while running query: {}", query, e);
                        return;
                    }
                }
                latch.countDown();
            }
        });
    }
    executorService.shutdown();

    int iter = 0;
    long startTime = System.currentTimeMillis();
    while (latch.getCount() > 0) {
        Thread.sleep(reportIntervalMillis);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        LOGGER.info("Time Passed: {}s, Query Executed: {}, QPS: {}, Avg Response Time: {}ms",
                timePassedSeconds, count, count / timePassedSeconds, avgResponseTime);
        iter++;
        if (iter % 10 == 0) {
            printStats(stats);
        }
    }
    printStats(stats);
}
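The pattern worth noting above: each worker publishes into an AtomicInteger count and an AtomicLong total, and the reporting thread reads both with get() to derive QPS and average latency. A stripped-down sketch of that pattern (all names here are illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class LatencyStats {
    private final AtomicInteger count = new AtomicInteger(0);
    private final AtomicLong totalMillis = new AtomicLong(0L);

    // Called by worker threads after each operation.
    void record(long elapsedMillis) {
        count.getAndIncrement();
        totalMillis.getAndAdd(elapsedMillis);
    }

    // Called by the reporting thread. The two get() calls are individually atomic
    // but not a single snapshot, so the average is approximate under load.
    double averageMillis() {
        int c = count.get();
        return c == 0 ? 0.0 : (double) totalMillis.get() / c;
    }
}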
From source file:org.lendingclub.mercator.solarwinds.SolarwindsScanner.java
public void getNodeInformation() {
    try {
        ObjectNode response = querySolarwinds("SELECT Nodes.NodeID, Nodes.SysName, Nodes.Caption, "
                + "Nodes.Description, Nodes.IOSVersion, Nodes.CustomProperties.SerialNumber, Nodes.MachineType, "
                + "Nodes.Vendor, Nodes.IPAddress, Nodes.SysObjectID, Nodes.DNS, Nodes.ObjectSubType, "
                + "Nodes.Status, Nodes.StatusDescription, Nodes.CustomProperties.Department, Nodes.Location,"
                + " Nodes.CustomProperties.City FROM Orion.Nodes ORDER BY Nodes.SysName");
        AtomicLong earlistUpdate = new AtomicLong(Long.MAX_VALUE);
        AtomicBoolean error = new AtomicBoolean(false);
        response.path("results").forEach(v -> {
            try {
                // solarwindsID is the hashed URL + node ID
                getProjector().getNeoRxClient().execCypher(
                        "merge(a: SolarwindsNode {solarwindsID:{solarwindsID}}) set a+={props}, a.updateTs=timestamp() return a",
                        "solarwindsID", solarwindsScannerBuilder.hashURL + v.path("NodeID"), "props",
                        flattenNode(v)).blockingFirst(MissingNode.getInstance());
            } catch (Exception e) {
                logger.warn("problem", e);
                error.set(true);
            }
        });
        if (error.get() == false) {
            getNeoRxClient().execCypher(
                    "match(a: SolarwindsNode) where a.solarwindsID={solarwindsID} and a.updateTs<{cutoff} detach delete a",
                    "solarwindsID", solarwindsScannerBuilder.hashURL, "cutoff", earlistUpdate.get());
        }
    } catch (Exception e) {
        logger.info(e.toString());
    }
}
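A quirk worth noting: earlistUpdate is initialized to Long.MAX_VALUE and read with get() as the deletion cutoff, but nothing in this snippet ever lowers it. The idiom it points at is using an effectively-final AtomicLong to accumulate a minimum inside a lambda, since captured locals cannot be reassigned; a sketch under that assumption (names illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class MinTimestampTracker {
    public static void main(String[] args) {
        List<Long> updateTimestamps = List.of(1700000005L, 1700000001L, 1700000009L);
        // A lambda can only capture effectively-final locals, so a mutable
        // AtomicLong holder is used to accumulate the minimum.
        AtomicLong earliestUpdate = new AtomicLong(Long.MAX_VALUE);
        updateTimestamps.forEach(ts -> earliestUpdate.getAndUpdate(prev -> Math.min(prev, ts)));
        System.out.println(earliestUpdate.get()); // prints 1700000001
    }
}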
From source file:org.apache.blur.store.hdfs.HdfsDirectory.java
protected TimerTask reportOnBlockLocality() {
    final Counter totalHdfsBlock = _metricsGroup.totalHdfsBlock;
    final Counter localHdfsBlock = _metricsGroup.localHdfsBlock;
    final AtomicLong prevTotalCount = new AtomicLong();
    final AtomicLong prevLocalCount = new AtomicLong();
    return new TimerTask() {
        @Override
        public void run() {
            try {
                long[] counts = runReport();
                long total = counts[0];
                long local = counts[1];
                long prevTotal = prevTotalCount.get();
                long prevLocal = prevLocalCount.get();
                totalHdfsBlock.inc(total - prevTotal);
                localHdfsBlock.inc(local - prevLocal);
                prevTotalCount.set(total);
                prevLocalCount.set(local);
            } catch (Exception e) {
                LOG.error("Unknown error.", e);
            }
        }
    };
}
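This last example reads the previous totals with get(), reports only the delta since the prior run, and stores the new totals with set(); plain get()/set() suffices because java.util.Timer runs all of a timer's tasks on a single thread. A minimal, self-contained sketch of the same delta-reporting idiom (the growing total is simulated):

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicLong;

public class DeltaReporter {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong total = new AtomicLong();      // monotonically growing total, e.g. blocks seen
        final AtomicLong prevTotal = new AtomicLong();  // snapshot of total at the previous tick
        Timer timer = new Timer();
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                long current = total.addAndGet(7);      // pretend 7 more units of work happened
                long delta = current - prevTotal.get(); // report only the change since the last tick
                System.out.println("delta=" + delta);
                prevTotal.set(current);                 // get()/set() is safe here: one timer thread
            }
        }, 0, 500);
        Thread.sleep(2000);
        timer.cancel();
    }
}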