List of usage examples for java.util.concurrent.atomic.AtomicLong#get()

public final long get()
Returns the current value of the AtomicLong.
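Before the real-world examples below, here is a minimal, self-contained sketch of the call itself: get() performs a volatile read of the current value. The class name and values are illustrative only.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(5L);
        long snapshot = counter.get();                     // volatile read of the current value
        System.out.println("current = " + snapshot);       // current = 5
        counter.incrementAndGet();
        System.out.println("updated = " + counter.get());  // updated = 6
    }
}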
From source file: org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java
@Test
public void testTopicPersistentAndNonPersistentMessageSize() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();
    AtomicLong publishedNonPersistentMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.setClientID("clientId");
    connection.start();

    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageConsumer consumer = session.createConsumer(session.createTopic(defaultTopicName));

    publishTestTopicMessages(100, DeliveryMode.NON_PERSISTENT, publishedNonPersistentMessageSize);
    publishTestTopicMessages(100, DeliveryMode.PERSISTENT, publishedMessageSize);

    verifyPendingStats(defaultTopicName, 200,
            publishedMessageSize.get() + publishedNonPersistentMessageSize.get());

    // consume all messages
    consumeTestMessages(consumer, 200);

    // All messages should now be gone
    verifyPendingStats(defaultTopicName, 0, 0);

    connection.close();
}
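The test above passes AtomicLong instances into a publish helper as out-parameters, then reads the accumulated totals with get() when asserting the pending stats. A minimal sketch of that accumulator idiom; the publish helper and the per-message size here are hypothetical stand-ins:

import java.util.concurrent.atomic.AtomicLong;

class SizeAccumulatorSketch {
    // Hypothetical helper: adds each published message's size to the shared total.
    static void publish(int count, AtomicLong totalSize) {
        for (int i = 0; i < count; i++) {
            totalSize.addAndGet(128L); // pretend every message is 128 bytes
        }
    }

    public static void main(String[] args) {
        AtomicLong persistentSize = new AtomicLong();
        AtomicLong nonPersistentSize = new AtomicLong();
        publish(100, persistentSize);
        publish(100, nonPersistentSize);
        // get() reads each accumulated total back out.
        long expected = persistentSize.get() + nonPersistentSize.get();
        System.out.println("expected pending bytes = " + expected); // 25600
    }
}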
From source file: org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java
@Test
public void testMessageSizeSharedDurable() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.setClientID("clientId");
    connection.start();

    // The publish method will create a second shared consumer
    Session s = connection.createSession();
    MessageConsumer c = s.createSharedDurableConsumer(s.createTopic(defaultTopicName), "sub1");
    publishTestMessagesDurable(connection, new String[] { "sub1", }, 200, publishedMessageSize,
            DeliveryMode.PERSISTENT, true);

    // verify the count and size - double because two durables so two queue bindings
    verifyPendingStats(defaultTopicName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultTopicName, 200, publishedMessageSize.get());
    c.close();

    // consume messages for sub1
    consumeDurableTestMessages(connection, "sub1", 200, publishedMessageSize);

    verifyPendingStats(defaultTopicName, 0, publishedMessageSize.get());
    verifyPendingDurableStats(defaultTopicName, 0, publishedMessageSize.get());

    connection.close();
}
From source file: org.commonjava.indy.core.ctl.NfcController.java
public NotFoundCacheInfoDTO getInfo(StoreKey key) throws IndyWorkflowException {
    NotFoundCacheInfoDTO dto = new NotFoundCacheInfoDTO();
    final AtomicLong size = new AtomicLong(0);
    try {
        switch (key.getType()) {
            case group: {
                // Warn: This is very expensive if the group holds thousands of repositories
                final List<StoreKey> stores = storeManager.query().packageType(key.getPackageType())
                        .getOrderedConcreteStoresInGroup(key.getName()).stream()
                        .map(artifactStore -> artifactStore.getKey()).collect(Collectors.toList());
                if (stores.size() >= MAX_GROUP_MEMBER_SIZE_FOR_GET_MISSING) {
                    throw new IndyWorkflowException(SC_UNPROCESSABLE_ENTITY,
                            "Get missing info for group failed (too many members), size: " + stores.size());
                }
                for (final StoreKey storeKey : stores) {
                    size.addAndGet(cache.getSize(storeKey));
                }
                break;
            }
            default: {
                size.addAndGet(cache.getSize(key));
                break;
            }
        }
        dto.setSize(size.get());
        return dto;
    } catch (final IndyDataException e) {
        throw new IndyWorkflowException("Failed to get info for ArtifactStore: %s.", e, key);
    }
}
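Here size is an AtomicLong used as a local accumulator, with get() taking the final total for the DTO. A common reason for reaching for AtomicLong in this role is that a plain long local cannot be reassigned from inside a lambda (captured locals must be effectively final). A minimal sketch of that idiom, with illustrative sizes:

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class LambdaAccumulatorSketch {
    public static void main(String[] args) {
        List<Long> cacheSizes = List.of(10L, 20L, 30L);
        final AtomicLong size = new AtomicLong(0);
        // A plain long could not be reassigned inside this lambda.
        cacheSizes.forEach(s -> size.addAndGet(s));
        System.out.println("total = " + size.get()); // total = 60
    }
}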
From source file: io.warp10.continuum.egress.EgressFetchHandler.java
private static void rawDump(PrintWriter pw, GTSDecoderIterator iter, boolean dedup, boolean signed,
        long timespan, AtomicReference<Metadata> lastMeta, AtomicLong lastCount, boolean sortMeta)
        throws IOException {

    String name = null;
    Map<String, String> labels = null;

    StringBuilder sb = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (dedup) {
            decoder = decoder.dedup();
        }

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        GTSEncoder encoder = decoder.getEncoder(true);

        //
        // Only display the class + labels if they have changed since the previous GTS
        //

        Map<String, String> lbls = decoder.getLabels();

        //
        // Compute the name
        //

        name = decoder.getName();
        labels = lbls;

        sb.setLength(0);
        GTSHelper.encodeName(sb, name);
        sb.append("{");

        boolean first = true;

        if (sortMeta) {
            lbls = new TreeMap<String, String>(lbls);
        }

        for (Entry<String, String> entry : lbls.entrySet()) {
            //
            // Skip owner/producer labels and any other 'private' labels
            //
            if (!signed) {
                if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                    continue;
                }
                if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                    continue;
                }
            }

            if (!first) {
                sb.append(",");
            }

            GTSHelper.encodeName(sb, entry.getKey());
            sb.append("=");
            GTSHelper.encodeName(sb, entry.getValue());
            first = false;
        }

        sb.append("}");

        if (encoder.getCount() > toDecodeCount) {
            // We have too much data, shrink the encoder
            GTSEncoder enc = new GTSEncoder();
            enc.safeSetMetadata(decoder.getMetadata());
            while (decoder.next() && toDecodeCount > 0) {
                enc.addValue(decoder.getTimestamp(), decoder.getLocation(), decoder.getElevation(),
                        decoder.getValue());
                toDecodeCount--;
            }
            encoder = enc;
        }

        if (timespan < 0) {
            currentCount += encoder.getCount();
        }

        if (encoder.size() > 0) {
            pw.print(encoder.getBaseTimestamp());
            pw.print("//");
            pw.print(encoder.getCount());
            pw.print(" ");
            pw.print(sb.toString());
            pw.print(" ");
            //pw.println(new String(OrderPreservingBase64.encode(encoder.getBytes())));
            OrderPreservingBase64.encodeToWriter(encoder.getBytes(), pw);
            pw.write('\r');
            pw.write('\n');
        }
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}
From source file: io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java
@Test(timeout = 50_000)
public void testShutdown()
        throws NoSuchFieldException, IllegalAccessException, InterruptedException, ExecutionException {
    final CountDownLatch latch = new CountDownLatch(1);
    final ListenableFuture future;
    final AtomicLong runs = new AtomicLong(0);
    long prior = 0;
    try {
        final URIExtractionNamespace namespace = new URIExtractionNamespace("ns", tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(1L), null);
        final String cacheId = UUID.randomUUID().toString();
        final Runnable runnable = manager.getPostRunnable(namespace, factory, cacheId);
        future = manager.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                runnable.run();
                latch.countDown();
                runs.incrementAndGet();
            }
        }, cacheId);

        latch.await();
        Assert.assertFalse(future.isCancelled());
        Assert.assertFalse(future.isDone());

        prior = runs.get();
        while (runs.get() <= prior) {
            Thread.sleep(50);
        }
        Assert.assertTrue(runs.get() > prior);
    } finally {
        lifecycle.stop();
    }
    manager.waitForServiceToEnd(1_000, TimeUnit.MILLISECONDS);

    prior = runs.get();
    Thread.sleep(50);
    Assert.assertEquals(prior, runs.get());

    Field execField = NamespaceExtractionCacheManager.class
            .getDeclaredField("listeningScheduledExecutorService");
    execField.setAccessible(true);
    Assert.assertTrue(((ListeningScheduledExecutorService) execField.get(manager)).isShutdown());
    Assert.assertTrue(((ListeningScheduledExecutorService) execField.get(manager)).isTerminated());
}
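The assertions above compare successive runs.get() snapshots: the counter must keep advancing while the scheduled task is alive, and must freeze once the service is shut down. A minimal sketch of that polling pattern against a counter updated by another thread; the executor, period, and sleeps are illustrative:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

class PollingCounterSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong runs = new AtomicLong(0);
        ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
        exec.scheduleAtFixedRate(runs::incrementAndGet, 0, 10, TimeUnit.MILLISECONDS);

        long prior = runs.get();
        while (runs.get() <= prior) { // wait until at least one more run has happened
            Thread.sleep(5);
        }

        exec.shutdown();
        exec.awaitTermination(1, TimeUnit.SECONDS);
        long afterShutdown = runs.get();
        Thread.sleep(50);
        // No further increments once the executor has terminated.
        System.out.println(afterShutdown == runs.get()); // true
    }
}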
From source file: stroom.index.server.BenchmarkIndex.java
@Override
public void run() {
    init();
    final long batchStartTime = System.currentTimeMillis();
    final IndexShardWriterImpl[] writers = new IndexShardWriterImpl[indexShards.length];
    for (int i = 0; i < writers.length; i++) {
        final IndexShard indexShard = indexShards[i];
        writers[i] = new IndexShardWriterImpl(indexShardService, indexFields, indexShard.getIndex(),
                indexShard);
        writers[i].setRamBufferSizeMB(ramBufferMbSize);
        writers[i].open(true);
    }
    final AtomicLong atomicLong = new AtomicLong();

    final long indexStartTime = System.currentTimeMillis();

    final ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(jobSize);
    for (int i = 0; i < jobSize; i++) {
        final Runnable r = () -> {
            long myId;
            while ((myId = atomicLong.incrementAndGet()) < docCount) {
                try {
                    final int idx = (int) (myId % writers.length);
                    writers[idx].addDocument(getDocument(myId));
                } catch (final Exception e) {
                    e.printStackTrace();
                }
            }
        };
        threadPoolExecutor.execute(r);
    }
    threadPoolExecutor.shutdown();

    // Wait for termination.
    while (!threadPoolExecutor.isTerminated()) {
        // Wait 1 second.
        ThreadUtil.sleep(1000);

        final long docsSoFar = atomicLong.get();
        final long secondsSoFar = (System.currentTimeMillis() - batchStartTime) / 1000;

        for (int i = 0; i < writers.length; i++) {
            final IndexShardWriterImpl impl = writers[i];
            final IndexShard indexShard = indexShards[i];

            if (secondsSoFar > 0) {
                final long docsPerSecond = docsSoFar / secondsSoFar;
                impl.sync();
                LOGGER.info("run() - " + StringUtils.rightPad(ModelStringUtil.formatCsv(docsSoFar), 10)
                        + " doc ps " + ModelStringUtil.formatCsv(docsPerSecond) + " ("
                        + indexShard.getFileSizeString() + ")");
            }
            if (nextCommit != null && docsSoFar > nextCommit) {
                impl.flush();
                nextCommit = ((docsSoFar / commitCount) * commitCount) + commitCount;
                LOGGER.info("run() - commit " + docsSoFar + " next commit is " + nextCommit);
            }
        }
    }
    final long indexEndTime = System.currentTimeMillis();

    final long secondsSoFar = (System.currentTimeMillis() - batchStartTime) / 1000;
    final long docsPerSecond = atomicLong.get() / secondsSoFar;

    for (final IndexShardWriter writer : writers) {
        writer.close();
    }

    final long batchEndTime = System.currentTimeMillis();

    LOGGER.info("runWrite() - Complete");
    LOGGER.info("=====================");
    LOGGER.info("");
    LOGGER.info("Using Args");
    LOGGER.info("==========");
    LoggerPrintStream traceStream = LoggerPrintStream.create(LOGGER, false);
    traceArguments(traceStream);
    traceStream.close();
    LOGGER.info("");
    LOGGER.info("Stats");
    LOGGER.info("=====");
    LOGGER.info("Open Time  " + toMsNiceString(indexStartTime - batchStartTime));
    LOGGER.info("Index Time " + toMsNiceString(indexEndTime - indexStartTime));
    LOGGER.info("Close Time " + toMsNiceString(batchEndTime - indexEndTime));
    LOGGER.info("Total Time " + toMsNiceString(batchEndTime - batchStartTime));
    LOGGER.info("");
    LOGGER.info("Final Docs PS " + ModelStringUtil.formatCsv(docsPerSecond));

    traceStream = LoggerPrintStream.create(LOGGER, false);
    for (int i = 0; i < writers.length; i++) {
        LOGGER.info("");
        final IndexShardWriterImpl impl = writers[i];
        LOGGER.info("Writer " + StringUtils.leftPad(String.valueOf(i), 2));
        LOGGER.info("=========");
        impl.trace(traceStream);
    }
    traceStream.close();

    LOGGER.info("");
    LOGGER.info("Search");
    LOGGER.info("=====");
    try {
        final IndexShardSearcherImpl[] reader = new IndexShardSearcherImpl[indexShards.length];
        final IndexReader[] readers = new IndexReader[indexShards.length];
        for (int i = 0; i < reader.length; i++) {
            reader[i] = new IndexShardSearcherImpl(indexShards[i]);
            reader[i].open();
            readers[i] = reader[i].getReader();
        }
        for (final String arg : docArgs) {
            doSearchOnField(readers, arg);
        }
        doSearchOnField(readers, "multifield");
        doSearchOnField(readers, "dupfield");
        LOGGER.info("=====");

        for (int i = 0; i < reader.length; i++) {
            reader[i].close();
        }
    } catch (final Exception ex) {
        ex.printStackTrace();
    }
}
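In the benchmark above, worker threads claim document ids with incrementAndGet() while the monitoring loop reads progress with get(). A minimal sketch of that ticket-dispenser pattern; the document count and pool size are illustrative:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

class TicketDispenserSketch {
    public static void main(String[] args) throws InterruptedException {
        final long docCount = 10_000;
        final AtomicLong nextId = new AtomicLong();
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 4; i++) {
            pool.execute(() -> {
                long myId;
                while ((myId = nextId.incrementAndGet()) <= docCount) {
                    // index document myId ...
                }
            });
        }
        pool.shutdown();
        while (!pool.isTerminated()) {
            Thread.sleep(100);
            // get() gives a progress snapshot without blocking the workers.
            System.out.println("processed so far: " + Math.min(nextId.get(), docCount));
        }
    }
}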
From source file: com.raphfrk.craftproxyclient.gui.CraftProxyGUI.java
public long getCapacity() {
    final AtomicLong capacity = new AtomicLong();
    try {
        SwingUtilities.invokeAndWait(new Runnable() {
            public void run() {
                long size;
                try {
                    size = Long.parseLong(desiredSize.getText()) * 1024 * 1024;
                } catch (NumberFormatException e) {
                    SwingUtilities.invokeLater(new Runnable() {
                        public void run() {
                            GUIManager.messageBox("Unable to parse desired file cache size, using maximum");
                        }
                    });
                    size = Long.MAX_VALUE;
                }
                capacity.set(size);
            }
        });
    } catch (InvocationTargetException e) {
        return Long.MAX_VALUE;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    return capacity.get();
}
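Here the AtomicLong carries a result out of a Runnable run on the Swing event thread via invokeAndWait, since a Runnable cannot return a value and captured locals must be effectively final. A minimal sketch of that out-parameter idiom; the computed value is illustrative:

import javax.swing.SwingUtilities;
import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.atomic.AtomicLong;

class EdtResultSketch {
    static long readCapacityOnEdt() throws InterruptedException, InvocationTargetException {
        final AtomicLong capacity = new AtomicLong();
        // Runs on the event dispatch thread; set() stores the result for the caller.
        SwingUtilities.invokeAndWait(() -> capacity.set(64L * 1024 * 1024));
        return capacity.get(); // read back once the EDT work is done
    }

    public static void main(String[] args) throws Exception {
        System.out.println(readCapacityOnEdt()); // 67108864
    }
}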
From source file: org.archive.crawler.admin.StatisticsTracker.java
protected void writeSourceReportTo(PrintWriter writer) {
    writer.print("[source] [host] [#urls]\n");
    // for each source
    for (String sourceKey : sourceHostDistribution.keySet()) {
        Map<String, AtomicLong> hostCounts = sourceHostDistribution.get(sourceKey);
        // sort hosts by #urls
        SortedMap<String, AtomicLong> sortedHostCounts = getReverseSortedHostCounts(hostCounts);
        // for each host
        for (String hostKey : sortedHostCounts.keySet()) {
            AtomicLong hostCount = hostCounts.get(hostKey);
            writer.print(sourceKey.toString());
            writer.print(" ");
            writer.print(hostKey.toString());
            writer.print(" ");
            writer.print(hostCount.get());
            writer.print("\n");
        }
    }
}
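The report iterates per-host counters kept as AtomicLong values inside a map and snapshots each with get(), even while crawler threads may still be incrementing them. A minimal sketch of such a per-key counter table; the host names and counts are illustrative:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class HostCounterSketch {
    public static void main(String[] args) {
        Map<String, AtomicLong> hostCounts = new ConcurrentHashMap<>();

        // Writers: one atomic counter per host, created on first sight.
        hostCounts.computeIfAbsent("example.org", k -> new AtomicLong()).incrementAndGet();
        hostCounts.computeIfAbsent("example.org", k -> new AtomicLong()).incrementAndGet();
        hostCounts.computeIfAbsent("example.net", k -> new AtomicLong()).incrementAndGet();

        // Reader: snapshot each counter with get() for the report.
        for (Map.Entry<String, AtomicLong> e : hostCounts.entrySet()) {
            System.out.println(e.getKey() + " " + e.getValue().get());
        }
    }
}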
From source file: io.warp10.continuum.egress.EgressFetchHandler.java
private static void jsonDump(PrintWriter pw, GTSDecoderIterator iter, long now, long timespan,
        boolean dedup, boolean signed, AtomicReference<Metadata> lastMeta, AtomicLong lastCount)
        throws IOException {

    String name = null;
    Map<String, String> labels = null;

    pw.print("[");

    boolean hasValues = false;

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    try {
        StringBuilder sb = new StringBuilder();

        JsonSerializer serializer = new JsonSerializerFactory().create();

        boolean firstgts = true;

        long mask = (long) (Math.random() * Long.MAX_VALUE);

        while (iter.hasNext()) {
            GTSDecoder decoder = iter.next();

            if (dedup) {
                decoder = decoder.dedup();
            }

            if (!decoder.next()) {
                continue;
            }

            long toDecodeCount = Long.MAX_VALUE;

            if (timespan < 0) {
                Metadata meta = decoder.getMetadata();
                if (!meta.equals(lastMetadata)) {
                    lastMetadata = meta;
                    currentCount = 0;
                }
                toDecodeCount = Math.max(0, -timespan - currentCount);
            }

            //
            // Only display the class + labels if they have changed since the previous GTS
            //

            Map<String, String> lbls = decoder.getLabels();

            //
            // Compute the new name
            //

            boolean displayName = false;

            if (null == name || (!name.equals(decoder.getName()) || !labels.equals(lbls))) {
                displayName = true;
                name = decoder.getName();
                labels = lbls;
                sb.setLength(0);

                sb.append("{\"c\":");
                //sb.append(gson.toJson(name));
                sb.append(serializer.serialize(name));

                boolean first = true;

                sb.append(",\"l\":{");

                for (Entry<String, String> entry : lbls.entrySet()) {
                    //
                    // Skip owner/producer labels and any other 'private' labels
                    //
                    if (!signed) {
                        if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                            continue;
                        }
                        if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                            continue;
                        }
                    }

                    if (!first) {
                        sb.append(",");
                    }

                    //sb.append(gson.toJson(entry.getKey()));
                    sb.append(serializer.serialize(entry.getKey()));
                    sb.append(":");
                    //sb.append(gson.toJson(entry.getValue()));
                    sb.append(serializer.serialize(entry.getValue()));
                    first = false;
                }

                sb.append("}");

                sb.append(",\"a\":{");

                first = true;
                for (Entry<String, String> entry : decoder.getMetadata().getAttributes().entrySet()) {
                    if (!first) {
                        sb.append(",");
                    }

                    //sb.append(gson.toJson(entry.getKey()));
                    sb.append(serializer.serialize(entry.getKey()));
                    sb.append(":");
                    //sb.append(gson.toJson(entry.getValue()));
                    sb.append(serializer.serialize(entry.getValue()));
                    first = false;
                }

                sb.append("}");

                sb.append(",\"i\":\"");
                sb.append(decoder.getLabelsId() & mask);
                sb.append("\",\"v\":[");
            }

            long decoded = 0L;

            do {
                if (toDecodeCount == decoded) {
                    break;
                }

                // FIXME(hbs): only display the results which match the authorized (according to token) timerange and geo zones

                //
                // Filter out any value not in the time range
                //

                if (decoder.getTimestamp() > now
                        || (timespan >= 0 && decoder.getTimestamp() <= (now - timespan))) {
                    continue;
                }

                decoded++;

                //
                // TODO(hbs): filter out values with no location or outside the selected geozone when a geozone was set
                //

                // Display the name only if we have at least one value to display
                if (displayName) {
                    if (!firstgts) {
                        pw.print("]},");
                    }
                    pw.print(sb.toString());
                    firstgts = false;
                    displayName = false;
                } else {
                    pw.print(",");
                }

                hasValues = true;

                pw.print("[");
                pw.print(decoder.getTimestamp());

                if (GeoTimeSerie.NO_LOCATION != decoder.getLocation()) {
                    double[] latlon = GeoXPLib.fromGeoXPPoint(decoder.getLocation());
                    pw.print(",");
                    pw.print(latlon[0]);
                    pw.print(",");
                    pw.print(latlon[1]);
                }

                if (GeoTimeSerie.NO_ELEVATION != decoder.getElevation()) {
                    pw.print(",");
                    pw.print(decoder.getElevation());
                }

                pw.print(",");

                Object value = decoder.getValue();

                if (value instanceof Number) {
                    pw.print(value);
                } else if (value instanceof Boolean) {
                    pw.print(Boolean.TRUE.equals(value) ? "true" : "false");
                } else {
                    //pw.print(gson.toJson(value.toString()));
                    pw.print(serializer.serialize(value.toString()));
                }

                pw.print("]");
            } while (decoder.next());

            if (timespan < 0) {
                currentCount += decoded;
            }

            //
            // If displayName is still true it means we should have displayed the name but no value matched,
            // so set name to null so we correctly display the name for the next decoder if it has values
            //

            if (displayName) {
                name = null;
            }
        }
    } catch (Throwable t) {
        throw t;
    } finally {
        if (hasValues) {
            pw.print("]}");
        }
        pw.print("]");
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}
From source file: io.warp10.continuum.egress.EgressFetchHandler.java
/**
 * Output a text version of fetched data. Deduplication is done on the fly so we don't decode twice.
 */
private static void textDump(PrintWriter pw, GTSDecoderIterator iter, long now, long timespan,
        boolean raw, boolean dedup, boolean signed, boolean showAttributes,
        AtomicReference<Metadata> lastMeta, AtomicLong lastCount, boolean sortMeta) throws IOException {

    String name = null;
    Map<String, String> labels = null;

    StringBuilder sb = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        //
        // Only display the class + labels if they have changed since the previous GTS
        //

        Map<String, String> lbls = decoder.getLabels();

        //
        // Compute the new name
        //

        boolean displayName = false;

        if (null == name || (!name.equals(decoder.getName()) || !labels.equals(lbls))) {
            displayName = true;
            name = decoder.getName();
            labels = lbls;
            sb.setLength(0);
            GTSHelper.encodeName(sb, name);
            sb.append("{");
            boolean first = true;

            if (sortMeta) {
                lbls = new TreeMap<String, String>(lbls);
            }

            for (Entry<String, String> entry : lbls.entrySet()) {
                //
                // Skip owner/producer labels and any other 'private' labels
                //
                if (!signed) {
                    if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                        continue;
                    }
                    if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                        continue;
                    }
                }

                if (!first) {
                    sb.append(",");
                }

                GTSHelper.encodeName(sb, entry.getKey());
                sb.append("=");
                GTSHelper.encodeName(sb, entry.getValue());
                first = false;
            }

            sb.append("}");

            if (showAttributes) {
                Metadata meta = decoder.getMetadata();
                if (meta.getAttributesSize() > 0) {
                    if (sortMeta) {
                        meta.setAttributes(new TreeMap<String, String>(meta.getAttributes()));
                    }
                    GTSHelper.labelsToString(sb, meta.getAttributes());
                } else {
                    sb.append("{}");
                }
            }
        }

        long timestamp = 0L;
        long location = GeoTimeSerie.NO_LOCATION;
        long elevation = GeoTimeSerie.NO_ELEVATION;
        Object value = null;

        boolean dup = true;

        long decoded = 0;

        do {
            if (toDecodeCount == decoded) {
                break;
            }

            // FIXME(hbs): only display the results which match the authorized (according to token) timerange and geo zones

            //
            // Filter out any value not in the time range
            //

            long newTimestamp = decoder.getTimestamp();
            if (newTimestamp > now || (timespan >= 0 && newTimestamp <= (now - timespan))) {
                continue;
            }

            //
            // TODO(hbs): filter out values with no location or outside the selected geozone when a geozone was set
            //

            long newLocation = decoder.getLocation();
            long newElevation = decoder.getElevation();
            Object newValue = decoder.getValue();

            dup = true;

            if (dedup) {
                if (location != newLocation || elevation != newElevation) {
                    dup = false;
                } else {
                    if (null == newValue) {
                        // Consider nulls as duplicates (can't happen!)
                        dup = false;
                    } else if (newValue instanceof Number) {
                        if (!((Number) newValue).equals(value)) {
                            dup = false;
                        }
                    } else if (newValue instanceof String) {
                        if (!((String) newValue).equals(value)) {
                            dup = false;
                        }
                    } else if (newValue instanceof Boolean) {
                        if (!((Boolean) newValue).equals(value)) {
                            dup = false;
                        }
                    }
                }
            }

            decoded++;

            location = newLocation;
            elevation = newElevation;
            timestamp = newTimestamp;
            value = newValue;

            if (raw) {
                if (!dedup || !dup) {
                    pw.println(GTSHelper.tickToString(sb, timestamp, location, elevation, value));
                }
            } else {
                // Display the name only if we have at least one value to display
                // We force 'dup' to be false when we must show the name
                if (displayName) {
                    pw.println(GTSHelper.tickToString(sb, decoder.getTimestamp(), decoder.getLocation(),
                            decoder.getElevation(), decoder.getValue()));
                    displayName = false;
                    dup = false;
                } else {
                    if (!dedup || !dup) {
                        pw.print("=");
                        pw.println(GTSHelper.tickToString(timestamp, location, elevation, value));
                    }
                }
            }
        } while (decoder.next());

        // Update currentCount
        if (timespan < 0) {
            currentCount += decoded;
        }

        // Print any remaining value
        if (dedup && dup) {
            if (raw) {
                pw.println(GTSHelper.tickToString(sb, timestamp, location, elevation, value));
            } else {
                pw.print("=");
                pw.println(GTSHelper.tickToString(timestamp, location, elevation, value));
            }
        }

        //
        // If displayName is still true it means we should have displayed the name but no value matched,
        // so set name to null so we correctly display the name for the next decoder if it has values
        //

        if (displayName) {
            name = null;
        }
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}
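All three dump methods above (rawDump, jsonDump, textDump) take an AtomicLong lastCount as an in/out parameter: they read the running total with get() on entry and publish the updated total with set() on exit, so the count carries over between successive calls. A minimal sketch of that pattern; the method name and batch contents are illustrative:

import java.util.concurrent.atomic.AtomicLong;

class CarriedCountSketch {
    static void dumpBatch(long[] batch, AtomicLong lastCount) {
        long currentCount = lastCount.get();  // resume from the previous call
        for (long value : batch) {
            currentCount++;                   // ... write the value somewhere ...
        }
        lastCount.set(currentCount);          // hand the updated total back to the caller
    }

    public static void main(String[] args) {
        AtomicLong lastCount = new AtomicLong();
        dumpBatch(new long[] { 1, 2, 3 }, lastCount);
        dumpBatch(new long[] { 4, 5 }, lastCount);
        System.out.println(lastCount.get()); // 5
    }
}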