List of usage examples for java.util.concurrent.atomic.AtomicLong.addAndGet
public final long addAndGet(long delta)
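addAndGet atomically adds the given delta to the current value and returns the updated value, which makes it a convenient building block for lock-free counters and running sums shared across threads. Before the real-world examples below, here is a minimal, self-contained sketch of that behavior (class and variable names are illustrative only, not taken from the source files that follow):

import java.util.concurrent.atomic.AtomicLong;

public class AddAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong total = new AtomicLong();

        // Several threads fold their partial sums into one shared counter.
        Runnable worker = () -> {
            long localSum = 0;
            for (int i = 1; i <= 1000; i++) {
                localSum += i;
            }
            // addAndGet applies the delta atomically and returns the new total.
            long updated = total.addAndGet(localSum);
            System.out.println("total so far: " + updated);
        };

        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println("final total = " + total.get()); // 2 * 500500 = 1001000
    }
}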
From source file:com.btoddb.fastpersitentqueue.flume.FpqChannelTest.java
@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    channel.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    channel.setMaxMemorySegmentSizeInBytes(10000000);
    channel.setMaxJournalFileSize(10000000);
    channel.setMaxJournalDurationInMs(30000);
    channel.setFlushPeriodInMs(1000);
    channel.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    channel.start();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        Transaction tx = channel.getTransaction();
                        tx.begin();
                        MyEvent event1 = new MyEvent();
                        event1.addHeader("x", String.valueOf(x)).setBody(new byte[numEntries - 8]); // take out size of long
                        channel.put(event1);
                        tx.commit();
                        tx.close();

                        Thread.sleep(pushRand.nextInt(5));
                    }
                    catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !channel.isEmpty()) {
                    try {
                        Transaction tx = channel.getTransaction();
                        tx.begin();

                        Event event;
                        int count = popBatchSize;
                        while (null != (event = channel.take()) && count-- > 0) {
                            popSum.addAndGet(Long.valueOf(event.getHeaders().get("x")));
                            numPops.incrementAndGet();
                        }

                        tx.commit();
                        tx.close();

                        Thread.sleep(popRand.nextInt(10));
                    }
                    catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        }
        catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(channel.isEmpty(), is(true));
    assertThat(pushSum.get(), is(popSum.get()));
}
From source file:org.commonjava.indy.core.ctl.NfcController.java
public NotFoundCacheInfoDTO getInfo(StoreKey key) throws IndyWorkflowException {
    NotFoundCacheInfoDTO dto = new NotFoundCacheInfoDTO();
    final AtomicLong size = new AtomicLong(0);
    try {
        switch (key.getType()) {
            case group: {
                // Warn: This is very expensive if group holds thousands of repositories
                final List<StoreKey> stores = storeManager.query().packageType(key.getPackageType())
                        .getOrderedConcreteStoresInGroup(key.getName()).stream()
                        .map(artifactStore -> artifactStore.getKey()).collect(Collectors.toList());
                if (stores.size() >= MAX_GROUP_MEMBER_SIZE_FOR_GET_MISSING) {
                    throw new IndyWorkflowException(SC_UNPROCESSABLE_ENTITY,
                            "Get missing info for group failed (too many members), size: " + stores.size());
                }
                for (final StoreKey storeKey : stores) {
                    size.addAndGet(cache.getSize(storeKey));
                }
                break;
            }
            default: {
                size.addAndGet(cache.getSize(key));
                break;
            }
        }
        dto.setSize(size.get());
        return dto;
    } catch (final IndyDataException e) {
        throw new IndyWorkflowException("Failed to get info for ArtifactStore: %s.", e, key);
    }
}
From source file:com.btoddb.fastpersitentqueue.InMemorySegmentMgrTest.java
@Test
public void testThreading() throws IOException, ExecutionException {
    final int entrySize = 1000;
    final int numEntries = 3000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxSegmentSizeInBytes(10000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        pushSum.addAndGet(x);
                        FpqEntry entry = new FpqEntry(x, new byte[entrySize]);
                        mgr.push(entry);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    }
                    catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !mgr.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = mgr.pop())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }
                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            Thread.sleep(popRand.nextInt(5));
                        }
                    }
                    catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        }
        catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getNumberOfEntries(), is(0L));
    assertThat(mgr.getNumberOfActiveSegments(), is(1));
    assertThat(mgr.getSegments(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), is(empty()));

    // make sure we tested paging in/out
    assertThat(mgr.getNumberOfSwapOut(), is(greaterThan(0L)));
    assertThat(mgr.getNumberOfSwapIn(), is(mgr.getNumberOfSwapOut()));
}
From source file:org.archive.modules.writer.WARCWriterProcessor.java
protected void addStats(Map<String, Map<String, Long>> substats) {
    for (String key : substats.keySet()) {
        // intentionally redundant here -- if statement avoids creating
        // unused empty map every time; putIfAbsent() ensures thread safety
        if (stats.get(key) == null) {
            stats.putIfAbsent(key, new ConcurrentHashMap<String, AtomicLong>());
        }

        for (String subkey : substats.get(key).keySet()) {
            AtomicLong oldValue = stats.get(key).get(subkey);
            if (oldValue == null) {
                oldValue = stats.get(key).putIfAbsent(subkey, new AtomicLong(substats.get(key).get(subkey)));
            }
            if (oldValue != null) {
                oldValue.addAndGet(substats.get(key).get(subkey));
            }
        }
    }
}
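The null-check plus putIfAbsent dance above predates Java 8. On Java 8 and later, the same thread-safe tally pattern can be written more compactly with ConcurrentHashMap.computeIfAbsent, which creates the per-key AtomicLong on first use before addAndGet folds in the delta. The following is a minimal standalone sketch of that alternative (the class and method names are hypothetical, not part of the Heritrix source above):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical helper: accumulate per-(key, subkey) totals across threads.
class StatsTally {
    private final ConcurrentMap<String, ConcurrentMap<String, AtomicLong>> stats = new ConcurrentHashMap<>();

    void addStats(Map<String, Map<String, Long>> substats) {
        substats.forEach((key, sub) -> {
            ConcurrentMap<String, AtomicLong> inner =
                    stats.computeIfAbsent(key, k -> new ConcurrentHashMap<>());
            // computeIfAbsent lazily creates the counter; addAndGet applies the delta atomically.
            sub.forEach((subkey, delta) ->
                    inner.computeIfAbsent(subkey, k -> new AtomicLong()).addAndGet(delta));
        });
    }

    long get(String key, String subkey) {
        AtomicLong v = stats.getOrDefault(key, new ConcurrentHashMap<>()).get(subkey);
        return v == null ? 0L : v.get();
    }
}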
From source file:org.voltdb.TableHelper.java
/**
 * Delete rows in a VoltDB table that has a bigint pkey where pkey values are odd.
 * Works best when pkey values are contiguous and start around 0.
 *
 * Exists mostly to force compaction on tables loaded with fillTableWithBigintPkey.
 * Though if you have an even number of sites, this won't work. It'll need to be
 * updated to delete some other pattern that's a bit more generic. Right now it
 * works great for my one-site testing.
 */
public static long deleteEveryNRows(VoltTable table, Client client, int n) throws Exception {
    // find the primary key, assume first col if not found
    int pkeyColIndex = getBigintPrimaryKeyIndexIfExists(table);
    if (pkeyColIndex == -1) {
        pkeyColIndex = 0;
        assert (table.getColumnType(0).isInteger());
    }
    String pkeyColName = table.getColumnName(pkeyColIndex);

    VoltTable result = client
            .callProcedure("@AdHoc", String.format("select %s from %s order by %s desc limit 1;", pkeyColName,
                    TableHelper.getTableName(table), pkeyColName))
            .getResults()[0];
    long maxId = result.getRowCount() > 0 ? result.asScalarLong() : 0;
    System.out.printf("Deleting odd rows with pkey ids in the range 0-%d\n", maxId);

    // track outstanding responses so 10k can be out at a time
    final AtomicInteger outstanding = new AtomicInteger(0);
    final AtomicLong deleteCount = new AtomicLong(0);

    ProcedureCallback callback = new ProcedureCallback() {
        @Override
        public void clientCallback(ClientResponse clientResponse) throws Exception {
            outstanding.decrementAndGet();
            if (clientResponse.getStatus() != ClientResponse.SUCCESS) {
                System.out.println("Error in deleter callback:");
                System.out.println(((ClientResponseImpl) clientResponse).toJSONString());
                assert (false);
            }
            VoltTable result = clientResponse.getResults()[0];
            long modified = result.asScalarLong();
            assert (modified <= 1);
            deleteCount.addAndGet(modified);
        }
    };

    // delete 100k rows at a time until nothing comes back
    long deleted = 0;
    final String deleteProcName = table.m_extraMetadata.name.toUpperCase() + ".delete";
    for (int i = 1; i <= maxId; i += n) {
        client.callProcedure(callback, deleteProcName, i);
        outstanding.incrementAndGet();
        deleted++;
        if ((deleted % 100000) == 0) {
            System.out.printf("Sent %d total delete invocations (%.1f%% of range).\n", deleted,
                    (i * 100.0) / maxId);
        }
        // block while 1000 txns are outstanding
        while (outstanding.get() >= 1000) {
            Thread.yield();
        }
    }

    // block until all calls have returned
    while (outstanding.get() > 0) {
        Thread.yield();
    }

    System.out.printf("Deleted %d odd rows\n", deleteCount.get());
    return deleteCount.get();
}
From source file:com.btoddb.fastpersitentqueue.FpqIT.java
@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    fpq1.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    fpq1.setMaxMemorySegmentSizeInBytes(10000000);
    fpq1.setMaxJournalFileSize(10000000);
    fpq1.setMaxJournalDurationInMs(30000);
    fpq1.setFlushPeriodInMs(1000);
    fpq1.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    fpq1.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        fpq1.beginTransaction();
                        fpq1.push(bb.array());
                        fpq1.commit();

                        if ((x + 1) % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    }
                    catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !fpq1.isEmpty()) {
                    try {
                        fpq1.beginTransaction();
                        try {
                            Collection<FpqEntry> entries = fpq1.pop(popBatchSize);
                            if (null == entries) {
                                Thread.sleep(100);
                                continue;
                            }

                            for (FpqEntry entry : entries) {
                                ByteBuffer bb = ByteBuffer.wrap(entry.getData());
                                popSum.addAndGet(bb.getLong());
                                if (entry.getId() % 500 == 0) {
                                    System.out.println("popped ID = " + entry.getId());
                                }
                            }
                            numPops.addAndGet(entries.size());

                            fpq1.commit();
                            entries.clear();
                        }
                        finally {
                            if (fpq1.isTransactionActive()) {
                                fpq1.rollback();
                            }
                        }
                        Thread.sleep(popRand.nextInt(10));
                    }
                    catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        }
        catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(fpq1.getNumberOfEntries(), is(0L));
    assertThat(pushSum.get(), is(popSum.get()));
    assertThat(fpq1.getMemoryMgr().getNumberOfActiveSegments(), is(1));
    assertThat(fpq1.getMemoryMgr().getSegments(), hasSize(1));
    assertThat(fpq1.getJournalMgr().getJournalFiles().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(fpq1.getPagingDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            is(empty()));
    assertThat(
            FileUtils.listFiles(fpq1.getJournalDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            hasSize(1));
}
From source file:com.milaboratory.core.alignment.KAlignerTest.java
@Test
public void testRandomCorrectnessConcurrent() throws Exception {
    KAlignerParameters p = gParams.clone().setMapperKValue(6).setAlignmentStopPenalty(Integer.MIN_VALUE)
            .setMapperAbsoluteMinScore(2.1f).setMapperMinSeedsDistance(4);
    p.setScoring(new LinearGapAlignmentScoring(NucleotideSequence.ALPHABET,
            ScoringUtils.getSymmetricMatrix(4, -4, 4), -5)).setMaxAdjacentIndels(2);

    KAlignerParameters[] params = new KAlignerParameters[] { p.clone(), p.clone().setFloatingLeftBound(true),
            p.clone().setFloatingRightBound(true),
            p.clone().setFloatingLeftBound(true).setFloatingRightBound(true) };

    RandomDataGenerator rdi = new RandomDataGenerator(new Well19937c(127368647891L));
    final int baseSize = its(400, 2000);
    final int total = its(3000, 30000);
    final int threadCount = 20;
    int i, id;

    final NucleotideMutationModel mutationModel = MutationModels.getEmpiricalNucleotideMutationModel()
            .multiplyProbabilities(2.0);
    mutationModel.reseed(12343L);

    for (final KAlignerParameters parameters : params) {
        final KAligner aligner = new KAligner(parameters);

        final AtomicInteger correct = new AtomicInteger(0), incorrect = new AtomicInteger(0),
                miss = new AtomicInteger(0), scoreError = new AtomicInteger(0), random = new AtomicInteger(0);

        final List<NucleotideSequence> ncs = new ArrayList<>(baseSize);
        for (i = 0; i < baseSize; ++i) {
            NucleotideSequence reference = randomSequence(NucleotideSequence.ALPHABET, rdi, 100, 300);
            ncs.add(reference);
            aligner.addReference(reference);
        }

        final AtomicInteger counter = new AtomicInteger(total);
        Thread[] threads = new Thread[threadCount];
        final AtomicLong time = new AtomicLong(0L);
        final AtomicLong seedCounter = new AtomicLong(1273L);

        for (i = 0; i < threadCount; ++i) {
            threads[i] = new Thread() {
                @Override
                public void run() {
                    long timestamp;

                    //Different seed for different thread.
                    RandomDataGenerator rdi = new RandomDataGenerator(
                            new Well19937c(seedCounter.addAndGet(117L)));

                    while (counter.decrementAndGet() >= 0) {
                        int id = rdi.nextInt(0, baseSize - 1);
                        NucleotideSequence ref = ncs.get(id);

                        int trimRight, trimLeft;
                        boolean addLeft, addRight;

                        if (parameters.isFloatingLeftBound()) {
                            trimLeft = rdi.nextInt(0, ref.size() / 3);
                            addLeft = true;
                        } else {
                            if (rdi.nextInt(0, 1) == 0) {
                                trimLeft = 0;
                                addLeft = true;
                            } else {
                                trimLeft = rdi.nextInt(0, ref.size() / 3);
                                addLeft = false;
                            }
                        }

                        if (parameters.isFloatingRightBound()) {
                            trimRight = rdi.nextInt(0, ref.size() / 3);
                            addRight = true;
                        } else {
                            if (rdi.nextInt(0, 1) == 0) {
                                trimRight = 0;
                                addRight = true;
                            } else {
                                trimRight = rdi.nextInt(0, ref.size() / 3);
                                addRight = false;
                            }
                        }

                        NucleotideSequence subSeq = ref.getRange(trimLeft, ref.size() - trimRight);
                        NucleotideSequence left = addLeft
                                ? randomSequence(NucleotideSequence.ALPHABET, rdi, 10, 30) : EMPTY;
                        NucleotideSequence right = addRight
                                ? randomSequence(NucleotideSequence.ALPHABET, rdi, 10, 30) : EMPTY;

                        int[] subSeqMutations;
                        Mutations<NucleotideSequence> mmutations;
                        synchronized (mutationModel) {
                            mmutations = generateMutations(subSeq, mutationModel);
                            subSeqMutations = mmutations.getAllMutations();
                        }
                        float actionScore = AlignmentUtils.calculateScore(parameters.getScoring(),
                                subSeq.size(), mmutations);

                        int indels = 0;
                        for (int mut : subSeqMutations)
                            if (isDeletion(mut) || isInsertion(mut))
                                ++indels;

                        NucleotideSequence target = left.concatenate(mutate(subSeq, subSeqMutations))
                                .concatenate(right);

                        timestamp = System.nanoTime();
                        KAlignmentResult result = aligner.align(target);
                        time.addAndGet(System.nanoTime() - timestamp);

                        boolean found = false;
                        for (KAlignmentHit hit : result.hits) {
                            if (hit.getId() == id) {
                                //System.out.println(hit.getAlignmentScore());
                                found = true;
                                if (!parameters.isFloatingLeftBound())
                                    Assert.assertTrue(hit.getAlignment().getSequence1Range().getFrom() == 0
                                            || hit.getAlignment().getSequence2Range().getFrom() == 0);
                                if (!parameters.isFloatingRightBound())
                                    Assert.assertTrue(hit.getAlignment().getSequence1Range().getTo() == ref.size()
                                            || hit.getAlignment().getSequence2Range().getTo() == target.size());
                                if (hit.getAlignment().getScore() < actionScore
                                        && indels <= parameters.getMaxAdjacentIndels()) {
                                    scoreError.incrementAndGet();
                                    //System.out.println(target);
                                    //System.out.println(left);
                                    //printAlignment(subSeq, subSeqMutations);
                                    //System.out.println(right);
                                    //printHitAlignment(hit);
                                    //printAlignment(ncs.get(hit.getId()).getRange(hit.getAlignment().getSequence1Range()),
                                    //        hit.getAlignment().getMutations());
                                    //found = true;
                                }
                            } else {
                                //printHitAlignment(hit);
                                //System.out.println(hit.getAlignmentScore());
                                incorrect.incrementAndGet();
                            }
                        }

                        if (found)
                            correct.incrementAndGet();
                        else {
                            if (indels <= parameters.getMaxAdjacentIndels()) {
                                miss.incrementAndGet();
                                //System.out.println(target);
                                //System.out.println(left);
                                //printAlignment(subSeq, subSeqMutations);
                                //System.out.println(right);
                            }
                        }

                        NucleotideSequence randomSequence = randomSequence(NucleotideSequence.ALPHABET, rdi,
                                target.size() - 1, target.size());
                        for (KAlignmentHit hit : aligner.align(randomSequence).hits) {
                            hit.calculateAlignmnet();
                            if (hit.getAlignment().getScore() >= 110.0)
                                random.incrementAndGet();
                        }
                    }
                }
            };
        }

        for (i = 0; i < threadCount; ++i)
            threads[i].start();

        for (i = 0; i < threadCount; ++i)
            threads[i].join();

        System.out.println("C=" + correct.get() + ";I=" + incorrect.get() + ";M=" + miss.get() + ";ScE="
                + scoreError.get() + ";R=" + (1.0 * random.get() / baseSize / total) + " AlignmentTime = "
                + time(time.get() / total));

        Assert.assertEquals(1.0, 1.0 * correct.get() / total, 0.01);
        Assert.assertEquals(0.0, 1.0 * incorrect.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * miss.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * scoreError.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * random.get() / total / baseSize, 5E-6);
    }
}
From source file:org.structr.javaparser.JavaParserModule.java
/**
 * Add compilation units of all jar files found in the given folder to the index.
 *
 * @param folderPath
 */
public void addJarsToIndex(final String folderPath) {

    logger.info("Starting adding jar files in " + folderPath);

    final CombinedTypeSolver typeSolver = new CombinedTypeSolver();
    typeSolver.add(new ReflectionTypeSolver());

    final AtomicLong count = new AtomicLong(0);

    try {
        Files.newDirectoryStream(Paths.get(folderPath), path -> path.toString().endsWith(".jar"))
                .forEach((file) -> {
                    try {
                        typeSolver.add(new JarTypeSolver(new FileInputStream(file.toFile())));
                        count.addAndGet(1L);
                    } catch (IOException ex) {
                    }
                });
    } catch (IOException ex) {
    }

    logger.info("Added " + count.toString() + " jar files to the type solver");

    typeSolver.add(structrTypeSolver);

    facade = JavaParserFacade.get(typeSolver);

    logger.info("Done with adding jar files in " + folderPath);
}
From source file:com.btoddb.fastpersitentqueue.JournalMgrIT.java
@Test
public void testThreading() throws IOException, ExecutionException {
    final int numEntries = 10000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final ConcurrentLinkedQueue<FpqEntry> events = new ConcurrentLinkedQueue<FpqEntry>();
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxJournalFileSize(1000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        FpqEntry entry = mgr.append(new FpqEntry(x, new byte[100]));
                        events.offer(entry);
                        pushSum.addAndGet(x);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    }
                    catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !events.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = events.poll())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }
                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            mgr.reportTake(entry);
                            Thread.sleep(popRand.nextInt(5));
                        }
                    }
                    catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        }
        catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getJournalIdMap().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), hasSize(1));
}
From source file:com.alibaba.druid.benckmark.pool.Case2.java
private void p0(final DataSource dataSource, String name, int threadCount) throws Exception {
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch endLatch = new CountDownLatch(threadCount);

    final AtomicLong blockedStat = new AtomicLong();
    final AtomicLong waitedStat = new AtomicLong();

    for (int i = 0; i < threadCount; ++i) {
        Thread thread = new Thread() {
            public void run() {
                try {
                    startLatch.await();

                    long threadId = Thread.currentThread().getId();

                    long startBlockedCount, startWaitedCount;
                    {
                        ThreadInfo threadInfo = ManagementFactory.getThreadMXBean().getThreadInfo(threadId);
                        startBlockedCount = threadInfo.getBlockedCount();
                        startWaitedCount = threadInfo.getWaitedCount();
                    }

                    for (int i = 0; i < LOOP_COUNT; ++i) {
                        Connection conn = dataSource.getConnection();
                        conn.close();
                    }

                    ThreadInfo threadInfo = ManagementFactory.getThreadMXBean().getThreadInfo(threadId);
                    long blockedCount = threadInfo.getBlockedCount() - startBlockedCount;
                    long waitedCount = threadInfo.getWaitedCount() - startWaitedCount;

                    blockedStat.addAndGet(blockedCount);
                    waitedStat.addAndGet(waitedCount);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
                endLatch.countDown();
            }
        };
        thread.start();
    }

    long startMillis = System.currentTimeMillis();
    long startYGC = TestUtil.getYoungGC();
    long startFullGC = TestUtil.getFullGC();

    startLatch.countDown();
    endLatch.await();

    long millis = System.currentTimeMillis() - startMillis;
    long ygc = TestUtil.getYoungGC() - startYGC;
    long fullGC = TestUtil.getFullGC() - startFullGC;

    System.out.println("thread " + threadCount + " " + name + " millis : "
            + NumberFormat.getInstance().format(millis) + ", YGC " + ygc + " FGC " + fullGC + " blockedCount "
            + blockedStat.get() + " waitedCount " + waitedStat.get());
}