List of usage examples for java.util.concurrent.atomic.AtomicLong
public AtomicLong(long initialValue)
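Creates a new AtomicLong with the given initial value. Before the real-world examples below, a minimal, self-contained sketch (not drawn from any of them) of the constructor together with the operations that recur throughout this page:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongBasics {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(0L);            // initialValue = 0
        counter.incrementAndGet();                          // atomically 0 -> 1
        counter.addAndGet(41L);                             // atomically 1 -> 42
        boolean swapped = counter.compareAndSet(42L, 100L); // true: 42 -> 100
        System.out.println(counter.get() + " " + swapped);  // prints "100 true"
    }
}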
From source file: com.linkedin.pinot.tools.perf.QueryRunner.java
/**
 * Use multiple threads to run queries at an increasing target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>We start with the start QPS, and keep adding delta QPS to the start QPS during the test.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>Query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *          them, 0 means never.
 * @param numIntervalsToIncreaseQPS number of intervals to increase QPS.
 * @throws Exception
 */
public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    double currentQPS = startQPS;
    int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                reportStartTime = currentTime;
                numReportIntervals++;

                if (numReportIntervals == numIntervalsToIncreaseQPS) {
                    // Try to find the next interval.
                    double newQPS = currentQPS + deltaQPS;
                    int newQueryIntervalMs;
                    // Skip the target QPS with the same interval as the previous one.
                    while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
                        newQPS += deltaQPS;
                    }
                    if (newQueryIntervalMs == 0) {
                        LOGGER.warn("Due to sleep granularity of millisecond, cannot further increase QPS.");
                    } else {
                        // Find the next interval.
                        LOGGER.info("--------------------------------------------------------------------------------");
                        LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
                        int numQueriesExecutedInt = numQueriesExecuted.get();
                        LOGGER.info(
                                "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                        + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                                currentQPS, timePassed, numQueriesExecutedInt,
                                numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                                totalBrokerTime.get() / (double) numQueriesExecutedInt,
                                totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                        currentQPS = newQPS;
                        queryIntervalMs = newQueryIntervalMs;
                        LOGGER.info("Increase target QPS to: {}, the following statistics are for the new target QPS.",
                                currentQPS);
                    }
                } else {
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                    + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                            currentQPS, timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to finish executing.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            currentQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
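The Worker class submitted to the executor above is not shown on this page. A hypothetical sketch of how such a worker might drain the shared queue and aggregate into the shared atomic counters; the field layout and the executeQuery helper are assumptions for illustration, not Pinot's actual implementation:

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

class WorkerSketch implements Runnable {
    private final ConcurrentLinkedQueue<String> queryQueue;
    private final AtomicInteger numQueriesExecuted;
    private final AtomicLong totalBrokerTime;
    private final AtomicLong totalClientTime;

    WorkerSketch(ConcurrentLinkedQueue<String> queryQueue, AtomicInteger numQueriesExecuted,
            AtomicLong totalBrokerTime, AtomicLong totalClientTime) {
        this.queryQueue = queryQueue;
        this.numQueriesExecuted = numQueriesExecuted;
        this.totalBrokerTime = totalBrokerTime;
        this.totalClientTime = totalClientTime;
    }

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            String query = queryQueue.poll();
            if (query == null) {
                continue; // queue momentarily empty; the main thread refills it
            }
            long start = System.currentTimeMillis();
            long brokerTimeMs = executeQuery(query); // assumed helper returning broker-side latency
            // addAndGet/incrementAndGet are atomic, so many workers can update
            // the shared totals without any explicit locking.
            totalBrokerTime.addAndGet(brokerTimeMs);
            totalClientTime.addAndGet(System.currentTimeMillis() - start);
            numQueriesExecuted.incrementAndGet();
        }
    }

    private long executeQuery(String query) {
        return 0L; // stand-in; a real worker would send the query to the broker
    }
}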
From source file: org.apache.hadoop.hbase.regionserver.wal.TestHLog.java
/**
 * @throws IOException
 */
@Test
public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
        // Write columns named 1, 2, 3, etc. and then values of single byte
        // 1, 2, 3...
        long timestamp = System.currentTimeMillis();
        WALEdit cols = new WALEdit();
        for (int i = 0; i < COL_COUNT; i++) {
            cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp,
                    new byte[] { (byte) (i + '0') }));
        }
        HRegionInfo hri = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        HTableDescriptor htd = new HTableDescriptor();
        htd.addFamily(new HColumnDescriptor("column"));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
        log.startCacheFlush(hri.getEncodedNameAsBytes());
        log.completeCacheFlush(hri.getEncodedNameAsBytes());
        log.close();
        Path filename = ((FSHLog) log).computeFilename();
        log = null;
        // Now open a reader on the log and assert append worked.
        reader = HLogFactory.createReader(fs, filename, conf);
        HLog.Entry entry = reader.next();
        assertEquals(COL_COUNT, entry.getEdit().size());
        int idx = 0;
        for (KeyValue val : entry.getEdit().getKeyValues()) {
            assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName()));
            assertTrue(tableName.equals(entry.getKey().getTablename()));
            assertTrue(Bytes.equals(row, val.getRow()));
            assertEquals((byte) (idx + '0'), val.getValue()[0]);
            System.out.println(entry.getKey() + " " + val);
            idx++;
        }
    } finally {
        if (log != null) {
            log.closeAndDelete();
        }
        if (reader != null) {
            reader.close();
        }
    }
}
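The sequenceId handed to every append call above isolates a simple pattern: each write atomically claims the next sequence number, so concurrent appenders never hand out the same id twice. A minimal sketch of that pattern, independent of HBase:

import java.util.concurrent.atomic.AtomicLong;

class SequenceIdSketch {
    // Starts at 1 to match the tests on this page: new AtomicLong(1).
    private final AtomicLong sequenceId = new AtomicLong(1);

    long nextId() {
        // Returns the current value and then increments it, as one atomic step.
        return sequenceId.getAndIncrement();
    }
}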
From source file: jduagui.Controller.java
public Extension() {
    this.count = new AtomicLong(0);
    this.size = new AtomicLong(0);
}
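A hypothetical sketch of how two such counters are typically updated together, for example while tallying files per extension in a disk-usage scan (the recordFile method is an assumption for illustration, not jduagui's actual code):

import java.util.concurrent.atomic.AtomicLong;

class ExtensionSketch {
    private final AtomicLong count = new AtomicLong(0);
    private final AtomicLong size = new AtomicLong(0);

    void recordFile(long bytes) {
        // Each field updates atomically, so scanner threads need no locks.
        // Note the two updates together are not one atomic transaction; that
        // is acceptable for statistics like these.
        count.incrementAndGet();
        size.addAndGet(bytes);
    }
}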
From source file: org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl.java
public RMAppImpl(ApplicationId applicationId, RMContext rmContext, Configuration config, String name,
        String user, String queue, ApplicationSubmissionContext submissionContext, YarnScheduler scheduler,
        ApplicationMasterService masterService, long submitTime, String applicationType,
        Set<String> applicationTags, ResourceRequest amReq) throws IOException {
    this.systemClock = new SystemClock();
    this.applicationId = applicationId;
    this.name = name;
    this.rmContext = rmContext;
    this.dispatcher = rmContext.getDispatcher();
    this.handler = dispatcher.getEventHandler();
    this.conf = config;
    this.user = user;
    this.queue = queue;
    this.submissionContext = submissionContext;
    this.scheduler = scheduler;
    this.masterService = masterService;
    this.submitTime = submitTime;
    this.startTime = this.systemClock.getTime();
    this.applicationType = applicationType;
    this.applicationTags = applicationTags;
    this.amReq = amReq;

    int globalMaxAppAttempts = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
            YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
    int individualMaxAppAttempts = submissionContext.getMaxAppAttempts();
    if (individualMaxAppAttempts <= 0 || individualMaxAppAttempts > globalMaxAppAttempts) {
        this.maxAppAttempts = globalMaxAppAttempts;
        LOG.warn("The specific max attempts: " + individualMaxAppAttempts + " for application: "
                + applicationId.getId() + " is invalid, because it is out of the range [1, "
                + globalMaxAppAttempts + "]. Use the global max attempts instead.");
    } else {
        this.maxAppAttempts = individualMaxAppAttempts;
    }

    this.attemptFailuresValidityInterval = submissionContext.getAttemptFailuresValidityInterval();
    if (this.attemptFailuresValidityInterval > 0) {
        LOG.info("The attemptFailuresValidityInterval for the application: " + this.applicationId + " is "
                + this.attemptFailuresValidityInterval + ".");
    }

    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();

    this.stateMachine = stateMachineFactory.make(this);
    this.callerContext = CallerContext.getCurrent();

    long localLogAggregationStatusTimeout = conf.getLong(YarnConfiguration.LOG_AGGREGATION_STATUS_TIME_OUT_MS,
            YarnConfiguration.DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS);
    if (localLogAggregationStatusTimeout <= 0) {
        this.logAggregationStatusTimeout = YarnConfiguration.DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS;
    } else {
        this.logAggregationStatusTimeout = localLogAggregationStatusTimeout;
    }
    this.logAggregationEnabled = conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
            YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED);
    if (this.logAggregationEnabled) {
        this.logAggregationStatusForAppReport = LogAggregationStatus.NOT_START;
    } else {
        this.logAggregationStatusForAppReport = LogAggregationStatus.DISABLED;
    }
    maxLogAggregationDiagnosticsInMemory = conf.getInt(
            YarnConfiguration.RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY,
            YarnConfiguration.DEFAULT_RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY);

    // amBlacklistingEnabled can be configured globally
    // Just use the global values
    amBlacklistingEnabled = conf.getBoolean(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_ENABLED,
            YarnConfiguration.DEFAULT_AM_SCHEDULING_NODE_BLACKLISTING_ENABLED);
    if (amBlacklistingEnabled) {
        blacklistDisableThreshold = conf.getFloat(
                YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD,
                YarnConfiguration.DEFAULT_AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD);
        // Verify whether blacklistDisableThreshold is valid. For an invalid
        // threshold, reset to the globally configured default.
        if (blacklistDisableThreshold < MINIMUM_AM_BLACKLIST_THRESHOLD_VALUE
                || blacklistDisableThreshold > MAXIMUM_AM_BLACKLIST_THRESHOLD_VALUE) {
            blacklistDisableThreshold = YarnConfiguration.DEFAULT_AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD;
        }
    }

    isAppRotatingCryptoMaterial = new AtomicBoolean(false);
    materialRotationStartTime = new AtomicLong(-1L);
}
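The constructor ends by initializing materialRotationStartTime to the sentinel -1L, i.e. "no rotation in progress". A minimal sketch of the claim-once pattern such a sentinel typically supports; the tryStartRotation method is a hypothetical name, not this class's actual API:

import java.util.concurrent.atomic.AtomicLong;

class RotationClockSketch {
    private final AtomicLong materialRotationStartTime = new AtomicLong(-1L);

    boolean tryStartRotation() {
        // Only the first caller can move the clock off the -1 sentinel; every
        // later caller gets false and knows a rotation is already underway.
        return materialRotationStartTime.compareAndSet(-1L, System.currentTimeMillis());
    }
}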
From source file: org.apache.hadoop.hbase.regionserver.wal.TestHLog.java
/**
 * Test that we can visit entries before they are appended
 * @throws Exception
 */
@Test
public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final TableName tableName = TableName.valueOf("tablename");
    final byte[] row = Bytes.toBytes("row");
    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
        DumbWALActionsListener visitor = new DumbWALActionsListener();
        log.registerWALActionsListener(visitor);
        long timestamp = System.currentTimeMillis();
        HTableDescriptor htd = new HTableDescriptor();
        htd.addFamily(new HColumnDescriptor("column"));

        HRegionInfo hri = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        for (int i = 0; i < COL_COUNT; i++) {
            WALEdit cols = new WALEdit();
            cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp,
                    new byte[] { (byte) (i + '0') }));
            log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
        }
        assertEquals(COL_COUNT, visitor.increments);
        log.unregisterWALActionsListener(visitor);
        WALEdit cols = new WALEdit();
        cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(11)), timestamp,
                new byte[] { (byte) (11 + '0') }));
        log.append(hri, tableName, cols, System.currentTimeMillis(), htd, sequenceId);
        assertEquals(COL_COUNT, visitor.increments);
    } finally {
        if (log != null) {
            log.closeAndDelete();
        }
    }
}
From source file: de.javakaffee.kryoserializers.KryoTest.java
@DataProvider(name = "typesAsSessionAttributesProvider")
protected Object[][] createTypesAsSessionAttributesData() {
    return new Object[][] {
            { Boolean.class, Boolean.TRUE },
            { String.class, "42" },
            { StringBuilder.class, new StringBuilder("42") },
            { StringBuffer.class, new StringBuffer("42") },
            { Class.class, String.class },
            { Long.class, new Long(42) },
            { Integer.class, new Integer(42) },
            { Character.class, new Character('c') },
            { Byte.class, new Byte("b".getBytes()[0]) },
            { Double.class, new Double(42d) },
            { Float.class, new Float(42f) },
            { Short.class, new Short((short) 42) },
            { BigDecimal.class, new BigDecimal(42) },
            { AtomicInteger.class, new AtomicInteger(42) },
            { AtomicLong.class, new AtomicLong(42) },
            { MutableInt.class, new MutableInt(42) },
            { Integer[].class, new Integer[] { 42 } },
            { Date.class, new Date(System.currentTimeMillis() - 10000) },
            { Calendar.class, Calendar.getInstance() },
            { Currency.class, Currency.getInstance("EUR") },
            { ArrayList.class, new ArrayList<String>(Arrays.asList("foo")) },
            { int[].class, new int[] { 1, 2 } },
            { long[].class, new long[] { 1, 2 } },
            { short[].class, new short[] { 1, 2 } },
            { float[].class, new float[] { 1, 2 } },
            { double[].class, new double[] { 1, 2 } },
            { int[].class, new int[] { 1, 2 } },
            { byte[].class, "42".getBytes() },
            { char[].class, "42".toCharArray() },
            { String[].class, new String[] { "23", "42" } },
            { Person[].class, new Person[] { createPerson("foo bar", Gender.MALE, 42) } } };
}
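One subtlety behind putting AtomicInteger and AtomicLong in this data provider: neither class overrides equals(), so a serialization round-trip test cannot compare the deserialized object to the original directly and must compare the wrapped values via get(). A minimal illustration:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongEquality {
    public static void main(String[] args) {
        AtomicLong a = new AtomicLong(42);
        AtomicLong b = new AtomicLong(42);
        System.out.println(a.equals(b));        // false: Object identity only
        System.out.println(a.get() == b.get()); // true: compare the values
    }
}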
From source file: org.apache.hadoop.hbase.regionserver.wal.TestHLog.java
@Test
public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");
    final TableName tableName = TableName.valueOf("testLogCleaning");
    final TableName tableName2 = TableName.valueOf("testLogCleaning2");

    HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    try {
        HRegionInfo hri = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        HRegionInfo hri2 = new HRegionInfo(tableName2, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

        // Add a single edit and make sure that rolling won't remove the file
        // Before HBASE-3198 it used to delete it
        addEdits(log, hri, tableName, 1, sequenceId);
        log.rollWriter();
        assertEquals(1, ((FSHLog) log).getNumRolledLogFiles());

        // See if there's anything wrong with more than 1 edit
        addEdits(log, hri, tableName, 2, sequenceId);
        log.rollWriter();
        assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());

        // Now mix edits from 2 regions, still no flushing
        addEdits(log, hri, tableName, 1, sequenceId);
        addEdits(log, hri2, tableName2, 1, sequenceId);
        addEdits(log, hri, tableName, 1, sequenceId);
        addEdits(log, hri2, tableName2, 1, sequenceId);
        log.rollWriter();
        assertEquals(3, ((FSHLog) log).getNumRolledLogFiles());

        // Flush the first region, we expect to see the first two files getting
        // archived. We need to append something or writer won't be rolled.
        addEdits(log, hri2, tableName2, 1, sequenceId);
        log.startCacheFlush(hri.getEncodedNameAsBytes());
        log.completeCacheFlush(hri.getEncodedNameAsBytes());
        log.rollWriter();
        assertEquals(2, ((FSHLog) log).getNumRolledLogFiles());

        // Flush the second region, which removes all the remaining output files
        // since the oldest was completely flushed and the two others only contain
        // flush information
        addEdits(log, hri2, tableName2, 1, sequenceId);
        log.startCacheFlush(hri2.getEncodedNameAsBytes());
        log.completeCacheFlush(hri2.getEncodedNameAsBytes());
        log.rollWriter();
        assertEquals(0, ((FSHLog) log).getNumRolledLogFiles());
    } finally {
        if (log != null) {
            log.closeAndDelete();
        }
    }
}
From source file: org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java
/**
 * Create an HRegion with the result of a HLog split and test we only see the
 * good edits
 * @throws Exception
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region2);
    final HLog wal = createWAL(this.conf);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();
    final AtomicLong sequenceId = new AtomicLong(1);

    // Add 1k to each family.
    final int countPerFamily = 1000;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, htd, sequenceId);
    }

    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName);
    wal.completeCacheFlush(regionName);

    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTimeMillis();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.append(hri, tableName, edit, now, htd, sequenceId);

    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTimeMillis();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.append(hri, tableName, edit, now, htd, sequenceId);

    // Sync.
    wal.sync();

    // Set down maximum recovery so the dfsclient doesn't linger retrying something
    // long gone.
    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);

    // Make a new conf and a new fs for the splitter to run on so we can take
    // over the old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction() {
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for the new region.
            HLog newWal = createWAL(newConf);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
                    protected FlushResult internalFlushcache(final HLog wal, final long myseqid,
                            MonitoredTask status) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResult fs = super.internalFlushcache(wal, myseqid,
                                Mockito.mock(MonitoredTask.class));
                        flushcount.incrementAndGet();
                        return fs;
                    }
                };
                long seqid = region.initialize();
                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue(seqid - 1 == sequenceId.get());

                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getFamilies().size() - 1), result.size());
                region.close();
            } finally {
                newWal.closeAndDelete();
            }
            return null;
        }
    });
}
From source file: org.apache.tinkerpop.gremlin.structure.TransactionTest.java
@Test
@org.junit.Ignore("Ignoring this test for now. Perhaps it will have relevance later. see - https://github.org/apache/tinkerpop/tinkerpop3/issues/31")
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = Graph.Features.GraphFeatures.class, feature = Graph.Features.GraphFeatures.FEATURE_TRANSACTIONS)
public void shouldSupportTransactionIsolationWithSeparateThreads() throws Exception {
    // one thread modifies the graph and a separate thread reads before the transaction is committed.
    // the expectation is that the changes in the transaction are isolated to the thread that made the change
    // and the second thread should not see the change until commit() in the first thread.
    final CountDownLatch latchCommit = new CountDownLatch(1);
    final CountDownLatch latchFirstRead = new CountDownLatch(1);
    final CountDownLatch latchSecondRead = new CountDownLatch(1);

    final Thread threadMod = new Thread() {
        @Override
        public void run() {
            graph.addVertex();
            latchFirstRead.countDown();
            try {
                latchCommit.await();
            } catch (InterruptedException ie) {
                throw new RuntimeException(ie);
            }
            graph.tx().commit();
            latchSecondRead.countDown();
        }
    };
    threadMod.start();

    final AtomicLong beforeCommitInOtherThread = new AtomicLong(0);
    final AtomicLong afterCommitInOtherThreadButBeforeRollbackInCurrentThread = new AtomicLong(0);
    final AtomicLong afterCommitInOtherThread = new AtomicLong(0);
    final Thread threadRead = new Thread() {
        @Override
        public void run() {
            try {
                latchFirstRead.await();
            } catch (InterruptedException ie) {
                throw new RuntimeException(ie);
            }

            // reading vertex before tx from other thread is committed...should have zero vertices
            beforeCommitInOtherThread.set(IteratorUtils.count(graph.vertices()));
            latchCommit.countDown();

            try {
                latchSecondRead.await();
            } catch (InterruptedException ie) {
                throw new RuntimeException(ie);
            }

            // tx in other thread is committed...should have one vertex. rollback first to start a new tx
            // to get a fresh read given the commit
            afterCommitInOtherThreadButBeforeRollbackInCurrentThread.set(IteratorUtils.count(graph.vertices()));
            graph.tx().rollback();
            afterCommitInOtherThread.set(IteratorUtils.count(graph.vertices()));
        }
    };
    threadRead.start();

    threadMod.join();
    threadRead.join();

    assertEquals(0L, beforeCommitInOtherThread.get());
    assertEquals(0L, afterCommitInOtherThreadButBeforeRollbackInCurrentThread.get());
    assertEquals(1L, afterCommitInOtherThread.get());
}
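Note the role the three AtomicLongs play above: not counters, but thread-safe result boxes. An anonymous Thread can only capture (effectively) final locals, so a mutable atomic holder is the standard way to pass a value back out of the worker. A minimal sketch of the same pattern, with a stand-in computation replacing the graph reads:

import java.util.concurrent.atomic.AtomicLong;

public class ResultBoxSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong result = new AtomicLong(0); // captured local must be (effectively) final
        Thread worker = new Thread(() -> result.set(42L)); // stand-in for IteratorUtils.count(...)
        worker.start();
        worker.join(); // join() establishes happens-before, so the read below is safe
        System.out.println(result.get()); // prints 42
    }
}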