List of usage examples for java.util.concurrent.atomic AtomicInteger decrementAndGet
public final int decrementAndGet()
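decrementAndGet() atomically decrements this AtomicInteger by one and returns the updated value, so a return value of 0 tells the caller it performed the final decrement. The examples below all build on that guarantee. First, a minimal, self-contained sketch of the core idiom (class and thread names are illustrative, not taken from any of the sources below): each worker decrements a shared counter, and exactly one of them observes it reach zero.

    import java.util.concurrent.atomic.AtomicInteger;

    public class DecrementAndGetDemo {
        public static void main(String[] args) throws InterruptedException {
            final AtomicInteger pending = new AtomicInteger(3); // one unit per worker
            Runnable task = () -> {
                // decrementAndGet is atomic: no two threads can both observe 0 here
                if (pending.decrementAndGet() == 0) {
                    System.out.println(Thread.currentThread().getName() + " finished last");
                }
            };
            Thread[] workers = new Thread[3];
            for (int i = 0; i < workers.length; i++) {
                workers[i] = new Thread(task, "worker-" + i);
                workers[i].start();
            }
            for (Thread w : workers) {
                w.join();
            }
        }
    }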
From source file: com.ibm.jaggr.core.impl.deps.DepTree.java
/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 *
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    final String sourceMethod = "<ctor>"; //$NON-NLS-1$
    boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(DepTree.class.getName(), sourceMethod,
                new Object[] { paths, aggregator, stamp, clean, validateDeps });
    }
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();
    cacheBust = AggregatorUtil.getCacheBust(aggregator);

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                if (isTraceLogging) {
                    log.finer("Attempting to read cached dependencies from " + cacheFile.toString()); //$NON-NLS-1$
                }
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided. Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated. Validate dependencies.
            validateDeps = true;
        }
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (isTraceLogging) {
                log.finer("Current cacheBust = " + cacheBust + ", cached cacheBust = " + cached.cacheBust); //$NON-NLS-1$//$NON-NLS-2$
            }
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
        if (cached != null && !StringUtils.equals(rawConfig, cached.rawConfig)) {
            if (isTraceLogging) {
                log.finer("Current config = " + rawConfig); //$NON-NLS-1$
                log.finer("Cached config = " + cached.rawConfig); //$NON-NLS-1$
            }
            validateDeps = true;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with that.
     */
    if (cached != null && !validateDeps && !clean) {
        depMap = cached.depMap;
        fromCache = true;
        return;
    } else if (isTraceLogging) {
        log.finer("Building/validating deps: cached = " + cached + ", validateDeps = " + validateDeps //$NON-NLS-1$//$NON-NLS-2$
                + ", clean = " + clean); //$NON-NLS-1$
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });
    ConsoleService cs = new ConsoleService();
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }

    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    Set<String> nonJSExtensions = Collections.unmodifiableSet(getNonJSExtensions(aggregator));

    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode("", path); //$NON-NLS-1$
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode, nonJSExtensions));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (folder nodes with no children)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        if (isTraceLogging) {
            log.finer("Writing cached dependencies to " + cacheFile.toString()); //$NON-NLS-1$
        }
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }

    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    if (isTraceLogging) {
        log.exiting(DepTree.class.getName(), sourceMethod);
    }
}
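The loop condition while (treeBuilderCount.decrementAndGet() >= 0) is the interesting part: the counter was incremented once per submitted task, so decrementing before each take() drains exactly as many results from the completion service as were submitted, with no separate bookkeeping list. A stripped-down sketch of that idiom (all names hypothetical):

    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.atomic.AtomicInteger;

    public class DrainByCount {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            CompletionService<Integer> cs = new ExecutorCompletionService<>(pool);
            AtomicInteger submitted = new AtomicInteger(0);
            for (int i = 0; i < 10; i++) {
                final int n = i;
                submitted.incrementAndGet(); // count before submitting, like treeBuilderCount
                cs.submit(() -> n * n);
            }
            // Each pass consumes one decrement, so the loop performs exactly ten takes.
            while (submitted.decrementAndGet() >= 0) {
                System.out.println("result: " + cs.take().get());
            }
            pool.shutdown();
        }
    }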
From source file: org.deeplearning4j.models.glove.Glove.java
public void doIteration(final int i, List<Pair<String, String>> pairList,
        final Counter<Integer> errorPerIteration, final AtomicInteger processed, final AtomicInteger countUp) {
    log.info("Iteration " + i);
    if (shuffle)
        Collections.shuffle(pairList, new java.util.Random());

    List<List<Pair<String, String>>> miniBatches = Lists.partition(pairList, batchSize);
    ActorSystem actor = ActorSystem.create();
    Parallelization.iterateInParallel(miniBatches,
            new Parallelization.RunnableWithParams<List<Pair<String, String>>>() {
                @Override
                public void run(List<Pair<String, String>> currentItem, Object[] args) {
                    List<Pair<VocabWord, VocabWord>> send = new ArrayList<>();
                    for (Pair<String, String> next : currentItem) {
                        String w1 = next.getFirst();
                        String w2 = next.getSecond();
                        VocabWord vocabWord = vocab().wordFor(w1);
                        VocabWord vocabWord1 = vocab().wordFor(w2);
                        send.add(new Pair<>(vocabWord, vocabWord1));
                    }
                    jobQueue.add(new Pair<>(i, send));
                }
            }, actor);
    actor.shutdown();

    Parallelization.runInParallel(numWorkers, new Runnable() {
        @Override
        public void run() {
            while (processed.get() > 0 || !jobQueue.isEmpty()) {
                Pair<Integer, List<Pair<VocabWord, VocabWord>>> work = jobQueue.poll();
                if (work == null)
                    continue;

                List<Pair<VocabWord, VocabWord>> batch = work.getSecond();
                for (Pair<VocabWord, VocabWord> pair : batch) {
                    VocabWord w1 = pair.getFirst();
                    VocabWord w2 = pair.getSecond();
                    double weight = getCount(w1.getWord(), w2.getWord());
                    if (weight <= 0) {
                        countUp.incrementAndGet();
                        processed.decrementAndGet();
                        continue;
                    }

                    errorPerIteration.incrementCount(work.getFirst(),
                            lookupTable().iterateSample(w1, w2, weight));
                    countUp.incrementAndGet();
                    if (countUp.get() % 10000 == 0)
                        log.info("Processed " + countUp.get() + " co occurrences");
                    processed.decrementAndGet();
                }
            }
        }
    }, true);
}
From source file: org.deeplearning4j.models.glove.LegacyGlove.java
public void doIteration(final int i, List<Pair<String, String>> pairList,
        final Counter<Integer> errorPerIteration, final AtomicInteger processed, final AtomicInteger countUp) {
    log.info("Iteration " + i);
    if (shuffle)
        Collections.shuffle(pairList, new java.util.Random());

    List<List<Pair<String, String>>> miniBatches = Lists.partition(pairList, batchSize);
    ActorSystem actor = ActorSystem.create();
    Parallelization.iterateInParallel(miniBatches,
            new Parallelization.RunnableWithParams<List<Pair<String, String>>>() {
                @Override
                public void run(List<Pair<String, String>> currentItem, Object[] args) {
                    List<Pair<VocabWord, VocabWord>> send = new ArrayList<>();
                    for (Pair<String, String> next : currentItem) {
                        String w1 = next.getFirst();
                        String w2 = next.getSecond();
                        VocabWord vocabWord = vocab().wordFor(w1);
                        VocabWord vocabWord1 = vocab().wordFor(w2);
                        send.add(new Pair<>(vocabWord, vocabWord1));
                    }
                    jobQueue.add(new Pair<>(i, send));
                }
            }, actor);
    actor.shutdown();

    Parallelization.runInParallel(numWorkers, new Runnable() {
        @Override
        public void run() {
            while (processed.get() > 0 || !jobQueue.isEmpty()) {
                Pair<Integer, List<Pair<VocabWord, VocabWord>>> work = jobQueue.poll();
                if (work == null)
                    continue;

                List<Pair<VocabWord, VocabWord>> batch = work.getSecond();
                for (Pair<VocabWord, VocabWord> pair : batch) {
                    VocabWord w1 = pair.getFirst();
                    VocabWord w2 = pair.getSecond();
                    double weight = getCount(w1.getWord(), w2.getWord());
                    if (weight <= 0) {
                        countUp.incrementAndGet();
                        processed.decrementAndGet();
                        continue;
                    }

                    //errorPerIteration.incrementCount(work.getFirst(),lookupTable().iterateSample(w1, w2, weight));
                    countUp.incrementAndGet();
                    if (countUp.get() % 10000 == 0)
                        log.info("Processed " + countUp.get() + " co occurrences");
                    processed.decrementAndGet();
                }
            }
        }
    }, true);
}
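Both Glove variants (this one differs from the previous example only in that the error-accumulation line is commented out) use processed as a remaining-work counter: it is pre-sized to the total number of pairs, each consumer decrements it once per pair handled, and the worker loop keeps polling while the counter is positive or the queue is non-empty. A condensed sketch of that consumer shape (names hypothetical):

    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.atomic.AtomicInteger;

    public class CountedConsumer {
        public static void main(String[] args) throws InterruptedException {
            ConcurrentLinkedQueue<Integer> jobs = new ConcurrentLinkedQueue<>();
            AtomicInteger remaining = new AtomicInteger(100); // pre-sized to the total work
            for (int i = 0; i < 100; i++) {
                jobs.add(i);
            }
            Runnable consumer = () -> {
                // Run until every queued item has been accounted for.
                while (remaining.get() > 0) {
                    Integer job = jobs.poll();
                    if (job == null) {
                        continue; // queue drained, but another consumer may still be mid-item
                    }
                    // ... process job ...
                    remaining.decrementAndGet(); // mark one unit of work done
                }
            };
            Thread a = new Thread(consumer);
            Thread b = new Thread(consumer);
            a.start();
            b.start();
            a.join();
            b.join();
        }
    }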
From source file: org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.java
/**
 * Verify that "write" operations for a single table are serialized,
 * but different tables can be executed in parallel.
 */
@Test(timeout = 90000)
public void testConcurrentWriteOps() throws Exception {
    final TestTableProcSet procSet = new TestTableProcSet(queue);

    final int NUM_ITEMS = 10;
    final int NUM_TABLES = 4;
    final AtomicInteger opsCount = new AtomicInteger(0);
    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
        for (int j = 1; j < NUM_ITEMS; ++j) {
            procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
                    TableProcedureInterface.TableOperationType.EDIT));
            opsCount.incrementAndGet();
        }
    }
    assertEquals(opsCount.get(), queue.size());

    final Thread[] threads = new Thread[NUM_TABLES * 2];
    final HashSet<TableName> concurrentTables = new HashSet<TableName>();
    final ArrayList<String> failures = new ArrayList<String>();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                while (opsCount.get() > 0) {
                    try {
                        Procedure proc = procSet.acquire();
                        if (proc == null) {
                            queue.signalAll();
                            if (opsCount.get() > 0) {
                                continue;
                            }
                            break;
                        }

                        TableName tableId = procSet.getTableName(proc);
                        synchronized (concurrentTables) {
                            assertTrue("unexpected concurrency on " + tableId, concurrentTables.add(tableId));
                        }
                        assertTrue(opsCount.decrementAndGet() >= 0);
                        try {
                            long procId = proc.getProcId();
                            int concurrent = concurrentCount.incrementAndGet();
                            assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES,
                                    concurrent >= 1 && concurrent <= NUM_TABLES);
                            LOG.debug("[S] tableId=" + tableId + " procId=" + procId + " concurrent=" + concurrent);
                            Thread.sleep(2000);
                            concurrent = concurrentCount.decrementAndGet();
                            LOG.debug("[E] tableId=" + tableId + " procId=" + procId + " concurrent=" + concurrent);
                            assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
                        } finally {
                            synchronized (concurrentTables) {
                                assertTrue(concurrentTables.remove(tableId));
                            }
                            procSet.release(proc);
                        }
                    } catch (Throwable e) {
                        LOG.error("Failed " + e.getMessage(), e);
                        synchronized (failures) {
                            failures.add(e.getMessage());
                        }
                    } finally {
                        queue.signalAll();
                    }
                }
            }
        };
        threads[i].start();
    }
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }
    assertTrue(failures.toString(), failures.isEmpty());
    assertEquals(0, opsCount.get());
    assertEquals(0, queue.size());
    for (int i = 1; i <= NUM_TABLES; ++i) {
        TableName table = TableName.valueOf(String.format("testtb-%04d", i));
        assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table));
    }
}
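Note the two distinct roles decrementAndGet plays in this test: opsCount is a countdown of work items, while concurrentCount is a gauge, incremented on entry to the measured region and decremented on exit so the test can assert an upper bound on parallelism. A minimal gauge sketch (names hypothetical):

    import java.util.concurrent.atomic.AtomicInteger;

    public class ConcurrencyGauge {
        private static final AtomicInteger inFlight = new AtomicInteger(0);
        private static final AtomicInteger maxSeen = new AtomicInteger(0);

        static void guardedSection() {
            int now = inFlight.incrementAndGet(); // entering the measured region
            maxSeen.accumulateAndGet(now, Math::max);
            try {
                // ... the work being measured ...
            } finally {
                inFlight.decrementAndGet(); // leaving, always balanced with the increment
            }
        }

        public static void main(String[] args) throws InterruptedException {
            Thread[] threads = new Thread[4];
            for (int i = 0; i < threads.length; i++) {
                threads[i] = new Thread(ConcurrencyGauge::guardedSection);
                threads[i].start();
            }
            for (Thread t : threads) {
                t.join();
            }
            System.out.println("peak concurrency: " + maxSeen.get());
        }
    }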
From source file: edu.cmu.graphchi.engine.HypergraphChiEngine.java
private void execUpdates(final HypergraphChiProgram<VertexDataType, EdgeDataType> program,
        final ChiVertex<VertexDataType, EdgeDataType>[] vertices) {
    if (vertices == null || vertices.length == 0)
        return;
    TimerContext _timer = executionTimer.time();

    if (Runtime.getRuntime().availableProcessors() == 1) {
        /* Sequential updates */
        for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
            if (vertex != null) {
                nupdates++;
                hypergraphUpdate(program, vertex, chiContext);
                // program.update(vertex, chiContext);
            }
        }
    } else {
        final Object termlock = new Object();
        final int chunkSize = 1 + vertices.length / 64;

        final int nWorkers = vertices.length / chunkSize + 1;
        final AtomicInteger countDown = new AtomicInteger(1 + nWorkers);

        if (!enableDeterministicExecution) {
            for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
                if (vertex != null)
                    vertex.parallelSafe = true;
            }
        }

        /* Parallel updates. One thread for non-parallel safe updates, others
           updated in parallel. This guarantees deterministic execution. */

        /* Non-safe updates */
        parallelExecutor.submit(new Runnable() {
            public void run() {
                int thrupdates = 0;
                GraphChiContext threadContext = chiContext.clone(0);
                try {
                    for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
                        if (vertex != null && !vertex.parallelSafe) {
                            thrupdates++;
                            hypergraphUpdate(program, vertex, threadContext);
                            // program.update(vertex, threadContext);
                        }
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    int pending = countDown.decrementAndGet();
                    synchronized (termlock) {
                        nupdates += thrupdates;
                        if (pending == 0) {
                            termlock.notifyAll();
                        }
                    }
                }
            }
        });

        /* Parallel updates */
        for (int thrId = 0; thrId < nWorkers; thrId++) {
            final int myId = thrId;
            final int chunkStart = myId * chunkSize;
            final int chunkEnd = chunkStart + chunkSize;

            parallelExecutor.submit(new Runnable() {
                public void run() {
                    int thrupdates = 0;
                    GraphChiContext threadContext = chiContext.clone(1 + myId);
                    try {
                        int end = chunkEnd;
                        if (end > vertices.length)
                            end = vertices.length;
                        for (int i = chunkStart; i < end; i++) {
                            ChiVertex<VertexDataType, EdgeDataType> vertex = vertices[i];
                            if (vertex != null && vertex.parallelSafe) {
                                thrupdates++;
                                hypergraphUpdate(program, vertex, threadContext);
                                // program.update(vertex, threadContext);
                            }
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        int pending = countDown.decrementAndGet();
                        synchronized (termlock) {
                            nupdates += thrupdates;
                            if (pending == 0) {
                                termlock.notifyAll();
                            }
                        }
                    }
                }
            });
        }

        synchronized (termlock) {
            while (countDown.get() > 0) {
                try {
                    termlock.wait(1500);
                } catch (InterruptedException e) {
                    // What to do?
                    e.printStackTrace();
                }
                if (countDown.get() > 0)
                    logger.info("Waiting for execution to finish: countDown:" + countDown.get());
            }
        }
    }
    _timer.stop();
}
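The finally blocks above amount to a hand-rolled CountDownLatch: every worker decrements countDown, and the worker whose decrement brings it to zero wakes the coordinator waiting on termlock. A minimal version of that protocol (names hypothetical); in new code, java.util.concurrent.CountDownLatch expresses the same thing directly:

    import java.util.concurrent.atomic.AtomicInteger;

    public class LatchByDecrement {
        public static void main(String[] args) throws InterruptedException {
            final Object termlock = new Object();
            final int nWorkers = 4;
            final AtomicInteger countDown = new AtomicInteger(nWorkers);
            for (int i = 0; i < nWorkers; i++) {
                new Thread(() -> {
                    try {
                        // ... do one chunk of work ...
                    } finally {
                        // Only the worker that decrements to 0 wakes the coordinator.
                        if (countDown.decrementAndGet() == 0) {
                            synchronized (termlock) {
                                termlock.notifyAll();
                            }
                        }
                    }
                }).start();
            }
            synchronized (termlock) {
                while (countDown.get() > 0) {
                    termlock.wait(1500); // timed wait guards against a missed notification
                }
            }
            System.out.println("all workers done");
        }
    }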
From source file: com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java
@Test
public void testKilledServerWithEnsembleProvider() throws Exception {
    final int CLIENT_QTY = 10;
    final Timing timing = new Timing();
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(CLIENT_QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    TestingCluster cluster = new TestingCluster(3);
    try {
        cluster.start();

        final AtomicReference<String> connectionString = new AtomicReference<String>(
                cluster.getConnectString());
        final EnsembleProvider provider = new EnsembleProvider() {
            @Override
            public void start() throws Exception {
            }

            @Override
            public String getConnectionString() {
                return connectionString.get();
            }

            @Override
            public void close() throws IOException {
            }
        };

        final Semaphore acquiredSemaphore = new Semaphore(0);
        final AtomicInteger acquireCount = new AtomicInteger(0);
        final CountDownLatch suspendedLatch = new CountDownLatch(CLIENT_QTY);
        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = CuratorFrameworkFactory.builder().ensembleProvider(provider)
                            .sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection())
                            .retryPolicy(new ExponentialBackoffRetry(100, 3)).build();
                    try {
                        final Semaphore suspendedSemaphore = new Semaphore(0);
                        client.getConnectionStateListenable().addListener(new ConnectionStateListener() {
                            @Override
                            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                                if ((newState == ConnectionState.SUSPENDED)
                                        || (newState == ConnectionState.LOST)) {
                                    suspendedLatch.countDown();
                                    suspendedSemaphore.release();
                                }
                            }
                        });

                        client.start();
                        InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, PATH, 1);

                        while (!Thread.currentThread().isInterrupted()) {
                            Lease lease = null;
                            try {
                                lease = semaphore.acquire();
                                acquiredSemaphore.release();
                                acquireCount.incrementAndGet();
                                suspendedSemaphore.acquire();
                            } catch (Exception e) {
                                // just retry
                            } finally {
                                if (lease != null) {
                                    acquireCount.decrementAndGet();
                                    IOUtils.closeQuietly(lease);
                                }
                            }
                        }
                    } finally {
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        Assert.assertEquals(1, acquireCount.get());

        cluster.close();
        timing.awaitLatch(suspendedLatch);
        timing.forWaiting().sleepABit();
        Assert.assertEquals(0, acquireCount.get());

        cluster = new TestingCluster(3);
        cluster.start();

        connectionString.set(cluster.getConnectString());
        timing.forWaiting().sleepABit();

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        timing.forWaiting().sleepABit();
        Assert.assertEquals(1, acquireCount.get());
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.SECONDS);
        executorService.shutdownNow();
        IOUtils.closeQuietly(cluster);
    }
}
From source file: com.vmware.admiral.adapter.docker.service.DockerAdapterService.java
private void connectCreatedContainerToNetworks(RequestContext context) {
    AtomicInteger count = new AtomicInteger(context.containerState.networks.size());
    AtomicBoolean error = new AtomicBoolean();

    for (Entry<String, ServiceNetwork> entry : context.containerState.networks.entrySet()) {

        CommandInput connectCommandInput = new CommandInput(context.commandInput);

        String containerId = context.containerState.id;
        String networkId = entry.getKey();

        addNetworkConfig(connectCommandInput, context.containerState.id, entry.getKey(), entry.getValue());

        context.executor.connectContainerToNetwork(connectCommandInput, (o, ex) -> {
            if (ex != null) {
                logWarning("Exception while connecting container [%s] to network [%s]", containerId, networkId);

                if (error.compareAndSet(false, true)) {
                    // Update the container state so further actions (e.g. cleanup) can be performed
                    context.containerState.status = ContainerState.CONTAINER_ERROR_STATUS;
                    context.containerState.powerState = ContainerState.PowerState.ERROR;
                    context.requestFailed = true;

                    inspectContainer(context);
                    fail(context.request, o, ex);
                }
            } else if (count.decrementAndGet() == 0) {
                startCreatedContainer(context);
            }
        });
    }
}
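Here the counter drives a fan-in over asynchronous callbacks: count starts at the number of networks, every successful callback decrements it, and only the callback that reaches zero calls startCreatedContainer, so the continuation runs exactly once and only after all connections succeed; the AtomicBoolean keeps the failure path to at most one execution. A stripped-down sketch of the same fan-in shape (names hypothetical):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicInteger;

    public class FanInDemo {
        public static void main(String[] args) throws InterruptedException {
            final int branches = 3;
            final AtomicInteger count = new AtomicInteger(branches);
            final AtomicBoolean error = new AtomicBoolean();
            for (int i = 0; i < branches; i++) {
                final int id = i;
                CompletableFuture.runAsync(() -> doWork(id)).whenComplete((v, ex) -> {
                    if (ex != null) {
                        // compareAndSet ensures the failure handler runs at most once
                        if (error.compareAndSet(false, true)) {
                            System.out.println("failed: " + ex.getMessage());
                        }
                    } else if (count.decrementAndGet() == 0) {
                        // exactly one callback observes the count reach zero
                        System.out.println("all " + branches + " branches done, continuing");
                    }
                });
            }
            Thread.sleep(500); // crude wait for the async callbacks, for this demo only
        }

        static void doWork(int id) {
            System.out.println("branch " + id + " running");
        }
    }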
From source file: com.indeed.lsmtree.core.TestImmutableBTreeIndex.java
public void testRandom() throws Exception {
    final int[] ints = createTree();
    final ImmutableBTreeIndex.Reader<Integer, Long> reader = new ImmutableBTreeIndex.Reader(tmpDir,
            new IntSerializer(), new LongSerializer(), false);
    final int max = ints[ints.length - 1];
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < treeSize; i++) {
                        int rand = r.nextInt(max + 1);
                        int insertionindex = Arrays.binarySearch(ints, rand);
                        final Iterator<Generation.Entry<Integer, Long>> iterator = reader.iterator(rand, true);
                        try {
                            assertTrue(iterator.hasNext());
                        } catch (Throwable t) {
                            System.err.println("rand: " + rand);
                            throw Throwables.propagate(t);
                        }
                        Generation.Entry<Integer, Long> entry = iterator.next();
                        assertTrue("entry: " + entry + " rand: " + rand, entry.getKey() >= rand);
                        assertTrue(entry.getKey().longValue() == entry.getValue());
                        if (insertionindex >= 0) {
                            assertTrue(rand == ints[insertionindex]);
                            assertTrue(entry.getKey() == rand);
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result.getValue() == rand);
                        } else {
                            if (insertionindex != -1)
                                assertTrue(ints[(~insertionindex) - 1] < rand);
                            assertTrue("insertionindex: " + insertionindex + " entry: " + entry
                                    + " ints[!insertionindex]" + ints[~insertionindex],
                                    ints[~insertionindex] == entry.getKey());
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result == null);
                        }
                    }
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    reader.close();
}
From source file: com.indeed.lsmtree.core.TestImmutableBTreeIndex.java
public void testSeekPrevious() throws Exception {
    final int[] ints = createTree();
    final ImmutableBTreeIndex.Reader<Integer, Long> reader = new ImmutableBTreeIndex.Reader(tmpDir,
            new IntSerializer(), new LongSerializer(), false);
    final int max = ints[ints.length - 1];
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < treeSize; i++) {
                        int rand = r.nextInt(max + 10);
                        int insertionindex = Arrays.binarySearch(ints, rand);
                        final Iterator<Generation.Entry<Integer, Long>> iterator = reader.reverseIterator(rand,
                                true);
                        final boolean hasPrevious = iterator.hasNext();
                        Generation.Entry<Integer, Long> entry = null;
                        assertEquals("rand: " + rand + " hasPrevious: " + hasPrevious
                                + (hasPrevious ? " previous: " + (entry = iterator.next()) : ""),
                                hasPrevious, insertionindex != -1);
                        if (hasPrevious) {
                            if (entry == null)
                                entry = iterator.next();
                            assertTrue(entry.getKey() <= rand);
                            assertTrue(entry.getKey().longValue() == entry.getValue());
                        }
                        if (insertionindex >= 0) {
                            if (entry == null)
                                entry = iterator.next();
                            assertTrue(rand == ints[insertionindex]);
                            assertTrue(entry.getKey() == rand);
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result.getValue() == rand);
                        } else {
                            if (hasPrevious) {
                                assertTrue(ints[(~insertionindex) - 1] < rand);
                                assertTrue(ints[(~insertionindex) - 1] == entry.getKey());
                            }
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result == null);
                        }
                    }
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    reader.close();
}
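Both lsmtree tests coordinate shutdown the same way: done starts at the thread count, each worker decrements it in a finally block (so failed assertions still count down), and the main thread spin-waits with Thread.yield() until it reaches zero. A condensed sketch of that shape (names hypothetical); a CountDownLatch with await() would avoid the busy-wait:

    import java.util.concurrent.atomic.AtomicInteger;

    public class SpinWaitJoin {
        public static void main(String[] args) {
            final int nThreads = 8;
            final AtomicInteger done = new AtomicInteger(nThreads);
            for (int i = 0; i < nThreads; i++) {
                new Thread(() -> {
                    try {
                        // ... per-thread assertions run here ...
                    } finally {
                        done.decrementAndGet(); // runs even if an assertion throws
                    }
                }).start();
            }
            while (done.get() > 0) {
                Thread.yield(); // busy-wait until every worker has checked in
            }
            System.out.println("all workers finished");
        }
    }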
From source file: org.jspresso.hrsample.backend.JspressoUnitOfWorkTest.java
/**
 * Tests in TX collection element update with optimistic locking.
 */
@Test
public void testInTXCollectionElementUpdate() {
    final HibernateBackendController hbc = (HibernateBackendController) getBackendController();
    final AtomicInteger countDown = new AtomicInteger(10);
    ExecutorService es = Executors.newFixedThreadPool(countDown.get());
    List<Future<Set<String>>> futures = new ArrayList<Future<Set<String>>>();
    for (int t = countDown.intValue(); t > 0; t--) {
        futures.add(es.submit(new Callable<Set<String>>() {

            @Override
            public Set<String> call() throws Exception {
                final HibernateBackendController threadHbc = getApplicationContext()
                        .getBean("applicationBackController", HibernateBackendController.class);
                final TransactionTemplate threadTT = threadHbc.getTransactionTemplate();
                threadHbc.start(hbc.getLocale(), hbc.getClientTimeZone());
                threadHbc.setApplicationSession(hbc.getApplicationSession());
                BackendControllerHolder.setThreadBackendController(threadHbc);
                return threadTT.execute(new TransactionCallback<Set<String>>() {

                    /**
                     * {@inheritDoc}
                     */
                    @Override
                    public Set<String> doInTransaction(TransactionStatus status) {
                        DetachedCriteria compCrit = DetachedCriteria.forClass(Company.class);
                        Set<String> names = new HashSet<String>();
                        Company c = (Company) compCrit.getExecutableCriteria(threadHbc.getHibernateSession())
                                .list().iterator().next();
                        synchronized (countDown) {
                            countDown.decrementAndGet();
                            // wait for all threads to arrive here so that we are sure they
                            // have all read the same data.
                            try {
                                countDown.wait();
                            } catch (InterruptedException ex) {
                                throw new BackendException("Test has been interrupted");
                            }
                        }
                        if (c.getName().startsWith("TX_")) {
                            throw new BackendException("Wrong data read from DB");
                        }
                        c.setName("TX_" + Long.toHexString(System.currentTimeMillis()));
                        names.add(c.getName());
                        for (Department d : c.getDepartments()) {
                            d.setName(Long.toHexString(System.currentTimeMillis()));
                            names.add(d.getName());
                        }
                        return names;
                    }
                });
            }
        }));
    }
    while (countDown.get() > 0) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
            throw new BackendException("Test has been interrupted");
        }
    }
    synchronized (countDown) {
        countDown.notifyAll();
    }
    int successfullTxCount = 0;
    Set<String> names = new HashSet<String>();
    for (Future<Set<String>> f : futures) {
        try {
            names = f.get();
            successfullTxCount++;
        } catch (Exception ex) {
            if (ex.getCause() instanceof OptimisticLockingFailureException) {
                // safely ignore since this is what we are testing.
            } else {
                throw new BackendException(ex);
            }
        }
    }
    es.shutdown();
    assertTrue("Only 1 TX succeeded", successfullTxCount == 1);
    DetachedCriteria compCrit = DetachedCriteria.forClass(Company.class);
    Company c = hbc.findFirstByCriteria(compCrit, EMergeMode.MERGE_LAZY, Company.class);
    assertTrue("the company name is the one of the successfull TX", names.contains(c.getName()));
    for (Department d : c.getDepartments()) {
        assertTrue("the department name is the one of the successfull TX", names.contains(d.getName()));
    }
}
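An unusual detail here is that the AtomicInteger serves double duty as counter and as monitor: each transaction decrements countDown and then wait()s on it inside one synchronized block, while the test thread polls until the count hits zero and then notifyAll()s to release every transaction at once, guaranteeing they all read the same data before any of them writes. Keeping the decrement and the wait() under the same lock is what prevents a missed wakeup. A minimal reproduction of that barrier idiom (names hypothetical):

    import java.util.concurrent.atomic.AtomicInteger;

    public class MonitorCounterBarrier {
        public static void main(String[] args) throws InterruptedException {
            final int nThreads = 4;
            final AtomicInteger countDown = new AtomicInteger(nThreads);
            for (int i = 0; i < nThreads; i++) {
                new Thread(() -> {
                    // ... each thread reads shared state here ...
                    synchronized (countDown) {
                        countDown.decrementAndGet();
                        try {
                            countDown.wait(); // park until the coordinator releases everyone
                        } catch (InterruptedException ex) {
                            Thread.currentThread().interrupt();
                        }
                    }
                    // ... each thread writes here, after all reads have happened ...
                }).start();
            }
            while (countDown.get() > 0) {
                Thread.sleep(50); // poll until every thread has parked
            }
            synchronized (countDown) {
                countDown.notifyAll(); // release the barrier
            }
        }
    }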