List of usage examples for java.util.concurrent.atomic AtomicInteger decrementAndGet
public final int decrementAndGet()
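Atomically decrements the current value by one and returns the updated value. Most of the examples below use it for the same countdown pattern: a counter is initialized to the number of outstanding workers, each worker decrements it when it finishes, and whichever worker observes the return value reach zero performs the completion step. A minimal, self-contained sketch of that pattern (the class and variable names here are illustrative only, not taken from any of the projects below):

import java.util.concurrent.atomic.AtomicInteger;

public class CountdownSketch {
    public static void main(String[] args) {
        final int workers = 4; // hypothetical worker count
        final AtomicInteger remaining = new AtomicInteger(workers);
        for (int i = 0; i < workers; i++) {
            new Thread(() -> {
                // ... do some work ...
                // decrementAndGet() returns the updated value, so exactly one
                // thread (the last to finish) sees it reach zero.
                if (remaining.decrementAndGet() == 0) {
                    System.out.println("last worker finished");
                }
            }).start();
        }
        // busy-wait for completion, mirroring the test examples below
        while (remaining.get() > 0) {
            Thread.yield();
        }
    }
}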
From source file:com.indeed.lsmtree.recordlog.TestRecordLogDirectory.java
public void testRandom() throws Exception {
    final RecordLogDirectory<String> fileCache = createRecordLogDirectory();
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < 10000; i++) {
                        int rand = r.nextInt(positions.size());
                        assertTrue(fileCache.get(positions.get(rand)).equals(strings.get(rand)));
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    fileCache.close();
}
From source file:org.wso2.andes.kernel.OnflightMessageTracker.java
/**
 * Decrement the message count in a slot and, if it reaches zero, check the slot again to resend.
 *
 * @param slot Slot whose message count is decremented
 * @throws AndesException
 */
public void decrementMessageCountInSlotAndCheckToResend(Slot slot) throws AndesException {
    AtomicInteger pendingMessageCount = pendingMessagesBySlot.get(slot);
    int messageCount = pendingMessageCount.decrementAndGet();
    if (messageCount == 0) {
        /*
         * All the acks for the slot have been received. Check the slot again for unsent
         * messages and, if there are any, send them and delete the slot.
         */
        SlotDeliveryWorker slotWorker = SlotDeliveryWorkerManager.getInstance()
                .getSlotWorker(slot.getStorageQueueName());
        if (log.isDebugEnabled()) {
            log.debug("Slot has no pending messages. Now re-checking slot for messages");
        }
        slot.setSlotInActive();
        slotWorker.deleteSlot(slot);
    }
}
From source file:com.spotify.heroic.shell.task.DeleteKeys.java
private AsyncFuture<Void> doDelete(final ShellIO io, final Parameters params, final MetricBackendGroup group,
        final QueryOptions options, final Stream<BackendKey> keys) {
    final StreamCollector<Pair<BackendKey, Long>, Void> collector =
            new StreamCollector<Pair<BackendKey, Long>, Void>() {
        @Override
        public void resolved(Pair<BackendKey, Long> result) throws Exception {
            if (params.verbose) {
                synchronized (io) {
                    io.out().println("Deleted: " + result.getLeft() + " (" + result.getRight() + ")");
                    io.out().flush();
                }
            }
        }

        @Override
        public void failed(Throwable cause) throws Exception {
            synchronized (io) {
                io.out().println("Delete Failed: " + cause);
                cause.printStackTrace(io.out());
                io.out().flush();
            }
        }

        @Override
        public void cancelled() throws Exception {
        }

        @Override
        public Void end(int resolved, int failed, int cancelled) throws Exception {
            io.out().println("Finished (resolved: " + resolved + ", failed: " + failed + ", "
                    + "cancelled: " + cancelled + ")");
            io.out().flush();
            return null;
        }
    };

    final AtomicInteger outstanding = new AtomicInteger(params.parallelism);
    final Object lock = new Object();
    final ResolvableFuture<Void> future = async.future();
    final Iterator<BackendKey> it = keys.iterator();

    for (int i = 0; i < params.parallelism; i++) {
        async.call(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                final BackendKey k;
                synchronized (lock) {
                    k = it.hasNext() ? it.next() : null;
                }

                if (k == null) {
                    if (outstanding.decrementAndGet() == 0) {
                        future.resolve(null);
                    }
                    return null;
                }

                deleteKey(group, k, options).onDone(new FutureDone<Pair<BackendKey, Long>>() {
                    @Override
                    public void failed(Throwable cause) throws Exception {
                        collector.failed(cause);
                    }

                    @Override
                    public void resolved(Pair<BackendKey, Long> result) throws Exception {
                        collector.resolved(result);
                    }

                    @Override
                    public void cancelled() throws Exception {
                        collector.cancelled();
                    }
                }).onFinished(this::call);

                return null;
            }
        });
    }

    return future.onFinished(keys::close);
}
From source file:com.espertech.esper.dataflow.core.EPDataFlowInstanceImpl.java
public synchronized void start() {
    checkExecCompleteState();
    checkExecCancelledState();
    checkExecRunningState();
    callOperatorOpen();
    final AtomicInteger countdown = new AtomicInteger(sourceRunnables.size());
    threads = new ArrayList<Thread>();
    for (int i = 0; i < sourceRunnables.size(); i++) {
        GraphSourceRunnable runnable = sourceRunnables.get(i);
        String threadName = "esper." + dataFlowName + "-" + i;
        Thread thread = new Thread(runnable, threadName);
        thread.setContextClassLoader(Thread.currentThread().getContextClassLoader());
        thread.setDaemon(true);
        runnable.addCompletionListener(new CompletionListener() {
            public void completed() {
                int remaining = countdown.decrementAndGet();
                if (remaining == 0) {
                    EPDataFlowInstanceImpl.this.completed();
                }
            }
        });
        threads.add(thread);
        thread.start();
    }
    setState(EPDataFlowState.RUNNING);
}
From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessReadWriteLock.java
private void doLocking(InterProcessLock lock, AtomicInteger concurrentCount, AtomicInteger maxConcurrentCount,
        Random random, int maxAllowed) throws Exception {
    try {
        Assert.assertTrue(lock.acquire(10, TimeUnit.SECONDS));
        int localConcurrentCount;
        synchronized (this) {
            localConcurrentCount = concurrentCount.incrementAndGet();
            if (localConcurrentCount > maxConcurrentCount.get()) {
                maxConcurrentCount.set(localConcurrentCount);
            }
        }
        Assert.assertTrue(localConcurrentCount <= maxAllowed, "" + localConcurrentCount);
        Thread.sleep(random.nextInt(9) + 1);
    } finally {
        synchronized (this) {
            concurrentCount.decrementAndGet();
            lock.release();
        }
    }
}
From source file:org.wso2.andes.server.cassandra.OnflightMessageTracker.java
/**
 * Decrement the message count in a slot and, if it reaches zero, check the slot again to resend.
 *
 * @param slot Slot whose message count is decremented
 * @throws AndesException
 */
public void decrementMessageCountInSlotAndCheckToResend(Slot slot) throws AndesException {
    AtomicInteger pendingMessageCount = pendingMessagesBySlot.get(slot);
    int messageCount = pendingMessageCount.decrementAndGet();
    if (messageCount == 0) {
        /*
         * All the acks for the slot have been received. Check the slot again for unsent
         * messages and, if there are any, send them and delete the slot.
         */
        SlotDeliveryWorker slotWorker = SlotDeliveryWorkerManager.getInstance()
                .getSlotWorker(slot.getStorageQueueName());
        if (log.isDebugEnabled()) {
            log.debug("Slot has no pending messages. Now re-checking slot for messages");
        }
        slotWorker.checkForSlotCompletionAndResend(slot);
    }
}
From source file:com.vmware.photon.controller.deployer.dcp.task.CreateIsoTaskService.java
private void applyConfiguration(final State currentState, Set<ContainerService.State> containerServices)
        throws Exception {
    String mustacheDirectory = TMP_CONFIG_DIR + "-" + currentState.vmId + "/";
    ServiceConfigurator serviceConfigurator = HostUtils.getServiceConfiguratorFactory(this).create();
    serviceConfigurator.copyDirectory(
            HostUtils.getDeployerContext(CreateIsoTaskService.this).getConfigDirectory(), mustacheDirectory);

    final AtomicInteger pendingRequests = new AtomicInteger(containerServices.size());

    FutureCallback<ContainerService.State> callback = new FutureCallback<ContainerService.State>() {
        @Override
        public void onSuccess(@Nullable ContainerService.State result) {
            if (0 == pendingRequests.decrementAndGet()) {
                try {
                    createIsoFile(currentState, mustacheDirectory);
                } catch (Throwable t) {
                    failTask(t);
                }
            }
        }

        @Override
        public void onFailure(Throwable t) {
            failTask(t);
        }
    };

    containerServices.stream().forEach(containerService -> getContainerTemplate(mustacheDirectory,
            Utils.fromJson(containerService, ContainerService.State.class), callback));
}
From source file:org.springframework.integration.ip.tcp.connection.CachingClientConnectionFactoryTests.java
@SuppressWarnings("unchecked")
@Test // INT-3722
public void testGatewayRelease() throws Exception {
    TcpNetServerConnectionFactory in = new TcpNetServerConnectionFactory(0);
    in.setApplicationEventPublisher(mock(ApplicationEventPublisher.class));
    final TcpSendingMessageHandler handler = new TcpSendingMessageHandler();
    handler.setConnectionFactory(in);
    final AtomicInteger count = new AtomicInteger(2);
    in.registerListener(message -> {
        if (!(message instanceof ErrorMessage)) {
            if (count.decrementAndGet() < 1) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
            handler.handleMessage(message);
        }
        return false;
    });
    handler.setBeanFactory(mock(BeanFactory.class));
    handler.afterPropertiesSet();
    handler.start();
    TestingUtilities.waitListening(in, null);
    int port = in.getPort();
    TcpNetClientConnectionFactory out = new TcpNetClientConnectionFactory("localhost", port);
    out.setApplicationEventPublisher(mock(ApplicationEventPublisher.class));
    CachingClientConnectionFactory cache = new CachingClientConnectionFactory(out, 2);
    final TcpOutboundGateway gate = new TcpOutboundGateway();
    gate.setConnectionFactory(cache);
    QueueChannel outputChannel = new QueueChannel();
    gate.setOutputChannel(outputChannel);
    gate.setBeanFactory(mock(BeanFactory.class));
    gate.afterPropertiesSet();
    Log logger = spy(TestUtils.getPropertyValue(gate, "logger", Log.class));
    new DirectFieldAccessor(gate).setPropertyValue("logger", logger);
    when(logger.isDebugEnabled()).thenReturn(true);
    doAnswer(new Answer<Void>() {

        private final CountDownLatch latch = new CountDownLatch(2);

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            invocation.callRealMethod();
            String log = invocation.getArgument(0);
            if (log.startsWith("Response")) {
                Executors.newSingleThreadScheduledExecutor()
                        .execute(() -> gate.handleMessage(new GenericMessage<>("bar")));
                // hold up the first thread until the second has added its pending reply
                latch.await(10, TimeUnit.SECONDS);
            } else if (log.startsWith("Added")) {
                latch.countDown();
            }
            return null;
        }

    }).when(logger).debug(anyString());
    gate.start();
    gate.handleMessage(new GenericMessage<String>("foo"));
    Message<byte[]> result = (Message<byte[]>) outputChannel.receive(10000);
    assertNotNull(result);
    assertEquals("foo", new String(result.getPayload()));
    result = (Message<byte[]>) outputChannel.receive(10000);
    assertNotNull(result);
    assertEquals("bar", new String(result.getPayload()));
    handler.stop();
    gate.stop();
    verify(logger, never()).error(anyString());
}
From source file:org.apache.hadoop.hbase.zookeeper.lock.TestHReadWriteLockImpl.java
@Test(timeout = 30000)
public void testReadLockDoesNotExcludeReaders() throws Exception {
    final String testName = "testReadLockDoesNotExcludeReaders";
    final HReadWriteLockImpl readWriteLock = getReadWriteLock(testName);
    final CountDownLatch locksAcquiredLatch = new CountDownLatch(NUM_THREADS);
    final AtomicInteger locksHeld = new AtomicInteger(0);
    List<Future<Void>> results = Lists.newArrayList();
    for (int i = 0; i < NUM_THREADS; ++i) {
        final String threadDesc = testName + i;
        results.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                HReadLockImpl readLock = readWriteLock.readLock(Bytes.toBytes(threadDesc));
                readLock.acquire();
                try {
                    locksHeld.incrementAndGet();
                    locksAcquiredLatch.countDown();
                    Thread.sleep(1000);
                } finally {
                    readLock.release();
                    locksHeld.decrementAndGet();
                }
                return null;
            }
        }));
    }
    locksAcquiredLatch.await();
    assertEquals(locksHeld.get(), NUM_THREADS);
    MultiThreadedTestUtils.assertOnFutures(results);
}
From source file:com.indeed.lsmtree.recordlog.TestBlockCompressedRecordFile.java
public void testRandomWithReader() throws IOException {
    final BlockCompressedRecordFile<String> recordFile = createBlockCache();
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < 10000000; i++) {
                        int rand = r.nextInt(positions.size());
                        final RecordFile.Reader<String> reader = recordFile.reader(positions.get(rand));
                        assertTrue(reader.next());
                        assertEquals(reader.get(), strings.get(rand));
                        reader.close();
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    recordFile.close();
}