List of usage examples for java.util.concurrent.atomic.AtomicInteger.decrementAndGet()

public final int decrementAndGet()

Atomically decrements the current value by one and returns the updated value.
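Before the real-world samples below, a minimal sketch of the contract: decrementAndGet() is an atomic --counter that returns the new value, so comparing the return value against zero is race-free. The CountdownDemo class and the worker count of 3 are made up for illustration.

import java.util.concurrent.atomic.AtomicInteger;

public class CountdownDemo {
    public static void main(String[] args) {
        // Hypothetical number of workers; any positive count works the same way.
        final AtomicInteger pending = new AtomicInteger(3);
        for (int i = 0; i < 3; i++) {
            new Thread(() -> {
                // ... do some work ...
                // decrementAndGet() atomically decrements and returns the
                // updated value, so exactly one thread sees the count hit zero.
                if (pending.decrementAndGet() == 0) {
                    System.out.println("last worker finished");
                }
            }).start();
        }
    }
}

Every example that follows is a variation on this "last one out turns off the lights" pattern.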
From source file:com.indeed.lsmtree.recordlog.TestRecordLogDirectory.java
public void testRandomWithReader() throws Exception {
    final RecordLogDirectory<String> fileCache = createRecordLogDirectory();
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < 10000; i++) {
                        int rand = r.nextInt(positions.size());
                        final RecordFile.Reader<String> reader = fileCache.reader(positions.get(rand));
                        assertTrue(reader.next());
                        assertEquals(reader.get(), strings.get(rand));
                        reader.close();
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    fileCache.close();
}
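done is initialized to the number of reader threads and decremented in a finally block, so each thread is counted as finished even if it fails; the main thread spin-waits with Thread.yield() until the count reaches zero.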
From source file:org.pentaho.di.job.entries.hadoopjobexecutor.JobEntryHadoopJobExecutor.java
/**
 * Restore the security manager if we're done executing all our threads.
 *
 * @param counter Thread counter
 * @param nesm    Security Manager we set
 */
private void restoreSecurityManager(AtomicInteger counter, NoExitSecurityManager nesm) {
    if (counter.decrementAndGet() == 0) {
        // Restore the cached security manager after all threads have completed
        smStack.removeSecurityManager(nesm);
    }
}
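Because decrementAndGet() is atomic, the == 0 comparison guarantees that exactly one caller, the last thread to finish, restores the cached security manager.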
From source file:org.apache.hadoop.hbase.zookeeper.lock.TestZKInterProcessReadWriteLock.java
@Test(timeout = 30000)
public void testReadLockDoesNotExcludeReaders() throws Exception {
    final String testName = "testReadLockDoesNotExcludeReaders";
    final ZKInterProcessReadWriteLock readWriteLock = getReadWriteLock(testName);
    final CountDownLatch locksAcquiredLatch = new CountDownLatch(NUM_THREADS);
    final AtomicInteger locksHeld = new AtomicInteger(0);
    List<Future<Void>> results = Lists.newArrayList();
    for (int i = 0; i < NUM_THREADS; ++i) {
        final String threadDesc = testName + i;
        results.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                ZKInterProcessReadLock readLock = readWriteLock.readLock(Bytes.toBytes(threadDesc));
                readLock.acquire();
                try {
                    locksHeld.incrementAndGet();
                    locksAcquiredLatch.countDown();
                    Thread.sleep(1000);
                } finally {
                    readLock.release();
                    locksHeld.decrementAndGet();
                }
                return null;
            }
        }));
    }
    locksAcquiredLatch.await();
    assertEquals(locksHeld.get(), NUM_THREADS);
    MultithreadedTestUtil.assertOnFutures(results);
}
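locksHeld is incremented while each read lock is held and decremented after release, letting the test assert that all NUM_THREADS readers were inside the lock at the same time.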
From source file:org.apache.hadoop.hbase.client.AsyncProcess.java
/**
 * Decrements the counters for a given region and the region server. MT Safe.
 */
protected void decTaskCounters(Collection<byte[]> regions, ServerName sn) {
    for (byte[] regBytes : regions) {
        AtomicInteger regionCnt = taskCounterPerRegion.get(regBytes);
        regionCnt.decrementAndGet();
    }
    taskCounterPerServer.get(sn).decrementAndGet();
    tasksInProgress.decrementAndGet();
    synchronized (tasksInProgress) {
        tasksInProgress.notifyAll();
    }
}
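All three counters are decremented atomically; the synchronized block then wakes any threads blocked waiting on tasksInProgress via notifyAll().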
From source file:edu.brown.workload.Workload.java
@Override
public void stopQuery(Object query_handle, VoltTable result) {
    if (query_handle instanceof QueryTrace) {
        QueryTrace query = (QueryTrace) query_handle;
        query.stop();
        if (result != null)
            query.setOutput(result);
        // Decrement open query counter for this batch
        Long txn_id = this.query_txn_xref.remove(query);
        assert (txn_id != null) : "Unexpected QueryTrace handle that doesn't have a txn id!";
        Map<Integer, AtomicInteger> m = this.xact_open_queries.get(txn_id);
        if (m != null) {
            AtomicInteger batch_ctr = m.get(query.getBatchId());
            int count = batch_ctr.decrementAndGet();
            assert (count >= 0) : "Invalid open query counter for batch #" + query.getBatchId()
                    + " in Txn #" + txn_id;
            if (debug.val)
                LOG.debug("Stopping trace for query " + query);
        } else {
            LOG.warn(String.format("No open query counters for txn #%d???", txn_id));
        }
    } else {
        LOG.fatal("Unable to stop query trace: Invalid query handle");
    }
}
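decrementAndGet() returns the updated count, which the assertion checks against zero to catch a query being stopped more times than it was started.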
From source file:com.netflix.curator.framework.recipes.leader.TestLeaderSelector.java
@Test
public void testKillSession() throws Exception {
    final Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final Semaphore semaphore = new Semaphore(0);
        final CountDownLatch interruptedLatch = new CountDownLatch(1);
        final AtomicInteger leaderCount = new AtomicInteger(0);
        LeaderSelectorListener listener = new LeaderSelectorListener() {
            private volatile Thread ourThread;

            @Override
            public void takeLeadership(CuratorFramework client) throws Exception {
                leaderCount.incrementAndGet();
                try {
                    ourThread = Thread.currentThread();
                    semaphore.release();
                    try {
                        Thread.sleep(1000000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        interruptedLatch.countDown();
                    }
                } finally {
                    leaderCount.decrementAndGet();
                }
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if ((newState == ConnectionState.LOST) && (ourThread != null)) {
                    ourThread.interrupt();
                }
            }
        };
        LeaderSelector leaderSelector1 = new LeaderSelector(client, PATH_NAME, listener);
        LeaderSelector leaderSelector2 = new LeaderSelector(client, PATH_NAME, listener);
        leaderSelector1.start();
        leaderSelector2.start();

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));
        KillSession.kill(client.getZookeeperClient().getZooKeeper(), server.getConnectString());
        Assert.assertTrue(timing.awaitLatch(interruptedLatch));

        timing.sleepABit();

        leaderSelector1.requeue();
        leaderSelector2.requeue();

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));
        Assert.assertEquals(leaderCount.get(), 1);

        leaderSelector1.close();
        leaderSelector2.close();
    } finally {
        client.close();
    }
}
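leaderCount is incremented on entering takeLeadership() and decremented in a finally block, so the final assertion that exactly one selector holds leadership remains valid even after the ZooKeeper session is killed.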
From source file:org.apache.distributedlog.admin.DistributedLogAdmin.java
private static Map<String, StreamCandidate> checkStreams(final Namespace namespace,
        final Collection<String> streams, final OrderedScheduler scheduler, final int concurrency)
        throws IOException {
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);
    final Map<String, StreamCandidate> candidateMap = new ConcurrentSkipListMap<String, StreamCandidate>();
    final AtomicInteger numPendingStreams = new AtomicInteger(streams.size());
    final CountDownLatch doneLatch = new CountDownLatch(1);
    Runnable checkRunnable = new Runnable() {
        @Override
        public void run() {
            while (!streamQueue.isEmpty()) {
                String stream;
                try {
                    stream = streamQueue.take();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
                StreamCandidate candidate;
                try {
                    LOG.info("Checking stream {}.", stream);
                    candidate = checkStream(namespace, stream, scheduler);
                    LOG.info("Checked stream {} - {}.", stream, candidate);
                } catch (Throwable e) {
                    LOG.error("Error on checking stream {} : ", stream, e);
                    doneLatch.countDown();
                    break;
                }
                if (null != candidate) {
                    candidateMap.put(stream, candidate);
                }
                if (numPendingStreams.decrementAndGet() == 0) {
                    doneLatch.countDown();
                }
            }
        }
    };
    Thread[] threads = new Thread[concurrency];
    for (int i = 0; i < concurrency; i++) {
        threads[i] = new Thread(checkRunnable, "check-thread-" + i);
        threads[i].start();
    }
    try {
        doneLatch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (numPendingStreams.get() != 0) {
        throw new IOException(numPendingStreams.get() + " streams left w/o checked");
    }
    for (int i = 0; i < concurrency; i++) {
        threads[i].interrupt();
        try {
            threads[i].join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    return candidateMap;
}
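numPendingStreams starts at the number of streams to check, and the worker that decrements it to zero opens the done latch, releasing the coordinating thread.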
From source file:com.twitter.distributedlog.admin.DistributedLogAdmin.java
private static Map<String, StreamCandidate> checkStreams(
        final com.twitter.distributedlog.DistributedLogManagerFactory factory,
        final Collection<String> streams, final ExecutorService executorService, final BookKeeperClient bkc,
        final String digestpw, final int concurrency) throws IOException {
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);
    final Map<String, StreamCandidate> candidateMap = new ConcurrentSkipListMap<String, StreamCandidate>();
    final AtomicInteger numPendingStreams = new AtomicInteger(streams.size());
    final CountDownLatch doneLatch = new CountDownLatch(1);
    Runnable checkRunnable = new Runnable() {
        @Override
        public void run() {
            while (!streamQueue.isEmpty()) {
                String stream;
                try {
                    stream = streamQueue.take();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
                StreamCandidate candidate;
                try {
                    LOG.info("Checking stream {}.", stream);
                    candidate = checkStream(factory, stream, executorService, bkc, digestpw);
                    LOG.info("Checked stream {} - {}.", stream, candidate);
                } catch (IOException e) {
                    LOG.error("Error on checking stream {} : ", stream, e);
                    doneLatch.countDown();
                    break;
                }
                if (null != candidate) {
                    candidateMap.put(stream, candidate);
                }
                if (numPendingStreams.decrementAndGet() == 0) {
                    doneLatch.countDown();
                }
            }
        }
    };
    Thread[] threads = new Thread[concurrency];
    for (int i = 0; i < concurrency; i++) {
        threads[i] = new Thread(checkRunnable, "check-thread-" + i);
        threads[i].start();
    }
    try {
        doneLatch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (numPendingStreams.get() != 0) {
        throw new IOException(numPendingStreams.get() + " streams left w/o checked");
    }
    for (int i = 0; i < concurrency; i++) {
        threads[i].interrupt();
        try {
            threads[i].join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    return candidateMap;
}
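This is the older com.twitter.distributedlog variant of the previous example; the pending-counter-plus-latch completion pattern is identical.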
From source file:com.sishuok.bigpipe.handler.BigpipeTaskReturnValueHandler.java
@Override
public void handleReturnValue(final Object returnValue, final MethodParameter returnType,
        final ModelAndViewContainer mavContainer, final NativeWebRequest webRequest) throws Exception {
    final BigPipeTask bigPipeTask = (BigPipeTask) returnValue;
    final HttpServletRequest request = webRequest.getNativeRequest(HttpServletRequest.class);
    final HttpServletResponse response = webRequest.getNativeResponse(HttpServletResponse.class);

    final DeferredResult<Void> deferredResult = new DeferredResult<Void>();
    mavContainer.setRequestHandled(true);
    WebAsyncUtils.getAsyncManager(request).startDeferredResultProcessing(deferredResult, mavContainer);

    final BigPipeContext context = new BigPipeContext(request.getContextPath(), bigPipeTask.getModel());

    // Run the frame pagelet first and render it synchronously
    final String framePageletName = bigPipeTask.getFramePageletName();
    final Pagelet framePagelet = pageletFinder.find(framePageletName);
    Assert.notNull(framePagelet, framePageletName + " pagelet not exists");
    final BigPipeContext frameContext = context.copy();
    final PageletResult framePageletResult = framePagelet.run(frameContext, response);
    final PageletView framePageletView = pageletViewResolver.resolve(framePageletResult);
    framePageletView.render(frameContext, response);

    final AtomicInteger counter = new AtomicInteger(bigPipeTask.getPageletNames().size());

    // Run the remaining N pagelets asynchronously
    for (String otherPageletName : bigPipeTask.getPageletNames()) {
        final Pagelet pagelet = pageletFinder.find(otherPageletName);
        Assert.notNull(pagelet, otherPageletName + " pagelet not exists");
        // Render each pagelet on the executor
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    final BigPipeContext pageletContext = context.copy();
                    final PageletResult pageletResult = pagelet.run(pageletContext, response);
                    final PageletView pageletView = pageletViewResolver.resolve(pageletResult);
                    pageletView.render(pageletContext, response);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                if (counter.decrementAndGet() <= 0) {
                    deferredResult.setResult(null);
                }
            }
        });
    }
}
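counter starts at the number of pagelets, and whichever asynchronous task decrements it to zero (or below, as a safety net) completes the DeferredResult and finishes the response.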
From source file:org.lol.reddit.reddit.api.RedditAPIIndividualSubredditDataRequester.java
public void performRequest(final Collection<String> subredditCanonicalIds, final TimestampBound timestampBound,
        final RequestResponseHandler<HashMap<String, RedditSubreddit>, SubredditRequestFailure> handler) {

    // TODO if there's a bulk API to do this, that would be good... :)

    final HashMap<String, RedditSubreddit> result = new HashMap<String, RedditSubreddit>();
    final AtomicBoolean stillOkay = new AtomicBoolean(true);
    final AtomicInteger requestsToGo = new AtomicInteger(subredditCanonicalIds.size());
    final AtomicLong oldestResult = new AtomicLong(Long.MAX_VALUE);

    final RequestResponseHandler<RedditSubreddit, SubredditRequestFailure> innerHandler
            = new RequestResponseHandler<RedditSubreddit, SubredditRequestFailure>() {

        @Override
        public void onRequestFailed(SubredditRequestFailure failureReason) {
            synchronized (result) {
                if (stillOkay.get()) {
                    stillOkay.set(false);
                    handler.onRequestFailed(failureReason);
                }
            }
        }

        @Override
        public void onRequestSuccess(RedditSubreddit innerResult, long timeCached) {
            synchronized (result) {
                if (stillOkay.get()) {
                    result.put(innerResult.getKey(), innerResult);
                    oldestResult.set(Math.min(oldestResult.get(), timeCached));
                    if (requestsToGo.decrementAndGet() == 0) {
                        handler.onRequestSuccess(result, oldestResult.get());
                    }
                }
            }
        }
    };

    for (String subredditCanonicalId : subredditCanonicalIds) {
        performRequest(subredditCanonicalId, timestampBound, innerHandler);
    }
}
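requestsToGo tracks the fan-out of per-subreddit requests; the callback that decrements it to zero delivers the aggregated result, while the synchronized block and the stillOkay flag ensure failure is reported at most once.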