List of usage examples for java.util.concurrent.atomic.AtomicInteger.incrementAndGet()
public final int incrementAndGet()
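Atomically increments the current value by one and returns the updated value. Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed source files) showing why the method is safe to use as a shared counter across threads:

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Ten threads each increment the shared counter 1,000 times.
        Thread[] threads = new Thread[10];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    // incrementAndGet() is atomic, so no updates are lost and
                    // each call returns a unique, monotonically rising value.
                    counter.incrementAndGet();
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }

        // Always prints 10000; a plain "int++" here could lose updates.
        System.out.println(counter.get());
    }
}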
From source file:com.microsoft.azure.servicebus.samples.jmstopicquickstart.JmsTopicQuickstart.java
private void receiveFromSubscription(ConnectionStringBuilder csb, Context context, ConnectionFactory cf,
        String name) throws NamingException, JMSException, InterruptedException {
    AtomicInteger totalReceived = new AtomicInteger(0);
    System.out.printf("Subscription %s: \n", name);
    Destination subscription = (Destination) context.lookup(name);

    // Create Connection
    Connection connection = cf.createConnection(csb.getSasKeyName(), csb.getSasKey());
    connection.start();

    // Create Session, no transaction, client ack
    Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);

    // Create consumer
    MessageConsumer consumer = session.createConsumer(subscription);

    // Set callback listener. Gets called for each received message.
    consumer.setMessageListener(message -> {
        try {
            System.out.printf("Received message %d with sq#: %s\n",
                    totalReceived.incrementAndGet(), // increments the counter
                    message.getJMSMessageID());
            message.acknowledge();
        } catch (Exception e) {
            System.out.printf("%s", e.toString());
        }
    });

    // wait on the main thread until all sent messages have been received
    while (totalReceived.get() < totalSend) {
        Thread.sleep(1000);
    }

    consumer.close();
    session.close();
    connection.stop();
    connection.close();
}
From source file:com.alibaba.wasp.jdbc.TestAtomicOperation.java
/**
 * Test multi-threaded row mutations.
 */
@Test
public void testRowMutationMultiThreads() throws IOException {
    LOG.info("Starting test testRowMutationMultiThreads");

    // Create 10 threads, each attempting 50 inserts of the same row; every
    // insert after the first is expected to fail, so the failure counter
    // should end up at (opsPerThread * numThreads - 1).
    int numThreads = 10;
    int opsPerThread = 50;
    AtomicOperation[] all = new AtomicOperation[numThreads];
    AtomicInteger failures = new AtomicInteger(0);

    // create all threads
    for (int i = 0; i < numThreads; i++) {
        try {
            all[i] = new AtomicOperation(entityGroup, opsPerThread, conn, failures) {
                @Override
                public void run() {
                    for (int i = 0; i < numOps; i++) {
                        try {
                            int lines = stmt.executeUpdate("insert into " + TABLE_NAME
                                    + " (column1,column2,column3) values(1,1,'wasptest')");
                            if (lines != 1) {
                                LOG.debug(r);
                                failures.incrementAndGet();
                                fail();
                            }
                        } catch (SQLException e) {
                            failures.incrementAndGet();
                        }
                    }
                }
            };
        } catch (SQLException e) {
        }
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
        all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
        try {
            all[i].join();
        } catch (InterruptedException e) {
        }
    }

    System.out.println(failures.get());
    assertEquals(opsPerThread * numThreads - 1, failures.get());
}
From source file:com.twitter.distributedlog.auditor.DLAuditor.java
private Map<String, Long> calculateStreamSpaceUsage(final URI uri,
        final com.twitter.distributedlog.DistributedLogManagerFactory factory) throws IOException {
    Collection<String> streams = factory.enumerateAllLogsInNamespace();
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);

    final Map<String, Long> streamSpaceUsageMap = new ConcurrentSkipListMap<String, Long>();
    final AtomicInteger numStreamsCollected = new AtomicInteger(0);
    executeAction(streamQueue, 10, new Action<String>() {
        @Override
        public void execute(String stream) throws IOException {
            streamSpaceUsageMap.put(stream, calculateStreamSpaceUsage(factory, stream));
            if (numStreamsCollected.incrementAndGet() % 1000 == 0) {
                logger.info("Calculated {} streams from uri {}.", numStreamsCollected.get(), uri);
            }
        }
    });
    return streamSpaceUsageMap;
}
From source file:com.jkoolcloud.tnt4j.streams.parsers.AbstractSyslogParser.java
/**
 * Determines whether a log entry has to be suppressed, depending on the {@link #suppressionLevel} value.
 * Calculates an MD5 hash over the log entry fields that are not ignored, then uses that hash to look up the
 * message occurrence count in the suppression cache. NOTE: cache entry lifetime depends on the
 * {@link #cacheSize} and {@link #cacheExpireDuration} values.
 * <p>
 * A log entry gets suppressed if:
 * <ul>
 * <li>{@link #suppressionLevel} value is {@code -1} and the log entry occurs more than once</li>
 * <li>{@link #suppressionLevel} value is a positive integer and the log entry occurrence count is not a
 * multiple of that number</li>
 * </ul>
 *
 * @param dataMap
 *            log entry resolved fields map
 * @return {@code null} if the log entry gets suppressed, or the same {@code dataMap} instance passed as the
 *         parameter if the log entry is not suppressed
 */
@SuppressWarnings("unchecked")
protected Map<String, Object> suppress(Map<String, Object> dataMap) {
    if (suppressionLevel != 0) {
        AtomicInteger invocations;
        cacheLock.lock();
        try {
            if (msc == null) {
                msc = buildCache(cacheSize, cacheExpireDuration);
            }
            String byteData = new String(getMD5(dataMap, ignoredFields));
            invocations = msc.getIfPresent(byteData);
            if (invocations == null) {
                invocations = new AtomicInteger();
                msc.put(byteData, invocations);
            }
        } finally {
            cacheLock.unlock();
        }
        if (invocations.incrementAndGet() > 1) {
            if (suppressionLevel == -1) {
                logger().log(OpLevel.DEBUG,
                        StreamsResources.getBundle(SyslogStreamConstants.RESOURCE_BUNDLE_NAME),
                        "AbstractSyslogParser.suppressing.event1", invocations);
                return null;
            }
            if (suppressionLevel > 0) {
                int evtSeqNumber = invocations.get() % suppressionLevel;
                if (evtSeqNumber != 0) {
                    logger().log(OpLevel.DEBUG,
                            StreamsResources.getBundle(SyslogStreamConstants.RESOURCE_BUNDLE_NAME),
                            "AbstractSyslogParser.suppressing.event2", evtSeqNumber, suppressionLevel);
                    return null;
                }
            }
        }
    }

    if (flattenStructuredData) {
        Object structData = dataMap.get(SyslogStreamConstants.FIELD_SYSLOG_MAP);
        if (structData instanceof Map) {
            Map<String, Map<String, Object>> structDataMap = (Map<String, Map<String, Object>>) structData;
            if (structDataMap.size() == 1) {
                Map.Entry<String, Map<String, Object>> sdme = structDataMap.entrySet().iterator().next();
                Map<String, Object> sdMap = sdme.getValue();
                sdMap.put(SyslogStreamConstants.SYSLOG_STRUCT_ID, sdme.getKey());
                dataMap.put(SyslogStreamConstants.FIELD_SYSLOG_MAP, sdMap);
            }
        }
    }
    return dataMap;
}
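The suppression counter above follows a common idiom: an AtomicInteger stored as the value of an expiring cache, created under a lock and incremented without one. A stripped-down sketch of that idiom using Guava's Cache follows; the class and method names are illustrative, not part of the TNT4J source.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class DuplicateSuppressor {
    // Counters expire after 30 seconds, so a burst of duplicates is
    // suppressed but a later recurrence is reported again.
    private final Cache<String, AtomicInteger> occurrences = CacheBuilder.newBuilder()
            .maximumSize(10_000)
            .expireAfterWrite(30, TimeUnit.SECONDS)
            .build();

    /** Returns {@code true} if an entry with this key was already seen and should be dropped. */
    public boolean shouldSuppress(String key) throws Exception {
        // Cache.get(key, loader) creates the counter atomically on first access,
        // so two threads racing on the same key share one AtomicInteger.
        AtomicInteger count = occurrences.get(key, AtomicInteger::new);
        return count.incrementAndGet() > 1;
    }
}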
From source file:org.cloudfoundry.identity.uaa.scim.endpoints.ScimUserEndpoints.java
private void incrementErrorCounts(ScimException e) {
    String series = UaaStringUtils.getErrorName(e);
    AtomicInteger value = errorCounts.get(series);
    if (value == null) {
        // Double-checked locking: synchronize only when the counter for this
        // error series may need to be created.
        synchronized (errorCounts) {
            value = errorCounts.get(series);
            if (value == null) {
                value = new AtomicInteger();
                errorCounts.put(series, value);
            }
        }
    }
    value.incrementAndGet();
}
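On a ConcurrentHashMap, the null-check-then-synchronize sequence above collapses into a single computeIfAbsent call. A minimal sketch of that alternative follows; it mirrors the UAA pattern but is not code from that project.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class ErrorCounters {
    private final ConcurrentMap<String, AtomicInteger> errorCounts = new ConcurrentHashMap<>();

    public int increment(String series) {
        // computeIfAbsent() creates the counter atomically the first time a
        // series is seen, replacing the explicit double-checked locking.
        return errorCounts.computeIfAbsent(series, k -> new AtomicInteger()).incrementAndGet();
    }
}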
From source file:org.apache.distributedlog.auditor.DLAuditor.java
private Map<String, Long> calculateStreamSpaceUsage(final URI uri, final Namespace namespace)
        throws IOException {
    Iterator<String> streams = namespace.getLogs();
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    while (streams.hasNext()) {
        streamQueue.add(streams.next());
    }

    final Map<String, Long> streamSpaceUsageMap = new ConcurrentSkipListMap<String, Long>();
    final AtomicInteger numStreamsCollected = new AtomicInteger(0);
    executeAction(streamQueue, 10, new Action<String>() {
        @Override
        public void execute(String stream) throws IOException {
            streamSpaceUsageMap.put(stream, calculateStreamSpaceUsage(namespace, stream));
            if (numStreamsCollected.incrementAndGet() % 1000 == 0) {
                logger.info("Calculated {} streams from uri {}.", numStreamsCollected.get(), uri);
            }
        }
    });
    return streamSpaceUsageMap;
}
From source file:com.netflix.curator.framework.recipes.queue.TestDistributedQueue.java
@Test
public void testErrorMode() throws Exception {
    Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final AtomicReference<CountDownLatch> latch = new AtomicReference<CountDownLatch>(
                new CountDownLatch(1));
        final AtomicInteger count = new AtomicInteger(0);
        QueueConsumer<TestQueueItem> consumer = new QueueConsumer<TestQueueItem>() {
            @Override
            public void consumeMessage(TestQueueItem message) throws Exception {
                // Fail the first delivery; the default error mode (REQUEUE)
                // redelivers the item, so the counter reaches 2.
                if (count.incrementAndGet() < 2) {
                    throw new Exception();
                }
                latch.get().countDown();
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
            }
        };
        DistributedQueue<TestQueueItem> queue = QueueBuilder.builder(client, consumer, serializer, QUEUE_PATH)
                .lockPath("/locks").buildQueue();
        try {
            queue.start();

            TestQueueItem item = new TestQueueItem("1");
            queue.put(item);
            Assert.assertTrue(timing.awaitLatch(latch.get()));
            Assert.assertEquals(count.get(), 2);

            queue.setErrorMode(ErrorMode.DELETE);

            count.set(0);
            latch.set(new CountDownLatch(1));

            item = new TestQueueItem("1");
            queue.put(item);
            Assert.assertFalse(latch.get().await(5, TimeUnit.SECONDS)); // consumer should get called only once
            Assert.assertEquals(count.get(), 1);
        } finally {
            queue.close();
        }
    } finally {
        client.close();
    }
}
From source file:org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFilesSplitRecovery.java
/**
 * This test exercises the path where there is a split after initial validation but before the atomic bulk
 * load call. We cannot use presplitting to test this path, so we actually inject a split just before the
 * atomic region load.
 */
@Test
public void testSplitWhileBulkLoadPhase() throws Exception {
    final String table = "splitWhileBulkloadPhase";
    setupTable(table, 10);
    populateTable(table, 1);
    assertExpectedTable(table, ROWCOUNT, 1);

    // Now let's cause trouble. This will occur after checks and cause bulk
    // files to fail when attempting to atomically import. This is recoverable.
    final AtomicInteger attemptedCalls = new AtomicInteger();
    LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {
        @Override
        protected void bulkLoadPhase(final HTable htable, final HConnection conn, ExecutorService pool,
                Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups)
                throws IOException {
            int i = attemptedCalls.incrementAndGet();
            if (i == 1) {
                // On first attempt force a split.
                forceSplit(table);
            }
            super.bulkLoadPhase(htable, conn, pool, queue, regionGroups);
        }
    };

    // create HFiles for different column families
    HTable t = new HTable(util.getConfiguration(), Bytes.toBytes(table));
    Path bulk = buildBulkFiles(table, 2);
    lih2.doBulkLoad(bulk, t);

    // Check that data was loaded. The three expected attempts are:
    // 1) failure because a split is needed, 2) load of split top, 3) load of split bottom.
    assertEquals(attemptedCalls.get(), 3);
    assertExpectedTable(table, ROWCOUNT, 2);
}
From source file:com.netflix.curator.framework.recipes.cache.TestPathChildrenCache.java
@Test
public void testRebuildNode() throws Exception {
    PathChildrenCache cache = null;
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    client.start();
    try {
        client.create().creatingParentsIfNeeded().forPath("/test/one", "one".getBytes());

        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicInteger counter = new AtomicInteger();
        final Semaphore semaphore = new Semaphore(1);
        cache = new PathChildrenCache(client, "/test", true) {
            @Override
            void getDataAndStat(String fullPath) throws Exception {
                semaphore.acquire();
                counter.incrementAndGet();
                super.getDataAndStat(fullPath);
                latch.countDown();
            }
        };
        cache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
        latch.await();

        int saveCounter = counter.get();
        client.setData().forPath("/test/one", "alt".getBytes());
        cache.rebuildNode("/test/one");
        Assert.assertEquals(cache.getCurrentData("/test/one").getData(), "alt".getBytes());
        Assert.assertEquals(saveCounter, counter.get());

        semaphore.release(1000);
    } finally {
        IOUtils.closeQuietly(cache);
        IOUtils.closeQuietly(client);
    }
}
From source file:com.github.gfx.android.orma.example.fragment.BenchmarkFragment.java
Single<Result> startSelectAllWithOrma() {
    return Single.fromCallable(() -> {
        long result = runWithBenchmark(() -> {
            final AtomicInteger count = new AtomicInteger();
            Todo_Selector todos = orma.selectFromTodo().orderByCreatedTimeAsc();
            for (Todo todo : todos) {
                @SuppressWarnings("unused")
                String title = todo.title;
                @SuppressWarnings("unused")
                String content = todo.content;
                @SuppressWarnings("unused")
                Date createdTime = todo.createdTime;
                count.incrementAndGet();
            }
            if (todos.count() != count.get()) {
                throw new AssertionError("unexpected get: " + count.get());
            }
            Log.d(TAG, "Orma/forEachAll count: " + count);
        });
        return new Result("Orma/forEachAll", result);
    }).subscribeOn(Schedulers.io()).observeOn(AndroidSchedulers.mainThread());
}