Usage examples for java.util.concurrent.atomic.AtomicInteger.incrementAndGet()

public final int incrementAndGet()

Atomically increments the current value by one and returns the updated value; it is the thread-safe equivalent of the pre-increment ++i.
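Before the real-world examples, here is a minimal sketch (assuming only the JDK, not drawn from any of the sources below) of the core guarantee: concurrent increments are never lost, so the final count is exact without any external locking.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);
        Runnable task = () -> {
            for (int i = 0; i < 10_000; i++) {
                counter.incrementAndGet(); // atomic ++counter; returns the new value
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        System.out.println(counter.get()); // always 20000, never less
    }
}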
From source file:com.bt.aloha.util.ConcurrentUpdateManagerTest.java
@Test
public void testConcurrentUpdateConflictAwawreGetsCalled() throws Exception {
    // setup
    final CountDownLatch firstWriterRead = new CountDownLatch(1);
    final CountDownLatch secondWriterWrote = new CountDownLatch(1);
    final AtomicInteger failuresCounter = new AtomicInteger();
    ConcurrentUpdateBlock concurrentUpdateBlock = new ConflictAwareConcurrentUpdateBlock() {
        public void execute() {
            DialogInfo di = dialogCollection.get(dialogId);
            log.debug("First writer read");
            firstWriterRead.countDown();
            log.debug("Waiting for second writer to write");
            try {
                secondWriterWrote.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
            dialogCollection.replace(di);
            log.debug("First writer replaced");
        }

        public String getResourceId() {
            return dialogId;
        }

        public void onConcurrentUpdateConflict() {
            failuresCounter.incrementAndGet();
        }
    };
    Runnable competingWriter = new Runnable() {
        public void run() {
            log.debug("Waiting for first writer to read");
            try {
                firstWriterRead.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
            DialogInfo di = dialogCollection.get(dialogId);
            dialogCollection.replace(di);
            log.debug("Second writer replaced");
            secondWriterWrote.countDown();
        }
    };
    // act
    new Thread(competingWriter).start();
    concurrentUpdateManager.executeConcurrentUpdate(concurrentUpdateBlock);
    // assert
    assertEquals(1, failuresCounter.get());
}
From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java
/**
 * Creates an instance.
 *
 * @param corePoolSize core size of the thread pool
 * @param maxPoolSize the max number of threads in the thread pool
 * @param maxConcurrentRequests maximum number of concurrent requests
 * @param client a client for making requests
 * @param clusterCoordinator the cluster coordinator to use for interacting with node statuses
 * @param connectionTimeout the connection timeout specified in milliseconds
 * @param readTimeout the read timeout specified in milliseconds
 * @param callback a callback that will be called whenever all of the responses have been gathered for a request. May be null.
 * @param eventReporter an EventReporter that can be used to notify users of interesting events. May be null.
 * @param nifiProperties properties
 */
public ThreadPoolRequestReplicator(final int corePoolSize, final int maxPoolSize, final int maxConcurrentRequests,
        final Client client, final ClusterCoordinator clusterCoordinator, final String connectionTimeout,
        final String readTimeout, final RequestCompletionCallback callback, final EventReporter eventReporter,
        final NiFiProperties nifiProperties) {
    if (corePoolSize <= 0) {
        throw new IllegalArgumentException("The Core Pool Size must be greater than zero.");
    } else if (maxPoolSize < corePoolSize) {
        throw new IllegalArgumentException("Max Pool Size must be >= Core Pool Size.");
    } else if (client == null) {
        throw new IllegalArgumentException("Client may not be null.");
    }

    this.client = client;
    this.clusterCoordinator = clusterCoordinator;
    this.connectionTimeoutMs = (int) FormatUtils.getTimeDuration(connectionTimeout, TimeUnit.MILLISECONDS);
    this.readTimeoutMs = (int) FormatUtils.getTimeDuration(readTimeout, TimeUnit.MILLISECONDS);
    this.maxConcurrentRequests = maxConcurrentRequests;
    this.responseMapper = new StandardHttpResponseMapper(nifiProperties);
    this.eventReporter = eventReporter;
    this.callback = callback;
    this.nifiProperties = nifiProperties;

    client.property(ClientProperties.CONNECT_TIMEOUT, connectionTimeoutMs);
    client.property(ClientProperties.READ_TIMEOUT, readTimeoutMs);
    client.property(ClientProperties.FOLLOW_REDIRECTS, Boolean.TRUE);

    final AtomicInteger threadId = new AtomicInteger(0);
    final ThreadFactory threadFactory = r -> {
        final Thread t = Executors.defaultThreadFactory().newThread(r);
        t.setDaemon(true);
        t.setName("Replicate Request Thread-" + threadId.incrementAndGet());
        return t;
    };

    executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, 5, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), threadFactory);

    maintenanceExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            t.setName(ThreadPoolRequestReplicator.class.getSimpleName() + " Maintenance Thread");
            return t;
        }
    });

    maintenanceExecutor.scheduleWithFixedDelay(() -> purgeExpiredRequests(), 1, 1, TimeUnit.SECONDS);
}
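The ThreadFactory above uses incrementAndGet() to give every pooled thread a unique, monotonically increasing name suffix, even when threads are created concurrently. A minimal self-contained sketch of that idiom, assuming only the JDK (class and thread names here are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedThreadFactoryDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger threadId = new AtomicInteger(0);
        ThreadFactory factory = r -> {
            Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            // each factory call claims the next integer atomically
            t.setName("Worker-" + threadId.incrementAndGet());
            return t;
        };
        ExecutorService pool = Executors.newFixedThreadPool(4, factory);
        for (int i = 0; i < 4; i++) {
            pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        }
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS); // prints Worker-1 .. Worker-4, each exactly once
    }
}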
From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessReadWriteLock.java
@Test
public void testBasic() throws Exception {
    final int CONCURRENCY = 8;
    final int ITERATIONS = 100;

    final Random random = new Random();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    final AtomicInteger maxConcurrentCount = new AtomicInteger(0);
    final AtomicInteger writeCount = new AtomicInteger(0);
    final AtomicInteger readCount = new AtomicInteger(0);

    List<Future<Void>> futures = Lists.newArrayList();
    ExecutorService service = Executors.newCachedThreadPool();
    for (int i = 0; i < CONCURRENCY; ++i) {
        Future<Void> future = service.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                        new RetryOneTime(1));
                client.start();
                try {
                    InterProcessReadWriteLock lock = new InterProcessReadWriteLock(client, "/lock");
                    for (int i = 0; i < ITERATIONS; ++i) {
                        if (random.nextInt(100) < 10) {
                            doLocking(lock.writeLock(), concurrentCount, maxConcurrentCount, random, 1);
                            writeCount.incrementAndGet();
                        } else {
                            doLocking(lock.readLock(), concurrentCount, maxConcurrentCount, random,
                                    Integer.MAX_VALUE);
                            readCount.incrementAndGet();
                        }
                    }
                } finally {
                    IOUtils.closeQuietly(client);
                }
                return null;
            }
        });
        futures.add(future);
    }

    for (Future<Void> future : futures) {
        future.get();
    }

    System.out.println("Writes: " + writeCount.get() + " - Reads: " + readCount.get() + " - Max Reads: "
            + maxConcurrentCount.get());

    Assert.assertTrue(writeCount.get() > 0);
    Assert.assertTrue(readCount.get() > 0);
    Assert.assertTrue(maxConcurrentCount.get() > 1);
}
From source file:org.apache.ignite.internal.processors.igfs.IgfsProcessorSelfTest.java
/**
 * Test make directories in multi-threaded environment.
 *
 * @throws Exception In case of any exception.
 */
@SuppressWarnings("TooBroadScope")
public void testMakeListDeleteDirsMultithreaded() throws Exception {
    assertListDir("/");

    final int max = 2 * 1000;
    final int threads = 50;
    final AtomicInteger cnt = new AtomicInteger();

    info("Create directories: " + max);

    GridTestUtils.runMultiThreaded(new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            for (int cur = cnt.incrementAndGet(); cur < max; cur = cnt.incrementAndGet())
                igfs.mkdirs(path(cur));

            return null;
        }
    }, threads, "grid-test-make-directories");

    info("Validate directories were created.");

    cnt.set(0); // Reset counter.

    GridTestUtils.runMultiThreaded(new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            for (int cur = cnt.incrementAndGet(); cur < max; cur = cnt.incrementAndGet()) {
                IgfsFile info = igfs.info(path(cur));

                assertNotNull("Expects file exist: " + cur, info);
                assertTrue("Expects file is a directory: " + cur, info.isDirectory());
            }

            return null;
        }
    }, threads, "grid-test-check-directories-exist");

    info("Validate directories removing.");

    cnt.set(0); // Reset counter.

    GridTestUtils.runMultiThreaded(new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            for (int cur = cnt.incrementAndGet(); cur < max; cur = cnt.incrementAndGet())
                igfs.delete(path(cur), true);

            return null;
        }
    }, threads, "grid-test-delete-directories");
}
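The loop header above doubles as a lock-free work-claiming scheme: each worker claims the next index via incrementAndGet(), so every index is processed by exactly one thread. A minimal sketch of just that idiom, assuming only the JDK (process() is a hypothetical stand-in for the per-index work):

import java.util.concurrent.atomic.AtomicInteger;

public class WorkClaimDemo {
    public static void main(String[] args) throws InterruptedException {
        final int max = 1000;
        final AtomicInteger cnt = new AtomicInteger();
        Runnable worker = () -> {
            // every index in [1, max) is claimed by exactly one thread
            for (int cur = cnt.incrementAndGet(); cur < max; cur = cnt.incrementAndGet()) {
                process(cur);
            }
        };
        Thread[] threads = new Thread[8];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(worker);
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
    }

    private static void process(int index) {
        // placeholder for per-index work, e.g. creating directory number 'index'
    }
}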
From source file:org.opennms.ng.services.poller.Poller.java
private int scheduleMatchingServices(String criteria) {
    String sql = "SELECT ifServices.nodeId AS nodeId, node.nodeLabel AS nodeLabel, ifServices.ipAddr AS ipAddr, "
            + "ifServices.serviceId AS serviceId, service.serviceName AS serviceName, ifServices.status as status, "
            + "outages.svcLostEventId AS svcLostEventId, events.eventUei AS svcLostEventUei, "
            + "outages.ifLostService AS ifLostService, outages.ifRegainedService AS ifRegainedService "
            + "FROM ifServices "
            + "JOIN node ON ifServices.nodeId = node.nodeId "
            + "JOIN service ON ifServices.serviceId = service.serviceId "
            + "LEFT OUTER JOIN outages ON "
            + "ifServices.nodeId = outages.nodeId AND "
            + "ifServices.ipAddr = outages.ipAddr AND "
            + "ifServices.serviceId = outages.serviceId AND "
            + "ifRegainedService IS NULL "
            + "LEFT OUTER JOIN events ON outages.svcLostEventId = events.eventid "
            + "WHERE ifServices.status in ('A','N')" + (criteria == null ? "" : " AND " + criteria);

    final AtomicInteger count = new AtomicInteger(0);

    Querier querier = new Querier(m_dataSource, sql) {
        @Override
        public void processRow(ResultSet rs) throws SQLException {
            if (scheduleService(rs.getInt("nodeId"), rs.getString("nodeLabel"), rs.getString("ipAddr"),
                    rs.getString("serviceName"), "A".equals(rs.getString("status")),
                    (Number) rs.getObject("svcLostEventId"), rs.getTimestamp("ifLostService"),
                    rs.getString("svcLostEventUei"))) {
                count.incrementAndGet();
            }
        }
    };
    querier.execute();

    return count.get();
}
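Here the AtomicInteger is less about concurrency than about Java's capture rules: a local variable referenced from an anonymous inner class must be (effectively) final, so a mutable AtomicInteger stands in for a plain int counter. A minimal sketch of that idiom, assuming only the JDK (forEachRow() is a hypothetical callback-driven API):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntConsumer;

public class CallbackCounterDemo {
    // hypothetical API that drives a callback once per row
    static void forEachRow(IntConsumer callback) {
        for (int row = 0; row < 5; row++) {
            callback.accept(row);
        }
    }

    public static void main(String[] args) {
        final AtomicInteger count = new AtomicInteger(0);
        forEachRow(row -> {
            if (row % 2 == 0) {
                count.incrementAndGet(); // mutate state from inside the callback
            }
        });
        System.out.println("Matched rows: " + count.get()); // prints 3
    }
}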
From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java
@Test
public void shouldDecodeNullAsSignature() throws Exception {
    String response = Resources.read("signature_null.json", this.getClass());
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    HttpContent responseChunk = new DefaultLastHttpContent(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8));

    GenericQueryRequest requestMock = mock(GenericQueryRequest.class);
    queue.add(requestMock);
    channel.writeInbound(responseHeader, responseChunk);
    latch.await(1, TimeUnit.SECONDS);
    assertEquals(1, firedEvents.size());
    GenericQueryResponse inbound = (GenericQueryResponse) firedEvents.get(0);

    final AtomicInteger invokeCounter1 = new AtomicInteger();
    assertResponse(inbound, true, ResponseStatus.SUCCESS, FAKE_REQUESTID, FAKE_CLIENTID, "success", "null",
            new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    invokeCounter1.incrementAndGet();
                    String item = buf.toString(CharsetUtil.UTF_8);
                    buf.release();
                    fail("no result expected, got " + item);
                }
            },
            new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    buf.release();
                    fail("no error expected");
                }
            },
            // no metrics in this json sample
            expectedMetricsCounts(0, 1));
    assertEquals(0, invokeCounter1.get());
}
From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java
@Test
public void shouldDecodeBooleanAsSignature() throws Exception {
    String response = Resources.read("signature_scalar.json", this.getClass());
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    HttpContent responseChunk = new DefaultLastHttpContent(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8));

    GenericQueryRequest requestMock = mock(GenericQueryRequest.class);
    queue.add(requestMock);
    channel.writeInbound(responseHeader, responseChunk);
    latch.await(1, TimeUnit.SECONDS);
    assertEquals(1, firedEvents.size());
    GenericQueryResponse inbound = (GenericQueryResponse) firedEvents.get(0);

    final AtomicInteger invokeCounter1 = new AtomicInteger();
    assertResponse(inbound, true, ResponseStatus.SUCCESS, FAKE_REQUESTID, FAKE_CLIENTID, "success", "true",
            new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    invokeCounter1.incrementAndGet();
                    String item = buf.toString(CharsetUtil.UTF_8);
                    buf.release();
                    fail("no result expected, got " + item);
                }
            },
            new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    buf.release();
                    fail("no error expected");
                }
            },
            // no metrics in this json sample
            expectedMetricsCounts(0, 1));
    assertEquals(0, invokeCounter1.get());
}
From source file:gobblin.couchbase.writer.CouchbaseWriterTest.java
private List<Pair<AbstractDocument, Future>> writeRecords(Iterator<AbstractDocument> recordIterator,
        CouchbaseWriter writer, int outstandingRequests, long kvTimeout, TimeUnit kvTimeoutUnit)
        throws DataConversionException, UnsupportedEncodingException {

    final BlockingQueue<Pair<AbstractDocument, Future>> outstandingCallQueue = new LinkedBlockingDeque<>(
            outstandingRequests);
    final List<Pair<AbstractDocument, Future>> failedFutures = new ArrayList<>(outstandingRequests);

    int index = 0;
    long runTime = 0;
    final AtomicInteger callbackSuccesses = new AtomicInteger(0);
    final AtomicInteger callbackFailures = new AtomicInteger(0);
    final ConcurrentLinkedDeque<Throwable> callbackExceptions = new ConcurrentLinkedDeque<>();
    Verifier verifier = new Verifier();
    while (recordIterator.hasNext()) {
        AbstractDocument doc = recordIterator.next();
        index++;
        verifier.onWrite(doc);
        final long startTime = System.nanoTime();
        Future callFuture = writer.write(doc, new WriteCallback<TupleDocument>() {
            @Override
            public void onSuccess(WriteResponse<TupleDocument> writeResponse) {
                callbackSuccesses.incrementAndGet();
            }

            @Override
            public void onFailure(Throwable throwable) {
                callbackFailures.incrementAndGet();
                callbackExceptions.add(throwable);
            }
        });
        drainQueue(outstandingCallQueue, 1, kvTimeout, kvTimeoutUnit, failedFutures);
        outstandingCallQueue.add(new Pair<>(doc, callFuture));
        runTime += System.nanoTime() - startTime;
    }
    int failedWrites = 0;
    long responseStartTime = System.nanoTime();
    drainQueue(outstandingCallQueue, outstandingRequests, kvTimeout, kvTimeoutUnit, failedFutures);
    runTime += System.nanoTime() - responseStartTime;

    for (Throwable failure : callbackExceptions) {
        System.out.println(failure.getClass() + " : " + failure.getMessage());
    }
    failedWrites += failedFutures.size();
    System.out.println("Total time to send " + index + " records = " + runTime / 1000000.0 + "ms, "
            + "Failed writes = " + failedWrites + " Callback Successes = " + callbackSuccesses.get()
            + " Callback Failures = " + callbackFailures.get());

    verifier.verify(writer.getBucket());
    return failedFutures;
}
From source file:com.github.podd.resources.test.UploadArtifactResourceImplTest.java
@Test
public final void testLoadArtifactConcurrency() throws Exception {
    // load test artifact
    final InputStream inputStream4Artifact = this.getClass()
            .getResourceAsStream(TestConstants.TEST_ARTIFACT_IMPORT_PSCIENCEv1);
    Assert.assertNotNull("Could not find test resource: " + TestConstants.TEST_ARTIFACT_IMPORT_PSCIENCEv1,
            inputStream4Artifact);
    final String nextTestArtifact = IOUtils.toString(inputStream4Artifact);

    final AtomicInteger threadSuccessCount = new AtomicInteger(0);
    final AtomicInteger perThreadSuccessCount = new AtomicInteger(0);
    final AtomicInteger threadStartCount = new AtomicInteger(0);
    final AtomicInteger perThreadStartCount = new AtomicInteger(0);
    final CountDownLatch openLatch = new CountDownLatch(1);
    // Changing this from 8 to 9 on my machine may be triggering a restlet bug
    final int threadCount = 9;
    final int perThreadCount = 2;
    final CountDownLatch closeLatch = new CountDownLatch(threadCount);
    for (int i = 0; i < threadCount; i++) {
        final int number = i;
        final Runnable runner = new Runnable() {
            @Override
            public void run() {
                try {
                    openLatch.await(55000, TimeUnit.MILLISECONDS);
                    threadStartCount.incrementAndGet();
                    for (int j = 0; j < perThreadCount; j++) {
                        perThreadStartCount.incrementAndGet();
                        ClientResource uploadArtifactClientResource = null;
                        try {
                            uploadArtifactClientResource = new ClientResource(UploadArtifactResourceImplTest.this
                                    .getUrl(PoddWebConstants.PATH_ARTIFACT_UPLOAD));
                            AbstractResourceImplTest.setupThreading(uploadArtifactClientResource.getContext());
                            final Representation input = UploadArtifactResourceImplTest.this
                                    .buildRepresentationFromResource(TestConstants.TEST_ARTIFACT_IMPORT_PSCIENCEv1,
                                            MediaType.APPLICATION_RDF_XML);
                            final Representation results = UploadArtifactResourceImplTest.this
                                    .doTestAuthenticatedRequest(uploadArtifactClientResource, Method.POST, input,
                                            MediaType.APPLICATION_RDF_XML, Status.SUCCESS_OK,
                                            AbstractResourceImplTest.WITH_ADMIN);

                            // verify: results (expecting the added artifact's ontology IRI)
                            final String body = UploadArtifactResourceImplTest.this.getText(results);
                            final Collection<InferredOWLOntologyID> ontologyIDs = OntologyUtils
                                    .stringToOntologyID(body, RDFFormat.RDFXML);
                            Assert.assertNotNull("No ontology IDs in response", ontologyIDs);
                            Assert.assertEquals("More than 1 ontology ID in response", 1, ontologyIDs.size());
                            Assert.assertTrue("Ontology ID not of expected format",
                                    ontologyIDs.iterator().next().toString().contains("artifact:1:version:1"));
                            perThreadSuccessCount.incrementAndGet();
                        } finally {
                            UploadArtifactResourceImplTest.this.releaseClient(uploadArtifactClientResource);
                        }
                    }
                    threadSuccessCount.incrementAndGet();
                } catch (final Throwable e) {
                    e.printStackTrace();
                    Assert.fail("Failed in test: " + number);
                } finally {
                    closeLatch.countDown();
                }
            }
        };
        new Thread(runner, "TestThread" + number).start();
    }
    // all threads are waiting on the latch.
    openLatch.countDown(); // release the latch
    // all threads are now running concurrently.
    closeLatch.await(50000, TimeUnit.MILLISECONDS);
    // closeLatch.await();
    // Verify that there were no startup failures
    Assert.assertEquals("Some threads did not all start successfully", threadCount, threadStartCount.get());
    Assert.assertEquals("Some thread loops did not start successfully", perThreadCount * threadCount,
            perThreadStartCount.get());
    // Verify that there were no failures, as the count is only incremented for successes,
    // whereas the closeLatch must always be counted down, even for failures
    Assert.assertEquals("Some thread loops did not complete successfully", perThreadCount * threadCount,
            perThreadSuccessCount.get());
    Assert.assertEquals("Some threads did not complete successfully", threadCount, threadSuccessCount.get());
}
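This test combines two CountDownLatches (a starting gun and a completion barrier) with AtomicInteger counters so it can assert exact progress counts afterwards. A minimal sketch of that pattern in isolation, assuming only the JDK:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class LatchCounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final int threadCount = 4;
        final CountDownLatch openLatch = new CountDownLatch(1);
        final CountDownLatch closeLatch = new CountDownLatch(threadCount);
        final AtomicInteger successCount = new AtomicInteger(0);
        for (int i = 0; i < threadCount; i++) {
            new Thread(() -> {
                try {
                    openLatch.await(5, TimeUnit.SECONDS); // wait for the starting gun
                    successCount.incrementAndGet();       // record one success
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    closeLatch.countDown();               // always signal completion
                }
            }).start();
        }
        openLatch.countDown();                            // release all workers at once
        closeLatch.await(5, TimeUnit.SECONDS);            // wait for all workers to finish
        System.out.println(successCount.get() + " of " + threadCount + " threads succeeded");
    }
}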
From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java
@Test
public void shouldDecodeOneRowResponseWithNoClientID() throws Exception {
    String response = Resources.read("no_client_id.json", this.getClass());
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    HttpContent responseChunk = new DefaultLastHttpContent(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8));

    GenericQueryRequest requestMock = mock(GenericQueryRequest.class);
    queue.add(requestMock);
    channel.writeInbound(responseHeader, responseChunk);
    latch.await(1, TimeUnit.SECONDS);
    assertEquals(1, firedEvents.size());
    GenericQueryResponse inbound = (GenericQueryResponse) firedEvents.get(0);

    final AtomicInteger invokeCounter1 = new AtomicInteger();
    assertResponse(inbound, true, ResponseStatus.SUCCESS, FAKE_REQUESTID, "", "success", FAKE_SIGNATURE,
            new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    invokeCounter1.incrementAndGet();
                    String response = buf.toString(CharsetUtil.UTF_8);
                    try {
                        Map found = mapper.readValue(response, Map.class);
                        assertEquals(12, found.size());
                        assertEquals("San Francisco", found.get("city"));
                        assertEquals("United States", found.get("country"));
                        Map geo = (Map) found.get("geo");
                        assertNotNull(geo);
                        assertEquals(3, geo.size());
                        assertEquals("ROOFTOP", geo.get("accuracy"));
                    } catch (IOException e) {
                        assertFalse(true);
                    }
                }
            },
            new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    fail("no error expected");
                }
            },
            expectedMetricsCounts(0, 1));
    assertEquals(1, invokeCounter1.get());
}