List of usage examples for java.util.concurrent.atomic.AtomicInteger.incrementAndGet()
public final int incrementAndGet()
Atomically increments the current value by one and returns the updated value.
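Before the project examples below, here is a minimal self-contained sketch (the class name and iteration count are illustrative, not taken from any of the projects) showing the method's basic contract: each call atomically adds one and returns the updated value, so concurrent increments are never lost.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;

public class IncrementAndGetDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(0);

        // incrementAndGet() is atomic, so increments from parallel
        // threads are applied without loss and without external locking.
        IntStream.range(0, 1_000).parallel().forEach(i -> counter.incrementAndGet());

        System.out.println(counter.get()); // always prints 1000
    }
}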
From source file:org.apache.tinkerpop.gremlin.server.GremlinDriverIntegrateTest.java
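Counts results while iterating a ResultSet: incrementAndGet() advances the expected value before it is compared against each streamed result.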
@Test
public void shouldIterate() throws Exception {
    final Cluster cluster = Cluster.open();
    final Client client = cluster.connect();

    final ResultSet results = client.submit("[1,2,3,4,5,6,7,8,9]");
    final Iterator<Result> itty = results.iterator();
    final AtomicInteger counter = new AtomicInteger(0);
    while (itty.hasNext()) {
        counter.incrementAndGet();
        assertEquals(counter.get(), itty.next().getInt());
    }

    assertEquals(9, counter.get());
    assertThat(results.allItemsAvailable(), is(true));

    // can't stream it again
    assertThat(results.iterator().hasNext(), is(false));

    cluster.close();
}
From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java
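Uses incrementAndGet() in a batch callback to count produced batches and stop the runner after the expected batch, both before and after the late spool directory appears.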
@Test
public void testAllowLateDirectory() throws Exception {
    File f = new File("target", UUID.randomUUID().toString());

    SpoolDirConfigBean conf = new SpoolDirConfigBean();
    conf.dataFormat = DataFormat.TEXT;
    conf.spoolDir = f.getAbsolutePath();
    conf.batchSize = 10;
    conf.overrunLimit = 100;
    conf.poolingTimeoutSecs = 1;
    conf.filePattern = "file-[0-9].log";
    conf.pathMatcherMode = PathMatcherMode.GLOB;
    conf.maxSpoolFiles = 10;
    conf.initialFileToProcess = null;
    conf.dataFormatConfig.compression = Compression.NONE;
    conf.dataFormatConfig.filePatternInArchive = "*";
    conf.errorArchiveDir = null;
    conf.postProcessing = PostProcessingOptions.ARCHIVE;
    conf.archiveDir = createTestDir();
    conf.retentionTimeMins = 10;
    conf.dataFormatConfig.textMaxLineLen = 10;
    conf.dataFormatConfig.onParseError = OnParseError.ERROR;
    conf.dataFormatConfig.maxStackTraceLines = 0;

    TSpoolDirSource source = new TSpoolDirSource(conf);
    PushSourceRunner runner = new PushSourceRunner.Builder(TSpoolDirSource.class, source)
            .addOutputLane("lane")
            .build();

    // Late directories not allowed, init should fail.
    conf.allowLateDirectory = false;
    try {
        runner.runInit();
        Assert.fail("Should throw an exception if the directory does not exist");
    } catch (StageException e) {
        // Expected
    }

    // Late directories allowed, wait and should be able to detect the file and read.
    conf.allowLateDirectory = true;
    TSpoolDirSource sourceWithLateDirectory = new TSpoolDirSource(conf);
    PushSourceRunner runner2 = new PushSourceRunner.Builder(TSpoolDirSource.class, sourceWithLateDirectory)
            .addOutputLane("lane")
            .build();
    AtomicInteger batchCount = new AtomicInteger(0);
    runner2.runInit();

    try {
        runner2.runProduce(new HashMap<>(), 10, output -> {
            batchCount.incrementAndGet();
            if (batchCount.get() == 1) {
                runner2.setStop();
            }
        });
        runner2.waitOnProduce();
        TestOffsetUtil.compare(NULL_FILE_OFFSET, runner2.getOffsets());
        Assert.assertEquals(1, runner2.getEventRecords().size());
        Assert.assertEquals("no-more-data", runner2.getEventRecords().get(0).getEventType());

        Assert.assertTrue(f.mkdirs());

        File file = new File(source.spoolDir, "file-0.log").getAbsoluteFile();
        Files.createFile(file.toPath());

        source.file = file;
        source.offset = 1;
        source.maxBatchSize = 10;

        Thread.sleep(5000L);

        PushSourceRunner runner3 = new PushSourceRunner.Builder(TSpoolDirSource.class, source)
                .addOutputLane("lane")
                .build();
        runner3.runInit();
        runner3.runProduce(ImmutableMap.of(Source.POLL_SOURCE_OFFSET_KEY, "file-0.log::1"), 10, output -> {
            batchCount.incrementAndGet();
            if (batchCount.get() > 1) {
                runner3.setStop();
            }
        });
        runner3.waitOnProduce();

        TestOffsetUtil.compare("file-0.log::1", runner3.getOffsets());
        Assert.assertEquals(1, runner3.getEventRecords().size());
        Assert.assertEquals("new-file", runner3.getEventRecords().get(0).getEventType());
        runner3.runDestroy();
    } finally {
        runner2.runDestroy();
    }
}
From source file:org.apache.blur.shell.QueryCommand.java
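Uses the AtomicInteger as a mutable line cursor: incrementAndGet() advances the table-display row after a family header or record row has been written.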
private String displayRecordInRowMultiFamilyView(int result, final TableDisplay tableDisplay,
        final AtomicInteger line, final Map<String, List<String>> columnOrder, final String currentFamily,
        final Record record) {
    int c = 3;
    List<String> orderedColumns = getOrderColumnValues(record, columnOrder);
    String family = record.getFamily();
    if (!family.equals(currentFamily)) {
        List<String> list = columnOrder.get(family);
        for (int i = 0; i < list.size(); i++) {
            tableDisplay.set(i + c, line.get(),
                    highlight(getTruncatedVersion(toStringBinary(family + "." + list.get(i)))));
        }
        tableDisplay.set(0, line.get(), white(toStringBinary(Integer.toString(result))));
        line.incrementAndGet();
    }
    tableDisplay.set(2, line.get(), white(getTruncatedVersion(toStringBinary(record.getRecordId()))));
    for (String oc : orderedColumns) {
        if (oc != null) {
            tableDisplay.set(c, line.get(), white(getTruncatedVersion(toStringBinary(oc))));
        }
        c++;
    }
    tableDisplay.set(0, line.get(), white(toStringBinary(Integer.toString(result))));
    line.incrementAndGet();
    return family;
}
From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java
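Counts records received by a WireMock stub and events dropped by the sink, verifying that the bounded buffer drops exactly the expected number of events.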
@Test
public void testRespectsBufferMax() throws InterruptedException {
    final AtomicInteger droppedEvents = new AtomicInteger(0);
    final Semaphore semaphoreA = new Semaphore(0);
    final Semaphore semaphoreB = new Semaphore(0);
    final Semaphore semaphoreC = new Semaphore(-2);
    final AtomicInteger recordsReceived = new AtomicInteger(0);

    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        recordsReceived.incrementAndGet();

        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        assertSample(r.getTimersList(), "timer", 7d);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 9d);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setMaxBatchSize(2)
            .setParallelism(1)
            .setBufferSize(5)
            .setEmptyQueueInterval(Duration.ofMillis(1000))
            .setEventHandler(
                    new RespectsMaxBufferEventHandler(semaphoreA, semaphoreB, semaphoreC, droppedEvents))
            .build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(),
            createQuantityMap("timer", TsdQuantity.newInstance(7d, null)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(9d, null)));

    // Add one event to be used as a synchronization point
    sink.record(event);
    semaphoreA.acquire();

    // Add the actual events to analyze
    for (int x = 0; x < 10; x++) {
        sink.record(event);
    }

    semaphoreB.release();
    semaphoreC.acquire();

    // Ensure expected handler was invoked
    Assert.assertEquals(5, droppedEvents.get());

    // Assert number of records received
    Assert.assertEquals(6, recordsReceived.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(4, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}
From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java
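Tracks consecutive slow responses per node: incrementAndGet() returns the new streak length, and the counter is reset once a warning is logged or the node responds quickly again.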
/**
 * When all nodes have completed a request and provided a response (or have timed out), this method will be invoked
 * to handle calling the Callback that was provided for the request, if any, and handle any cleanup or post-processing
 * related to the request.
 *
 * @param requestId the ID of the request that has completed
 */
private void onCompletedResponse(final String requestId) {
    final AsyncClusterResponse response = responseMap.get(requestId);

    if (response != null && callback != null) {
        try {
            callback.afterRequest(response.getURIPath(), response.getMethod(),
                    response.getCompletedNodeResponses());
        } catch (final Exception e) {
            logger.warn(
                    "Completed request {} {} but failed to properly handle the Request Completion Callback due to {}",
                    response.getMethod(), response.getURIPath(), e.toString());
            logger.warn("", e);
        }
    }

    if (response != null && logger.isDebugEnabled()) {
        logTimingInfo(response);
    }

    // If we have any nodes that are slow to respond, keep track of this. If the same node is slow 3 times in
    // a row, log a warning to indicate that the node is responding slowly.
    final Set<NodeIdentifier> slowResponseNodes = ResponseUtils.findLongResponseTimes(response, 1.5D);
    for (final NodeIdentifier nodeId : response.getNodesInvolved()) {
        final AtomicInteger counter = sequentialLongRequestCounts.computeIfAbsent(nodeId,
                id -> new AtomicInteger(0));
        if (slowResponseNodes.contains(nodeId)) {
            final int sequentialLongRequests = counter.incrementAndGet();
            if (sequentialLongRequests >= 3) {
                final String message = "Response time from " + nodeId
                        + " was slow for each of the last 3 requests made. "
                        + "To see more information about timing, enable DEBUG logging for " + logger.getName();

                logger.warn(message);
                if (eventReporter != null) {
                    eventReporter.reportEvent(Severity.WARNING, "Node Response Time", message);
                }

                counter.set(0);
            }
        } else {
            counter.set(0);
        }
    }
}
From source file:org.glassfish.jersey.examples.sseitemstore.jersey.JerseyItemStoreResourceTest.java
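Counts "size" server-sent events received across all listeners so the total can be asserted against items.size() * MAX_LISTENERS at the end of the test.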
/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link ItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<>(MAX_LISTENERS);
    final EventSource[] sources = new EventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        indexQueues.add(indexes);

        es.register(inboundEvent -> {
            try {
                if (inboundEvent.getName() == null) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId()
                            + " data=" + data);
                    indexes.add(items.indexOf(data));
                } else if ("size".equals(inboundEvent.getName())) {
                    sizeEventsCount.incrementAndGet();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                indexes.add(-999);
            } finally {
                latch.countDown();
            }
        });
    }

    try {
        open(sources);

        for (String item : items) {
            postItem(itemsTarget, item);
        }

        assertTrue("Waiting to receive all events has timed out.",
                latch.await((1000 + MAX_LISTENERS * EventSource.RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    for (String item : items) {
        assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item));
    }

    int queueId = 0;
    for (Queue<Integer> indexes : indexQueues) {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId,
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId, items.size(),
                indexes.size());
        queueId++;
    }

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}
From source file:com.netflix.curator.framework.recipes.atomic.TestDistributedAtomicLong.java
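Uses AtomicInteger counters to tally ordering errors, failed increments, and retry statistics across a simulated run of DistributedAtomicLong.increment().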
private void doSimulation(int executionQty, SummaryStatistics timingStats, AtomicInteger optimisticTries,
        AtomicInteger promotedLockTries, AtomicInteger failures, AtomicInteger errors) throws Exception {
    Random random = new Random();

    long previousValue = -1;
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    client.start();
    try {
        RetryPolicy retryPolicy = new ExponentialBackoffRetry(3, 3);
        PromotedToLock.Builder builder = PromotedToLock.builder().lockPath("/lock").retryPolicy(retryPolicy);

        DistributedAtomicLong dal = new DistributedAtomicLong(client, "/counter", retryPolicy,
                builder.build());
        for (int i = 0; i < executionQty; ++i) {
            Thread.sleep(random.nextInt(10));

            long start = System.currentTimeMillis();
            AtomicValue<Long> value = dal.increment();
            long elapsed = System.currentTimeMillis() - start;
            timingStats.addValue(elapsed);

            if (value.succeeded()) {
                if (value.postValue() <= previousValue) {
                    errors.incrementAndGet();
                }
                previousValue = value.postValue();
            } else {
                failures.incrementAndGet();
            }

            optimisticTries.addAndGet(value.getStats().getOptimisticTries());
            promotedLockTries.addAndGet(value.getStats().getPromotedLockTries());
        }
    } finally {
        client.close();
    }
}
From source file:org.apache.hadoop.hbase.client.TestAsyncProcess.java
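Counts callback invocations: the Batch.Callback increments the counter on each update, and the test asserts that it fired exactly once.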
@Test
public void testSubmitWithCB() throws Exception {
    ClusterConnection hc = createHConnection();
    final AtomicInteger updateCalled = new AtomicInteger(0);
    Batch.Callback<Object> cb = new Batch.Callback<Object>() {
        @Override
        public void update(byte[] region, byte[] row, Object result) {
            updateCalled.incrementAndGet();
        }
    };
    AsyncProcess ap = new MyAsyncProcess(hc, conf);

    List<Put> puts = new ArrayList<Put>();
    puts.add(createPut(1, true));

    final AsyncRequestFuture ars = ap.submit(DUMMY_TABLE, puts, false, cb, false);
    Assert.assertTrue(puts.isEmpty());
    ars.waitUntilDone();
    Assert.assertEquals(updateCalled.get(), 1);
}
From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphore.java
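Tracks concurrent lease holders: activeQty.incrementAndGet() returns the new active count, which is compared against maxLeases to record the high-water mark and verify the semaphore's limit.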
@Test
public void testRelease1AtATime() throws Exception {
    final int CLIENT_QTY = 10;
    final int MAX = CLIENT_QTY / 2;

    final AtomicInteger maxLeases = new AtomicInteger(0);
    final AtomicInteger activeQty = new AtomicInteger(0);
    final AtomicInteger uses = new AtomicInteger(0);

    List<Future<Object>> futures = Lists.newArrayList();
    ExecutorService service = Executors.newFixedThreadPool(CLIENT_QTY);
    for (int i = 0; i < CLIENT_QTY; ++i) {
        Future<Object> f = service.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                        new RetryOneTime(1));
                client.start();
                try {
                    InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, "/test", MAX);
                    Lease lease = semaphore.acquire(10, TimeUnit.SECONDS);
                    Assert.assertNotNull(lease);
                    uses.incrementAndGet();
                    try {
                        synchronized (maxLeases) {
                            int qty = activeQty.incrementAndGet();
                            if (qty > maxLeases.get()) {
                                maxLeases.set(qty);
                            }
                        }
                        Thread.sleep(500);
                    } finally {
                        activeQty.decrementAndGet();
                        lease.close();
                    }
                } finally {
                    client.close();
                }
                return null;
            }
        });
        futures.add(f);
    }

    for (Future<Object> f : futures) {
        f.get();
    }

    Assert.assertEquals(uses.get(), CLIENT_QTY);
    Assert.assertEquals(maxLeases.get(), MAX);
}
From source file:com.streamsets.pipeline.stage.origin.spooldir.TestSpoolDirSource.java
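Drives a multi-batch test: incrementAndGet() counts batches inside the produce callback, and the running count selects the per-batch assertions and the stop condition.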
@Test
public void testAdvanceToNextSpoolFile() throws Exception {
    TSpoolDirSource source = createSource(null);
    PushSourceRunner runner = new PushSourceRunner.Builder(TSpoolDirSource.class, source)
            .addOutputLane("lane")
            .build();

    File file1 = new File(source.spoolDir, "file-0.log").getAbsoluteFile();
    Files.createFile(file1.toPath());
    File file2 = new File(source.spoolDir, "file-1.log").getAbsoluteFile();
    Files.createFile(file2.toPath());

    source.file = file1;
    source.offset = 0;
    source.maxBatchSize = 10;
    AtomicInteger batchCount = new AtomicInteger(0);

    runner.runInit();
    try {
        runner.runProduce(ImmutableMap.of(Source.POLL_SOURCE_OFFSET_KEY, "file-0.log::0"), 10, output -> {
            batchCount.incrementAndGet();
            TSpoolDirRunnable runnable = source.getTSpoolDirRunnable();

            if (batchCount.get() == 1) {
                Assert.assertEquals("file-0.log", output.getOffsetEntity());
                Assert.assertEquals("{\"POS\":\"0\"}", output.getNewOffset());
                Assert.assertTrue(runnable.produceCalled);

                Assert.assertEquals(1, runner.getEventRecords().size());
                Assert.assertEquals("new-file", runner.getEventRecords().get(0).getEventType());

                runnable.produceCalled = false;
                runnable.offsetIncrement = -1;
            } else if (batchCount.get() == 2) {
                Assert.assertEquals("file-0.log", output.getOffsetEntity());
                Assert.assertEquals("{\"POS\":\"-1\"}", output.getNewOffset());
                Assert.assertTrue(runnable.produceCalled);

                Assert.assertEquals(2, runner.getEventRecords().size());
                Assert.assertEquals("new-file", runner.getEventRecords().get(0).getEventType());
                Assert.assertEquals("finished-file", runner.getEventRecords().get(1).getEventType());
                Assert.assertEquals(0, runner.getEventRecords().get(1).get("/error-count").getValueAsInteger());
                Assert.assertEquals(0, runner.getEventRecords().get(1).get("/record-count").getValueAsInteger());

                runnable.file = file2;
            } else if (batchCount.get() == 4) {
                runnable.produceCalled = false;
                runnable.offset = 0;
                runnable.offsetIncrement = 0;
                runner.setStop();
            } else if (batchCount.get() > 4) {
                runner.setStop();
            }
        });
        runner.waitOnProduce();

        Assert.assertEquals(4, batchCount.get());
        TestOffsetUtil.compare("file-1.log::-1", runner.getOffsets());
        Assert.assertFalse(source.produceCalled);

        // 2 each of new-file and finished-file and 1 no-more-data
        Assert.assertEquals(5, runner.getEventRecords().size());

        // check for LineageEvents.
        List<LineageEvent> events = runner.getLineageEvents();
        Assert.assertEquals(2, events.size());
        Assert.assertEquals(LineageEventType.ENTITY_READ, events.get(0).getEventType());
        Assert.assertEquals(LineageEventType.ENTITY_READ, events.get(1).getEventType());
        Assert.assertTrue(events.get(0).getSpecificAttribute(LineageSpecificAttribute.ENTITY_NAME)
                .contains("file-0.log"));
        Assert.assertTrue(events.get(1).getSpecificAttribute(LineageSpecificAttribute.ENTITY_NAME)
                .contains("file-1.log"));
    } finally {
        runner.runDestroy();
    }
}