Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicInteger#incrementAndGet().

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value by one and returns the updated value, with memory effects as specified by VarHandle#getAndAdd. Equivalent to addAndGet(1).
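
Before the examples, here is a minimal self-contained sketch (not taken from the sources below) of what incrementAndGet() buys you: two threads bump a shared counter without locks, and no update is lost.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet(); // atomic read-modify-write; returns the new value
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        System.out.println(counter.get()); // always 2000; increments are never lost
    }
}

A plain int field updated with counter++ would race here, because the increment is a separate read, add, and write; incrementAndGet() performs all three as one atomic operation.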

Usage

From source file:org.apache.hadoop.hbase.client.TestAsyncTable.java

@Test
public void testCheckAndMutate() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    int count = 10;
    CountDownLatch putLatch = new CountDownLatch(count + 1);
    table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).thenRun(() -> putLatch.countDown());
    IntStream.range(0, count)
            .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE))
                    .thenRun(() -> putLatch.countDown()));
    putLatch.await();

    AtomicInteger successCount = new AtomicInteger(0);
    AtomicInteger successIndex = new AtomicInteger(-1);
    CountDownLatch mutateLatch = new CountDownLatch(count);
    IntStream.range(0, count).forEach(i -> {
        RowMutations mutation = new RowMutations(row);
        try {
            mutation.add(new Delete(row).addColumn(FAMILY, QUALIFIER));
            mutation.add(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), concat(VALUE, i)));
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
        table.checkAndMutate(row, FAMILY, QUALIFIER, VALUE, mutation).thenAccept(x -> {
            if (x) {
                successCount.incrementAndGet();
                successIndex.set(i);
            }
            mutateLatch.countDown();
        });
    });
    mutateLatch.await();
    assertEquals(1, successCount.get());
    Result result = table.get(new Get(row)).get();
    IntStream.range(0, count).forEach(i -> {
        if (i == successIndex.get()) {
            assertArrayEquals(concat(VALUE, i), result.getValue(FAMILY, concat(QUALIFIER, i)));
        } else {
            assertArrayEquals(VALUE, result.getValue(FAMILY, concat(QUALIFIER, i)));
        }
    });
}

From source file:com.yahoo.pulsar.broker.service.PersistentQueueE2ETest.java

@Test(enabled = false)
public void testRoundRobinBatchDistribution() throws Exception {
    final String topicName = "persistent://prop/use/ns-abc/shared-topic5";
    final String subName = "sub5";
    final int numMsgs = 137; /* some random number different than default batch size of 100 */

    final AtomicInteger counter1 = new AtomicInteger(0);
    final AtomicInteger counter2 = new AtomicInteger(0);
    final AtomicInteger counter3 = new AtomicInteger(0);

    final CountDownLatch latch = new CountDownLatch(numMsgs * 3);

    ConsumerConfiguration conf1 = new ConsumerConfiguration();
    conf1.setSubscriptionType(SubscriptionType.Shared);
    conf1.setReceiverQueueSize(10);
    conf1.setMessageListener((consumer, msg) -> {
        try {
            counter1.incrementAndGet();
            consumer.acknowledge(msg);
            latch.countDown();
        } catch (Exception e) {
            fail("Should not fail");
        }
    });

    ConsumerConfiguration conf2 = new ConsumerConfiguration();
    conf2.setSubscriptionType(SubscriptionType.Shared);
    conf2.setReceiverQueueSize(10);
    conf2.setMessageListener((consumer, msg) -> {
        try {
            counter2.incrementAndGet();
            consumer.acknowledge(msg);
            latch.countDown();
        } catch (Exception e) {
            fail("Should not fail");
        }
    });

    ConsumerConfiguration conf3 = new ConsumerConfiguration();
    conf3.setSubscriptionType(SubscriptionType.Shared);
    conf3.setReceiverQueueSize(10);
    conf3.setMessageListener((consumer, msg) -> {
        try {
            counter3.incrementAndGet();
            consumer.acknowledge(msg);
            latch.countDown();
        } catch (Exception e) {
            fail("Should not fail");
        }
    });

    // subscribe and close, so that distribution can be checked after
    // all messages are published
    Consumer consumer1 = pulsarClient.subscribe(topicName, subName, conf1);
    Consumer consumer2 = pulsarClient.subscribe(topicName, subName, conf2);
    Consumer consumer3 = pulsarClient.subscribe(topicName, subName, conf3);

    List<CompletableFuture<MessageId>> futures = Lists.newArrayListWithCapacity(numMsgs * 3);
    Producer producer = pulsarClient.createProducer(topicName);
    for (int i = 0; i < numMsgs * 3; i++) {
        String message = "msg-" + i;
        futures.add(producer.sendAsync(message.getBytes()));
    }
    FutureUtil.waitForAll(futures).get();
    producer.close();

    latch.await(1, TimeUnit.SECONDS);

    /*
     * Total messages = 137 * 3 = 411. Each consumer has 10 permits, so there are
     * 411 / (3 * 10) = 13 full distribution rounds, i.e. each consumer gets 130 messages.
     * In the 14th round the remaining 411 - 130 * 3 = 21 messages are distributed: two
     * consumers get another batch of 10 (140 in total) and the third gets the last one
     * (131 in total).
     */
    assertTrue(CollectionUtils.subtract(Lists.newArrayList(140, 140, 131),
            Lists.newArrayList(counter1.get(), counter2.get(), counter3.get())).isEmpty());

    consumer1.close();
    consumer2.close();
    consumer3.close();
    admin.persistentTopics().delete(topicName);
}

From source file:com.radiohitwave.ftpsync.API.java

public void DeleteNonExistingFiles(String localBasePath) {
    Log.info("Searching for non-existing Files");
    String[] remoteFiles = this.GetRemoteFileList();
    final String finalLocalPath = this.RemoveTrailingSlash(localBasePath);
    AtomicInteger logDeletedFiles = new AtomicInteger();

    try {
        Files.walk(Paths.get(localBasePath)).forEach(filePath -> {
            if (Files.isRegularFile(filePath)) {
                String file = filePath.toString().replace(finalLocalPath, "");
                file = file.substring(1);
                file = file.replace("\\", "/");
                if (Arrays.binarySearch(remoteFiles, file) < 0) {
                    try {
                        Log.info("Deleting <" + file + ">");
                        Files.delete(filePath);
                        logDeletedFiles.incrementAndGet();
                    } catch (IOException ex) {
                        Log.error("Cant delete File:" + ex.toString());
                        Log.remoteError(ex);
                    }
                }
            }
        });
        Files.walk(Paths.get(localBasePath)).forEach(filePath -> {
            if (Files.isDirectory(filePath)) {
                String file = filePath.toString().replace(finalLocalPath + "/", "");
                try {
                    Files.delete(filePath);
                    Log.info("Deleting empty Directory <" + file + ">");
                } catch (IOException e) {
                    // the directory is not empty (or cannot be removed); keep it and move on
                }
            }
        });
    } catch (IOException ex) {
        Log.error(ex.toString());
    }
    Log.info("Disk-Cleanup finished, deleted " + logDeletedFiles.get() + " Files");
    if (logDeletedFiles.get() > this.nonExistingFilesThreesholdBeforeAlert) {
        Log.remoteInfo("Deleted " + logDeletedFiles.get() + " non-existing Files");
    }

}

From source file:org.apache.hedwig.server.delivery.TestThrottlingDelivery.java

private void throttleX(Publisher pub, final Subscriber sub, ByteString topic, ByteString subid, final int X)
        throws Exception {
    for (int i = 1; i <= 3 * X; i++) {
        pub.publish(topic, Message.newBuilder().setBody(ByteString.copyFromUtf8(String.valueOf(i))).build());
    }
    SubscriptionOptions opts = SubscriptionOptions.newBuilder().setCreateOrAttach(CreateOrAttach.ATTACH)
            .build();
    sub.subscribe(topic, subid, opts);

    final AtomicInteger expected = new AtomicInteger(1);
    final CountDownLatch throttleLatch = new CountDownLatch(1);
    final CountDownLatch nonThrottleLatch = new CountDownLatch(1);
    sub.startDelivery(topic, subid, new MessageHandler() {
        @Override
        public synchronized void deliver(ByteString topic, ByteString subscriberId, Message msg,
                Callback<Void> callback, Object context) {
            try {
                int value = Integer.valueOf(msg.getBody().toStringUtf8());
                logger.debug("Received message {}", value);

                if (value == expected.get()) {
                    expected.incrementAndGet();
                } else {
                    // error condition
                    logger.error("Did not receive expected value, expected {}, got {}", expected.get(), value);
                    expected.set(0);
                    throttleLatch.countDown();
                    nonThrottleLatch.countDown();
                }
                if (expected.get() > X + 1) {
                    throttleLatch.countDown();
                }
                if (expected.get() == (3 * X + 1)) {
                    nonThrottleLatch.countDown();
                }
                callback.operationFinished(context, null);
                if (expected.get() > X + 1) {
                    sub.consume(topic, subscriberId, msg.getMsgId());
                }
            } catch (Exception e) {
                logger.error("Received bad message", e);
                throttleLatch.countDown();
                nonThrottleLatch.countDown();
            }
        }
    });
    assertFalse("Received more messages than throttle value " + X, throttleLatch.await(3, TimeUnit.SECONDS));
    assertEquals("Should be expected messages with only " + (X + 1), X + 1, expected.get());

    // consume messages to not throttle it
    for (int i = 1; i <= X; i++) {
        sub.consume(topic, subid, MessageSeqId.newBuilder().setLocalComponent(i).build());
    }

    assertTrue("Timed out waiting for messages " + (3 * X + 1), nonThrottleLatch.await(10, TimeUnit.SECONDS));
    assertEquals("Should be expected message with " + (3 * X + 1), 3 * X + 1, expected.get());

    sub.stopDelivery(topic, subid);
    sub.closeSubscription(topic, subid);
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.ScriptEnginesTest.java

@Test
public void shouldFailUntilImportExecutes() throws Exception {
    final ScriptEngines engines = new ScriptEngines(se -> {
    });
    engines.reload("gremlin-groovy", Collections.<String>emptySet(), Collections.<String>emptySet(),
            Collections.emptyMap());

    final Set<String> imports = new HashSet<String>() {
        {
            add("import java.awt.Color");
        }
    };

    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);

    final Thread threadImport = new Thread(() -> {
        engines.addImports(imports);
    });

    // issue 1000 scripts in one thread using a class that isn't imported.  this will result in failure.
    // while that thread is running start a new thread that issues an addImports to include that class.
    // this should block further evals in the first thread until the import is complete at which point
    // evals in the first thread will resume and start to succeed
    final Thread threadEvalAndTriggerImport = new Thread(() -> IntStream.range(0, 1000).forEach(i -> {
        try {
            engines.eval("Color.BLACK", new SimpleBindings(), "gremlin-groovy");
            successes.incrementAndGet();
        } catch (Exception ex) {
            if (failures.incrementAndGet() == 500)
                threadImport.start();
            Thread.yield();
        }
    }));

    threadEvalAndTriggerImport.start();

    threadEvalAndTriggerImport.join();
    threadImport.join();

    assertTrue("Success: " + successes.intValue() + " - Failures: " + failures.intValue(),
            successes.intValue() > 0);
    assertTrue("Success: " + successes.intValue() + " - Failures: " + failures.intValue(),
            failures.intValue() >= 500);

    engines.close();
}

From source file:org.alfresco.bm.event.mongo.MongoResultServiceTest.java

/**
 * Create some results but then search for something that does not match
 */
@Test
public void getZeroResultsUsingHandler() {
    pumpRecords(10);
    long after = resultService.getLastResult().getStartTime() + TimeUnit.HOURS.toMillis(1); // Make sure the query window is out of range

    final AtomicInteger count = new AtomicInteger();
    resultService.getResults(new ResultHandler() {
        @Override
        public boolean processResult(long fromTime, long toTime,
                Map<String, DescriptiveStatistics> statsByEventName, Map<String, Integer> failuresByEventName)
                throws Throwable {
            // Check that we have a failure count for each event
            if (failuresByEventName.size() != statsByEventName.size()) {
                throw new RuntimeException("Didn't have a failure count matching stats count.");
            }
            // Increment
            count.incrementAndGet();
            return true;
        }
    }, after, 20L, 10L, false);

    // Check
    assertEquals(0, count.get());
}

From source file:org.eclipse.hono.deviceregistry.FileBasedCredentialsService.java

Future<Void> saveToFile() {

    if (!getConfig().isSaveToFile()) {
        return Future.succeededFuture();
    } else if (dirty) {
        return checkFileExists(true).compose(s -> {
            final AtomicInteger idCount = new AtomicInteger();
            final JsonArray tenants = new JsonArray();
            for (final Entry<String, Map<String, JsonArray>> entry : credentials.entrySet()) {
                final JsonArray credentialsArray = new JsonArray();
                for (final JsonArray singleAuthIdCredentials : entry.getValue().values()) {
                    credentialsArray.addAll(singleAuthIdCredentials.copy());
                    idCount.incrementAndGet();
                }
                tenants.add(new JsonObject().put(FIELD_TENANT, entry.getKey()).put(ARRAY_CREDENTIALS,
                        credentialsArray));
            }
            final Future<Void> writeHandler = Future.future();
            vertx.fileSystem().writeFile(getConfig().getFilename(),
                    Buffer.buffer(tenants.encodePrettily(), StandardCharsets.UTF_8.name()),
                    writeHandler.completer());
            return writeHandler.map(ok -> {
                dirty = false;
                log.trace("successfully wrote {} credentials to file {}", idCount.get(),
                        getConfig().getFilename());
                return (Void) null;
            }).otherwise(t -> {
                log.warn("could not write credentials to file {}", getConfig().getFilename(), t);
                return (Void) null;
            });
        });
    } else {
        log.trace("credentials registry does not need to be persisted");
        return Future.succeededFuture();
    }
}

From source file:org.apache.hadoop.yarn.util.TestFSDownload.java

@Test(timeout = 60000)
public void testDownloadPublicWithStatCache()
        throws IOException, URISyntaxException, InterruptedException, ExecutionException {
    final Configuration conf = new Configuration();
    FileContext files = FileContext.getLocalFSFileContext(conf);
    Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));

    // if test directory doesn't have ancestor permission, skip this test
    FileSystem f = basedir.getFileSystem(conf);
    assumeTrue(FSDownload.ancestorsHaveExecutePermissions(f, basedir, null));

    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

    int size = 512;

    final ConcurrentMap<Path, AtomicInteger> counts = new ConcurrentHashMap<Path, AtomicInteger>();
    final CacheLoader<Path, Future<FileStatus>> loader = FSDownload.createStatusCacheLoader(conf);
    final LoadingCache<Path, Future<FileStatus>> statCache = CacheBuilder.newBuilder()
            .build(new CacheLoader<Path, Future<FileStatus>>() {
                public Future<FileStatus> load(Path path) throws Exception {
                    // increment the count
                    AtomicInteger count = counts.get(path);
                    if (count == null) {
                        count = new AtomicInteger(0);
                        AtomicInteger existing = counts.putIfAbsent(path, count);
                        if (existing != null) {
                            count = existing;
                        }
                    }
                    count.incrementAndGet();

                    // use the default loader
                    return loader.load(path);
                }
            });

    // test FSDownload.isPublic() concurrently
    final int fileCount = 3;
    List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
    for (int i = 0; i < fileCount; i++) {
        Random rand = new Random();
        long sharedSeed = rand.nextLong();
        rand.setSeed(sharedSeed);
        System.out.println("SEED: " + sharedSeed);
        final Path path = new Path(basedir, "test-file-" + i);
        createFile(files, path, size, rand);
        final FileSystem fs = path.getFileSystem(conf);
        final FileStatus sStat = fs.getFileStatus(path);
        tasks.add(new Callable<Boolean>() {
            public Boolean call() throws IOException {
                return FSDownload.isPublic(fs, path, sStat, statCache);
            }
        });
    }

    ExecutorService exec = Executors.newFixedThreadPool(fileCount);
    try {
        List<Future<Boolean>> futures = exec.invokeAll(tasks);
        // files should be public
        for (Future<Boolean> future : futures) {
            assertTrue(future.get());
        }
        // for each path exactly one file status call should be made
        for (AtomicInteger count : counts.values()) {
            assertEquals(1, count.get());
        }
    } finally {
        exec.shutdown();
    }
}

From source file:org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexAugmentTest.java

@Test
public void indexAugmentorMismatchedNodeType() throws Exception {
    //setup repo and index
    NodeTypeRegistry.register(root, IOUtils.toInputStream(TestUtil.TEST_NODE_TYPE), "test nodeType");
    Tree props = createIndex(TestUtil.NT_TEST);
    TestUtil.enableForFullText(props, "foo1");
    root.commit();

    //setup augmentors
    final AtomicInteger indexingCounter1 = new AtomicInteger(0);
    final AtomicInteger indexingCounter2 = new AtomicInteger(0);
    factory.registerIndexFieldProvider(new IndexFieldProvider() {
        @Nonnull
        @Override
        public Iterable<Field> getAugmentedFields(String path, NodeState document, NodeState indexDefinition) {
            indexingCounter1.incrementAndGet();
            return IndexFieldProvider.DEFAULT.getAugmentedFields(path, document, indexDefinition);
        }

        @Nonnull
        @Override
        public Set<String> getSupportedTypes() {
            return Collections.singleton(JcrConstants.NT_BASE);
        }
    });
    factory.registerIndexFieldProvider(new IndexFieldProvider() {
        @Nonnull
        @Override
        public Iterable<Field> getAugmentedFields(String path, NodeState document, NodeState indexDefinition) {
            indexingCounter2.incrementAndGet();
            return IndexFieldProvider.DEFAULT.getAugmentedFields(path, document, indexDefinition);
        }

        @Nonnull
        @Override
        public Set<String> getSupportedTypes() {
            return Collections.singleton(TestUtil.NT_TEST);
        }
    });
    factory.useSuperBehavior = true;

    //add content
    createNodeWithType(root.getTree("/"), "node1", TestUtil.NT_TEST).setProperty("foo1", "bar1");
    root.commit();

    assertEquals("Mismatching node type should not let index augmentor called", 0, indexingCounter1.get());
    assertEquals("Matching node type should get augmentor called", 1, indexingCounter2.get());
}

From source file:org.alfresco.bm.event.mongo.MongoResultServiceTest.java

/**
 * Test the case where the reporting period is smaller than the stats window
 */
@Test
public void getCheckedResultsUsingHandler() {
    pumpRecords(10);

    final AtomicInteger count = new AtomicInteger();
    final Map<String, DescriptiveStatistics> lastStatsByEventName = new HashMap<String, DescriptiveStatistics>(
            17);

    resultService.getResults(new ResultHandler() {
        @Override
        public boolean processResult(long fromTime, long toTime,
                Map<String, DescriptiveStatistics> statsByEventName, Map<String, Integer> failuresByEventName)
                throws Throwable {
            // Always keep the last stats
            lastStatsByEventName.clear();
            lastStatsByEventName.putAll(statsByEventName);

            count.incrementAndGet();
            return true;
        }
    }, 0L, 200L, 10L, false);
    // Check
    assertEquals(10, count.get());

    // Now go through the last stats received
    // Check it against the last window size
    List<String> names = resultService.getEventNames();
    for (String eventName : names) {
        List<EventRecord> eventResults = resultService.getResults(eventName, 0, 1000);
        DescriptiveStatistics eventStats = new DescriptiveStatistics();
        for (EventRecord eventRecord : eventResults) {
            eventStats.addValue(eventRecord.getTime());
        }
        DescriptiveStatistics lastEventStats = lastStatsByEventName.get(eventName);
        assertNotNull("No last report for event '" + eventName + "'.", lastEventStats);
        // Now check that this matched the last report exactly
        assertEquals("Mean for '" + eventName + "' was not correct. ", (long) Math.floor(eventStats.getMean()),
                (long) Math.floor(lastStatsByEventName.get(eventName).getMean()));
    }
}