Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicInteger#incrementAndGet drawn from open source projects.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
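
Before the project examples below, here is a minimal, self-contained sketch of what incrementAndGet guarantees: each call atomically adds one and returns the updated value, so concurrent increments are never lost. The class name and thread/iteration counts are illustrative only.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);

        // Ten threads each increment the shared counter 1000 times.
        Thread[] threads = new Thread[10];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    // Atomic read-modify-write: no increments are lost under contention.
                    counter.incrementAndGet();
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }

        System.out.println(counter.get()); // always prints 10000
    }
}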

Usage

From source file: org.keycloak.testsuite.admin.concurrency.ConcurrentLoginTest.java

@Test
public void concurrentCodeReuseShouldFail() throws Throwable {
    log.info("*********************************************");
    long start = System.currentTimeMillis();

    for (int i = 0; i < 10; i++) {
        OAuthClient oauth1 = new OAuthClient();
        oauth1.init(driver);
        oauth1.clientId("client0");

        OAuthClient.AuthorizationEndpointResponse resp = oauth1.doLogin("test-user@localhost", "password");
        String code = resp.getCode();
        Assert.assertNotNull(code);
        String codeURL = driver.getCurrentUrl();

        AtomicInteger codeToTokenSuccessCount = new AtomicInteger(0);
        AtomicInteger codeToTokenErrorsCount = new AtomicInteger(0);

        KeycloakRunnable codeToTokenTask = new KeycloakRunnable() {

            @Override
            public void run(int threadIndex, Keycloak keycloak, RealmResource realm) throws Throwable {
                log.infof("Trying to execute codeURL: %s, threadIndex: %d", codeURL, threadIndex);

                OAuthClient.AccessTokenResponse resp = oauth1.doAccessTokenRequest(code, "password");
                if (resp.getAccessToken() != null && resp.getError() == null) {
                    codeToTokenSuccessCount.incrementAndGet();
                } else if (resp.getAccessToken() == null && resp.getError() != null) {
                    codeToTokenErrorsCount.incrementAndGet();
                }
            }

        };

        run(DEFAULT_THREADS, DEFAULT_THREADS, codeToTokenTask);

        oauth1.openLogout();

        // The code should be exchanged for a token at most once. In some cases (e.g. cross-DC) it may not be exchanged successfully at all.
        Assert.assertThat(codeToTokenSuccessCount.get(), Matchers.lessThanOrEqualTo(1));
        Assert.assertThat(codeToTokenErrorsCount.get(), Matchers.greaterThanOrEqualTo(DEFAULT_THREADS - 1));

        log.infof("Iteration %d passed successfully", i);
    }

    long end = System.currentTimeMillis() - start;
    log.info("concurrentCodeReuseShouldFail took " + (end / 1000) + "s");
    log.info("*********************************************");

}

From source file: org.cloudfoundry.identity.uaa.oauth.ClientAdminEndpoints.java

private void incrementErrorCounts(Exception e) {
    String series = UaaStringUtils.getErrorName(e);
    AtomicInteger value = errorCounts.get(series);
    if (value == null) {
        synchronized (errorCounts) {
            value = errorCounts.get(series);
            if (value == null) {
                value = new AtomicInteger();
                errorCounts.put(series, value);
            }
        }
    }
    value.incrementAndGet();
}
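
The example above uses double-checked locking on the errorCounts map to lazily create a per-error counter. On Java 8 and later the same effect can be achieved more simply; a minimal sketch, assuming errorCounts is a java.util.concurrent.ConcurrentHashMap<String, AtomicInteger> (not necessarily how the UAA code declares it):

private void incrementErrorCounts(Exception e) {
    String series = UaaStringUtils.getErrorName(e);
    // computeIfAbsent creates the counter atomically, so the explicit null check
    // and synchronized block are no longer needed.
    errorCounts.computeIfAbsent(series, k -> new AtomicInteger()).incrementAndGet();
}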

From source file: es.us.lsi.restest.engine.UnirestTest.java

private void makeParallelRequests() throws InterruptedException {
    ExecutorService newFixedThreadPool = Executors.newFixedThreadPool(10);
    final AtomicInteger counter = new AtomicInteger(0);
    for (int i = 0; i < 200; i++) {
        newFixedThreadPool.execute(new Runnable() {
            public void run() {
                try {
                    Unirest.get("http://httpbin.org/get").queryString("index", counter.incrementAndGet())
                            .asJson();
                } catch (UnirestException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }

    newFixedThreadPool.shutdown();
    newFixedThreadPool.awaitTermination(10, TimeUnit.MINUTES);
}
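
In this example incrementAndGet also serves as a generator of unique, ordered request indices, one per submitted task. A minimal sketch of that idiom (the class name is illustrative):

import java.util.concurrent.atomic.AtomicInteger;

// Hands out unique, monotonically increasing IDs to any number of threads.
public class RequestIdGenerator {
    private final AtomicInteger nextId = new AtomicInteger(0);

    public int nextRequestId() {
        // Each caller receives a distinct value, even under contention,
        // because the read-modify-write is atomic.
        return nextId.incrementAndGet();
    }
}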

From source file: org.apache.bookkeeper.replication.TestLedgerUnderreplicationManager.java

/**
 * Test that as the hierarchy gets cleaned up, it doesn't interfere
 * with the marking of other ledgers as underreplicated
 */
@Test(timeout = 90000)
public void testHierarchyCleanupInterference() throws Exception {
    final LedgerUnderreplicationManager replicaMgr1 = lmf1.newLedgerUnderreplicationManager();
    final LedgerUnderreplicationManager replicaMgr2 = lmf2.newLedgerUnderreplicationManager();

    final int iterations = 100;
    final AtomicBoolean threadFailed = new AtomicBoolean(false);
    Thread markUnder = new Thread() {
        public void run() {
            long l = 1;
            try {
                for (int i = 0; i < iterations; i++) {
                    replicaMgr1.markLedgerUnderreplicated(l, "localhost:3181");
                    l += 10000;
                }
            } catch (Exception e) {
                LOG.error("markUnder Thread failed with exception", e);
                threadFailed.set(true);
                return;
            }
        }
    };
    final AtomicInteger processed = new AtomicInteger(0);
    Thread markRepl = new Thread() {
        public void run() {
            try {
                for (int i = 0; i < iterations; i++) {
                    long l = replicaMgr2.getLedgerToRereplicate();
                    replicaMgr2.markLedgerReplicated(l);
                    processed.incrementAndGet();
                }
            } catch (Exception e) {
                LOG.error("markRepl Thread failed with exception", e);
                threadFailed.set(true);
                return;
            }
        }
    };
    markRepl.setDaemon(true);
    markUnder.setDaemon(true);

    markRepl.start();
    markUnder.start();
    markUnder.join();
    assertFalse("Thread failed to complete", threadFailed.get());

    int lastProcessed = 0;
    while (true) {
        markRepl.join(10000);
        if (!markRepl.isAlive()) {
            break;
        }
        // Verify the markRepl thread made progress since the last check.
        assertFalse("markRepl thread not progressing", lastProcessed == processed.get());
        lastProcessed = processed.get();
    }
    assertFalse("Thread failed to complete", threadFailed.get());

    List<String> children = zkc1.getChildren(urLedgerPath, false);
    for (String s : children) {
        LOG.info("s: {}", s);
    }
    assertEquals("All hierarchies should be cleaned up", 0, children.size());
}

From source file: org.apache.qpid.systest.management.jmx.QueueManagementTest.java

private void startAsyncConsumerOn(Destination queue, Connection asyncConnection,
        final CountDownLatch requiredNumberOfMessagesRead, final AtomicInteger totalConsumed) throws Exception {
    Session session = asyncConnection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageConsumer consumer = session.createConsumer(queue);
    consumer.setMessageListener(new MessageListener() {

        @Override
        public void onMessage(Message arg0) {
            totalConsumed.incrementAndGet();
            requiredNumberOfMessagesRead.countDown();
        }
    });
}
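
The listener above pairs an AtomicInteger total with a CountDownLatch so the calling test can wait until a required number of messages has been consumed. A minimal, self-contained sketch of that pattern (names and counts are illustrative, not taken from the Qpid test):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ListenerCountingDemo {
    public static void main(String[] args) throws InterruptedException {
        final int expectedMessages = 5;
        final AtomicInteger totalConsumed = new AtomicInteger(0);
        final CountDownLatch latch = new CountDownLatch(expectedMessages);

        // Simulate asynchronous deliveries arriving on other threads.
        Runnable onMessage = () -> {
            totalConsumed.incrementAndGet(); // count every delivery
            latch.countDown();               // signal progress to the waiting thread
        };
        for (int i = 0; i < expectedMessages; i++) {
            new Thread(onMessage).start();
        }

        // Block until the required number of callbacks has been observed.
        if (!latch.await(10, TimeUnit.SECONDS)) {
            throw new IllegalStateException("Timed out waiting for messages");
        }
        System.out.println("Consumed: " + totalConsumed.get());
    }
}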

From source file: jcuda.jcublas.kernel.TestMultipleThreads.java

@Test
public void testMultipleThreads() throws InterruptedException {
    int numThreads = 10;
    final INDArray array = Nd4j.rand(3000, 3000);
    final INDArray expected = array.dup().mmul(array).mmul(array).div(array).div(array);
    final AtomicInteger correct = new AtomicInteger();
    final CountDownLatch latch = new CountDownLatch(numThreads);

    ExecutorService executors = Executors.newCachedThreadPool();

    for (int x = 0; x < numThreads; x++) {
        executors.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    int total = 10;
                    int right = 0;
                    for (int x = 0; x < total; x++) {
                        StopWatch watch = new StopWatch();
                        watch.start();
                        INDArray actual = array.dup().mmul(array).mmul(array).div(array).div(array);
                        watch.stop();
                        System.out.println("MMUL took " + watch.getTime());
                        if (expected.equals(actual))
                            right++;
                    }

                    if (total == right)
                        correct.incrementAndGet();
                } finally {
                    latch.countDown();
                }

            }
        });
    }

    latch.await();

    assertEquals(numThreads, correct.get());

}

From source file: io.realm.Realm.java

private static synchronized Realm createAndValidate(RealmConfiguration configuration, boolean validateSchema,
        boolean autoRefresh) {
    // Start the finalizer thread if needed
    if (!isFinalizerStarted) {
        executorService.submit(new FinalizerRunnable());
        isFinalizerStarted = true;
    }

    // Check if a cached instance already exists for this thread
    String canonicalPath = configuration.getPath();
    Map<RealmConfiguration, Integer> localRefCount = referenceCount.get();
    Integer references = localRefCount.get(configuration);
    if (references == null) {
        references = 0;
    }
    Map<RealmConfiguration, Realm> realms = realmsCache.get();
    Realm realm = realms.get(configuration);
    if (realm != null) {
        localRefCount.put(configuration, references + 1);
        return realm;
    }

    // Create new Realm and cache it. All exception code paths must close the Realm otherwise we risk serving
    // faulty cache data.
    validateAgainstExistingConfigurations(configuration);
    realm = new Realm(configuration, autoRefresh);
    realms.put(configuration, realm);
    localRefCount.put(configuration, references + 1);

    // Increment global reference counter
    if (references == 0) {
        AtomicInteger counter = globalOpenInstanceCounter.get(canonicalPath);
        if (counter == null) {
            globalOpenInstanceCounter.put(canonicalPath, new AtomicInteger(1));
        } else {
            counter.incrementAndGet();
        }
    }

    // Check versions of Realm
    long currentVersion = realm.getVersion();
    long requiredVersion = configuration.getSchemaVersion();
    if (currentVersion != UNVERSIONED && currentVersion < requiredVersion && validateSchema) {
        realm.close();
        throw new RealmMigrationNeededException(canonicalPath, String
                .format("Realm on disc needs to migrate from v%s to v%s", currentVersion, requiredVersion));
    }
    if (currentVersion != UNVERSIONED && requiredVersion < currentVersion && validateSchema) {
        realm.close();
        throw new IllegalArgumentException(String.format(
                "Realm on disc is newer than the one specified: v%s vs. v%s", currentVersion, requiredVersion));
    }

    // Initialize Realm schema if needed
    if (validateSchema) {
        try {
            initializeRealm(realm);
        } catch (RuntimeException e) {
            realm.close();
            throw e;
        }
    }

    return realm;
}

From source file: org.apache.sshd.common.forward.PortForwardingLoadTest.java

@Test
public void testRemoteForwardingPayload() throws Exception {
    final int numIterations = 100;
    final String payload = "This is significantly longer Test Data. This is significantly "
            + "longer Test Data. This is significantly longer Test Data. This is significantly "
            + "longer Test Data. This is significantly longer Test Data. This is significantly "
            + "longer Test Data. This is significantly longer Test Data. This is significantly "
            + "longer Test Data. ";
    Session session = createSession();
    try (ServerSocket ss = new ServerSocket()) {
        ss.setReuseAddress(true);
        ss.bind(new InetSocketAddress((InetAddress) null, 0));
        int forwardedPort = ss.getLocalPort();
        int sinkPort = Utils.getFreePort();
        session.setPortForwardingR(sinkPort, TEST_LOCALHOST, forwardedPort);
        final boolean started[] = new boolean[1];
        started[0] = false;
        final AtomicInteger conCount = new AtomicInteger(0);

        Thread tWriter = new Thread(getCurrentTestName() + "Writer") {
            @SuppressWarnings("synthetic-access")
            @Override
            public void run() {
                started[0] = true;
                try {
                    byte[] bytes = payload.getBytes(StandardCharsets.UTF_8);
                    for (int i = 0; i < numIterations; ++i) {
                        try (Socket s = ss.accept()) {
                            conCount.incrementAndGet();

                            try (OutputStream sockOut = s.getOutputStream()) {
                                sockOut.write(bytes);
                                sockOut.flush();
                            }
                        }
                    }
                } catch (Exception e) {
                    log.error("Failed to complete run loop", e);
                }
            }
        };
        tWriter.start();
        Thread.sleep(TimeUnit.SECONDS.toMillis(1L));
        assertTrue("Server not started", started[0]);

        final RuntimeException lenOK[] = new RuntimeException[numIterations];
        final RuntimeException dataOK[] = new RuntimeException[numIterations];
        byte b2[] = new byte[payload.length()];
        byte b1[] = new byte[b2.length / 2];

        for (int i = 0; i < numIterations; i++) {
            final int ii = i;
            try (Socket s = new Socket(TEST_LOCALHOST, sinkPort); InputStream sockIn = s.getInputStream()) {
                s.setSoTimeout((int) TimeUnit.SECONDS.toMillis(10L));

                int read1 = sockIn.read(b1);
                String part1 = new String(b1, 0, read1, StandardCharsets.UTF_8);
                Thread.sleep(50);

                int read2 = sockIn.read(b2);
                String part2 = new String(b2, 0, read2, StandardCharsets.UTF_8);
                int totalRead = read1 + read2;
                lenOK[ii] = (payload.length() == totalRead) ? null
                        : new IndexOutOfBoundsException(
                                "Mismatched length: expected=" + payload.length() + ", actual=" + totalRead);

                String readData = part1 + part2;
                dataOK[ii] = payload.equals(readData) ? null : new IllegalStateException("Mismatched content");
                if (lenOK[ii] != null) {
                    throw lenOK[ii];
                }

                if (dataOK[ii] != null) {
                    throw dataOK[ii];
                }
            } catch (Exception e) {
                if (e instanceof IOException) {
                    log.warn("I/O exception in iteration #" + i, e);
                } else {
                    log.error("Failed to complete iteration #" + i, e);
                }
            }
        }
        int ok = 0;
        for (int i = 0; i < numIterations; i++) {
            ok += (lenOK[i] == null) ? 1 : 0;
        }
        log.info("Successful iteration: " + ok + " out of " + numIterations);
        Thread.sleep(TimeUnit.SECONDS.toMillis(1L));
        for (int i = 0; i < numIterations; i++) {
            assertNull("Bad length at iteration " + i, lenOK[i]);
            assertNull("Bad data at iteration " + i, dataOK[i]);
        }
        Thread.sleep(TimeUnit.SECONDS.toMillis(1L));
        session.delPortForwardingR(forwardedPort);
        ss.close();
        tWriter.join(TimeUnit.SECONDS.toMillis(11L));
    } finally {
        session.disconnect();
    }
}

From source file: org.apache.hadoop.hbase.client.TestAdmin2.java

@Test(timeout = 300000)
public void testCreateBadTables() throws IOException {
    String msg = null;
    try {
        this.admin.createTable(new HTableDescriptor(TableName.META_TABLE_NAME));
    } catch (TableExistsException e) {
        msg = e.toString();
    }
    assertTrue("Unexcepted exception message " + msg,
            msg != null && msg.startsWith(TableExistsException.class.getName())
                    && msg.contains(TableName.META_TABLE_NAME.getNameAsString()));

    // Now try and do concurrent creation with a bunch of threads.
    final HTableDescriptor threadDesc = new HTableDescriptor(TableName.valueOf("threaded_testCreateBadTables"));
    threadDesc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    int count = 10;
    Thread[] threads = new Thread[count];
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);
    final Admin localAdmin = this.admin;
    for (int i = 0; i < count; i++) {
        threads[i] = new Thread(Integer.toString(i)) {
            @Override
            public void run() {
                try {
                    localAdmin.createTable(threadDesc);
                    successes.incrementAndGet();
                } catch (TableExistsException e) {
                    failures.incrementAndGet();
                } catch (IOException e) {
                    throw new RuntimeException("Failed threaded create " + getName(), e);
                }
            }
        };
    }
    for (int i = 0; i < count; i++) {
        threads[i].start();
    }
    for (int i = 0; i < count; i++) {
        while (threads[i].isAlive()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // continue
            }
        }
    }
    // All threads are now dead.  Count up how many tables were created and
    // how many failed w/ appropriate exception.
    assertEquals(1, successes.get());
    assertEquals(count - 1, failures.get());
}

From source file: org.apache.hadoop.hbase.tool.TestLoadIncrementalHFilesSplitRecovery.java

/**
 * This test exercises the path where there is a split after initial validation but before the
 * atomic bulk load call. We cannot use presplitting to test this path, so we actually inject a
 * split just before the atomic region load.
 */
@Test(timeout = 120000)
public void testSplitWhileBulkLoadPhase() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        populateTable(connection, table, 1);
        assertExpectedTable(table, ROWCOUNT, 1);

        // Now let's cause trouble. This will occur after checks and cause bulk
        // files to fail when attempt to atomically import. This is recoverable.
        final AtomicInteger attemptedCalls = new AtomicInteger();
        LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {
            @Override
            protected void bulkLoadPhase(final Table htable, final Connection conn, ExecutorService pool,
                    Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups,
                    boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
                int i = attemptedCalls.incrementAndGet();
                if (i == 1) {
                    // On first attempt force a split.
                    forceSplit(table);
                }
                super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
            }
        };

        // create HFiles for different column families
        try (Table t = connection.getTable(table);
                RegionLocator locator = connection.getRegionLocator(table);
                Admin admin = connection.getAdmin()) {
            Path bulk = buildBulkFiles(table, 2);
            lih2.doBulkLoad(bulk, admin, t, locator);
        }

        // check that data was loaded
        // The three expected attempts are 1) failure because need to split, 2)
        // load of split top 3) load of split bottom
        assertEquals(3, attemptedCalls.get());
        assertExpectedTable(table, ROWCOUNT, 2);
    }
}