Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicInteger.incrementAndGet().

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
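
incrementAndGet() adds one and returns the updated value, making it a thread-safe equivalent of the pre-increment ++i; its counterpart getAndIncrement() returns the previous value instead. A minimal standalone sketch of the difference:

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(0);

        // incrementAndGet() increments first, then returns the new value.
        System.out.println(counter.incrementAndGet()); // prints 1

        // getAndIncrement() returns the old value, then increments.
        System.out.println(counter.getAndIncrement()); // prints 1
        System.out.println(counter.get());             // prints 2
    }
}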

Usage

From source file: org.couchbase.mock.client.ClientViewTest.java

@Test
public void testViewQueryWithListener() throws Exception {
    final Query query = new Query();
    query.setReduce(false);

    HttpFuture<View> future = client.asyncGetView(DESIGN_DOC_W_REDUCE, VIEW_NAME_W_REDUCE);

    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicInteger callCount = new AtomicInteger(0);
    future.addListener(new HttpCompletionListener() {
        @Override
        public void onComplete(HttpFuture<?> f) throws Exception {
            View view = (View) f.get();
            HttpFuture<ViewResponse> queryFuture = client.asyncQuery(view, query);
            queryFuture.addListener(new HttpCompletionListener() {
                @Override
                public void onComplete(HttpFuture<?> f) throws Exception {
                    ViewResponse resp = (ViewResponse) f.get();
                    if (resp.size() == ITEMS.size()) {
                        callCount.incrementAndGet();
                        latch.countDown();
                    }
                }
            });
        }
    });

    assertTrue(latch.await(3, TimeUnit.SECONDS));
    assertEquals(1, callCount.get());
}
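
The test above pairs an AtomicInteger with a CountDownLatch: the latch signals that the nested asynchronous callback fired, while the counter verifies it fired exactly once. A minimal sketch of the same pattern, with a plain thread standing in for the asynchronous client (class and method names here are illustrative):

import static org.junit.Assert.*;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;

public class CallbackCountSketch {
    @Test
    public void callbackRunsExactlyOnce() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicInteger callCount = new AtomicInteger(0);

        // Stand-in for registering an asynchronous completion callback.
        new Thread(() -> {
            callCount.incrementAndGet(); // record that the callback ran
            latch.countDown();           // release the waiting test thread
        }).start();

        assertTrue(latch.await(3, TimeUnit.SECONDS)); // wait for the callback
        assertEquals(1, callCount.get());             // verify it ran exactly once
    }
}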

From source file: org.apache.bookkeeper.bookie.CreateNewLogTest.java

@Test
public void testLockConsistency() throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();

    conf.setLedgerDirNames(ledgerDirs);
    conf.setEntryLogFilePreAllocationEnabled(false);
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setMaximumNumberOfActiveEntryLogs(5);

    CountDownLatch latch = new CountDownLatch(1);
    AtomicInteger count = new AtomicInteger(0);

    /*
     * Inject a wait operation into the 'getWritableLedgerDirsForNewLog'
     * method of ledgerDirsManager. getWritableLedgerDirsForNewLog will be
     * called when entryLogManager.createNewLog is called.
     */
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())) {
        /*
         * When getWritableLedgerDirsForNewLog is called for the first time,
         * it awaits on 'latch' before calling the super implementation.
         */
        @Override
        public List<File> getWritableLedgerDirsForNewLog() throws NoWritableLedgerDirException {
            if (count.incrementAndGet() == 1) {
                try {
                    latch.await();
                } catch (InterruptedException e) {
                    LOG.error("Got InterruptedException while waiting for latch countdown", e);
                }
            }
            return super.getWritableLedgerDirsForNewLog();
        }
    };

    EntryLogger el = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerForEntryLogPerLedger entryLogManager = (EntryLogManagerForEntryLogPerLedger) el
            .getEntryLogManager();

    long firstLedgerId = 100L;
    AtomicBoolean newLogCreated = new AtomicBoolean(false);

    Assert.assertFalse("EntryLogManager cacheMap should not contain entry for firstLedgerId",
            entryLogManager.getCacheAsMap().containsKey(firstLedgerId));
    Assert.assertEquals("Value of the count should be 0", 0, count.get());
    /*
     * In a new thread, create a new log for 'firstLedgerId' and then set
     * 'newLogCreated' to true. Since this is the first createNewLog call,
     * it will be blocked until the latch is counted down to 0.
     */
    new Thread() {
        @Override
        public void run() {
            try {
                entryLogManager.createNewLog(firstLedgerId);
                newLogCreated.set(true);
            } catch (IOException e) {
                LOG.error("Got IOException while creating new log", e);
            }
        }
    }.start();

    /*
     * Wait until the entry for 'firstLedgerId' is created in the cacheMap;
     * it will be created because createNewLog is called in the other thread.
     */
    while (!entryLogManager.getCacheAsMap().containsKey(firstLedgerId)) {
        Thread.sleep(200);
    }
    Lock firstLedgersLock = entryLogManager.getLock(firstLedgerId);

    /*
     * Since 'latch' has not been counted down, the new log should not be
     * created even after waiting for 2 seconds.
     */
    Thread.sleep(2000);
    Assert.assertFalse("New log shouldn't have been created", newLogCreated.get());

    /*
     * Create MaximumNumberOfActiveEntryLogs entry logs and do cache
     * cleanup, so that the earliest entry in the cache will be removed.
     */
    for (int i = 1; i <= conf.getMaximumNumberOfActiveEntryLogs(); i++) {
        entryLogManager.createNewLog(firstLedgerId + i);
    }
    entryLogManager.doEntryLogMapCleanup();
    Assert.assertFalse("Entry for that ledger shouldn't be there",
            entryLogManager.getCacheAsMap().containsKey(firstLedgerId));

    /*
     * Now count down the latch so that the other thread can make progress
     * with createNewLog. Since the entry for 'firstLedgerId' was evicted
     * from the cache, the newly created entry log will be added to the
     * rotated entry logs.
     */
    latch.countDown();
    while (!newLogCreated.get()) {
        Thread.sleep(200);
    }
    while (entryLogManager.getRotatedLogChannels().size() < 1) {
        Thread.sleep(200);
    }

    /*
     * The entry for 'firstLedgerId' was removed from the cache, but even
     * so, the lock we get for 'firstLedgerId' should be the same one we
     * got earlier.
     */
    Lock lockForThatLedgerAfterRemoval = entryLogManager.getLock(firstLedgerId);
    Assert.assertEquals("For a given ledger lock should be the same before and after removal", firstLedgersLock,
            lockForThatLedgerAfterRemoval);
}

From source file: com.alibaba.wasp.meta.FMetaServicesImplWithoutRetry.java

public boolean isTableAvailable(final byte[] tableName) throws IOException {
    final AtomicBoolean available = new AtomicBoolean(true);
    final AtomicInteger entityGroupCount = new AtomicInteger(0);
    FMetaScanner.MetaScannerVisitor visitor = new FMetaScanner.MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result row) throws IOException {
            EntityGroupInfo info = FMetaScanner.getEntityGroupInfo(row);
            if (info != null) {
                if (Bytes.equals(tableName, info.getTableName())) {
                    ServerName sn = ServerName.getServerName(row);
                    if (sn == null) {
                        available.set(false);
                        return false;
                    }
                    entityGroupCount.incrementAndGet();
                }
            }
            return true;
        }
    };
    FMetaScanner.metaScan(getConf(), visitor);
    return available.get() && (entityGroupCount.get() > 0);
}

From source file: org.apache.hadoop.hbase.client.HBaseFsck.java

/**
 * Return a list of tables whose metadata have not been modified in the
 * last few milliseconds specified by timelag.
 * If none of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, STARTCODE_QUALIFIER,
 * SPLITA_QUALIFIER or SPLITB_QUALIFIER columns have changed in the last
 * milliseconds specified by timelag, the table is a candidate to be returned.
 * @param numSkipped - counter incremented once for each table skipped as in flux
 * @return tables that have not been modified recently
 */
HTableDescriptor[] getTables(AtomicInteger numSkipped) {
    TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
    long now = System.currentTimeMillis();

    for (HbckInfo hbi : regionInfo.values()) {
        MetaEntry info = hbi.metaEntry;

        // if the start key is empty, then we have found the first region of a table.
        // pick only those tables that were not modified in the last few milliseconds.
        if (info != null && info.getStartKey().length == 0) {
            if (info.modTime + timelag < now) {
                uniqueTables.add(info.getTableDesc());
            } else {
                numSkipped.incrementAndGet(); // one more in-flux table
            }
        }
    }
    return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
}
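
In this example the AtomicInteger acts as a mutable out-parameter: the caller passes a counter into getTables and reads back how many in-flux tables were skipped, alongside the method's normal return value. A stripped-down sketch of that pattern (the method and the "stable-" filter are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch: report a secondary count through an AtomicInteger out-parameter
// while returning the primary result normally.
static List<String> keepStable(List<String> items, AtomicInteger numSkipped) {
    List<String> kept = new ArrayList<>();
    for (String item : items) {
        if (item.startsWith("stable-")) {
            kept.add(item);
        } else {
            numSkipped.incrementAndGet(); // one more skipped entry
        }
    }
    return kept;
}

// Usage:
// AtomicInteger skipped = new AtomicInteger(0);
// List<String> kept = keepStable(items, skipped);
// System.out.println(skipped.get() + " entries skipped");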

From source file: com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testPartialRetriesWithInjectedFailures() throws NoSuchMethodException, IllegalAccessException,
        InvocationTargetException, IOException, URISyntaxException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final List<Ds3Object> filesToGet = new ArrayList<>();

        final String DIR_NAME = "largeFiles/";
        final String FILE_NAME = "GreatExpectations.txt";

        final int offsetIntoFirstRange = 10;

        filesToGet.add(new PartialDs3Object(FILE_NAME, Range.byLength(200000, 100000)));

        filesToGet.add(new PartialDs3Object(FILE_NAME, Range.byLength(100000, 100000)));

        filesToGet.add(new PartialDs3Object(FILE_NAME, Range.byLength(offsetIntoFirstRange, 100000)));

        final Ds3ClientShim ds3ClientShim = new Ds3ClientShim((Ds3ClientImpl) client);

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 4;
        final Ds3ClientHelpers ds3ClientHelpers = Ds3ClientHelpers.wrap(ds3ClientShim,
                maxNumBlockAllocationRetries, maxNumObjectTransferAttempts);

        final Ds3ClientHelpers.Job job = ds3ClientHelpers.startReadJob(BUCKET_NAME, filesToGet);
        final AtomicInteger intValue = new AtomicInteger();

        job.attachObjectCompletedListener(new ObjectCompletedListener() {
            int numPartsCompleted = 0;

            @Override
            public void objectCompleted(final String name) {
                assertEquals(1, ++numPartsCompleted);
                intValue.incrementAndGet();
            }
        });

        job.attachDataTransferredListener(new DataTransferredListener() {
            @Override
            public void dataTransferred(final long size) {
                LOG.info("Data transferred size: {}", size);
            }
        });

        job.transfer(new FileObjectGetter(tempDirectory));

        assertEquals(1, intValue.get());

        try (final InputStream originalFileStream = Thread.currentThread().getContextClassLoader()
                .getResourceAsStream(DIR_NAME + FILE_NAME)) {
            final byte[] first300000Bytes = new byte[300000 - offsetIntoFirstRange];
            originalFileStream.skip(offsetIntoFirstRange);
            int numBytesRead = originalFileStream.read(first300000Bytes, 0, 300000 - offsetIntoFirstRange);

            assertThat(numBytesRead, is(300000 - offsetIntoFirstRange));

            try (final InputStream fileReadFromBP = Files
                    .newInputStream(Paths.get(tempDirectory.toString(), FILE_NAME))) {
                final byte[] first300000BytesFromBP = new byte[300000 - offsetIntoFirstRange];

                numBytesRead = fileReadFromBP.read(first300000BytesFromBP, 0, 300000 - offsetIntoFirstRange);
                assertThat(numBytesRead, is(300000 - offsetIntoFirstRange));

                assertTrue(Arrays.equals(first300000Bytes, first300000BytesFromBP));
            }
        }
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file: org.dasein.cloud.azure.tests.network.AzureLoadBalancerSupportWithMockHttpClientTest.java

@Test
public void addServersShouldPostCorrectRequest() throws CloudException, InternalException {
    final String ROLE_NAME_2 = "TESTROLENAME2";
    final String VM_ID_2 = String.format("%s:%s:%s", SERVICE_NAME, DEPLOYMENT_NAME, ROLE_NAME_2);

    final AtomicInteger postCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if ("GET".equals(request.getMethod()) && DEFINITION_URL.equals(request.getURI().toString())) {
                assertGet(request, DEFINITION_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });
                DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                        createDefinitionModel("Failover", "Enabled", HC_PORT));
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if ("POST".equals(request.getMethod())
                    && DEFINITIONS_URL.equals(request.getURI().toString())) {
                postCount.incrementAndGet();
                assertPost(request, DEFINITIONS_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        createDefinitionModelWithAnotherServer("Failover", "Enabled", ROLE_NAME_2));

                DefinitionModel definitionModel = new DefinitionModel();
                definitionModel.setVersion("2");
                DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                        definitionModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };
    loadBalancerSupport.addServers(LB_NAME, ROLE_NAME_2);
    assertEquals("LoadBalancerSupport.addServers() ", 1, postCount.get());
}

From source file: org.apache.distributedlog.auditor.DLAuditor.java

/**
 * Find leaked ledgers, phase 2: collect ledgers from URIs.
 */
private Set<Long> collectLedgersFromDL(List<URI> uris, List<List<String>> allocationPaths) throws IOException {
    final Set<Long> ledgers = new TreeSet<Long>();
    List<Namespace> namespaces = new ArrayList<Namespace>(uris.size());
    try {
        for (URI uri : uris) {
            namespaces.add(NamespaceBuilder.newBuilder().conf(conf).uri(uri).build());
        }
        final CountDownLatch doneLatch = new CountDownLatch(uris.size());
        final AtomicInteger numFailures = new AtomicInteger(0);
        ExecutorService executor = Executors.newFixedThreadPool(uris.size());
        try {
            int i = 0;
            for (final Namespace namespace : namespaces) {
                final Namespace dlNamespace = namespace;
                final URI uri = uris.get(i);
                final List<String> aps = allocationPaths.get(i);
                i++;
                executor.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            logger.info("Collecting ledgers from {} : {}", uri, aps);
                            collectLedgersFromAllocator(uri, namespace, aps, ledgers);
                            synchronized (ledgers) {
                                logger.info("Collected {} ledgers from allocators for {} : {} ",
                                        new Object[] { ledgers.size(), uri, ledgers });
                            }
                            collectLedgersFromDL(uri, namespace, ledgers);
                        } catch (IOException e) {
                            numFailures.incrementAndGet();
                            logger.error("Error collecting ledgers from DL : ", e);
                        }
                        doneLatch.countDown();
                    }
                });
            }
            try {
                doneLatch.await();
                if (numFailures.get() > 0) {
                    throw new IOException(numFailures.get() + " errors while collecting ledgers from DL");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.warn("Interrupted while collecting ledgers from DL : ", e);
                throw new DLInterruptedException("Interrupted while collecting ledgers from DL : ", e);
            }
        } finally {
            executor.shutdown();
        }
    } finally {
        for (Namespace namespace : namespaces) {
            namespace.close();
        }
    }
    return ledgers;
}

From source file: com.btoddb.fastpersitentqueue.JournalMgrIT.java

@Test
public void testThreading() throws IOException, ExecutionException {
    final int numEntries = 10000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final ConcurrentLinkedQueue<FpqEntry> events = new ConcurrentLinkedQueue<FpqEntry>();
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxJournalFileSize(1000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        FpqEntry entry = mgr.append(new FpqEntry(x, new byte[100]));
                        events.offer(entry);
                        pushSum.addAndGet(x);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !events.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = events.poll())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }
                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            mgr.reportTake(entry);
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getJournalIdMap().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), hasSize(1));
}
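
The threading test above relies on two invariants: numPops must equal the total number of entries pushed, and the independently accumulated push and pop sums must match when every entry is consumed exactly once. A compact single-threaded sketch of the same bookkeeping (the queue contents and the count of 1000 are illustrative):

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class SumInvariantDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<Long> events = new ConcurrentLinkedQueue<>();
        AtomicLong pushSum = new AtomicLong();
        AtomicLong popSum = new AtomicLong();
        AtomicInteger numPops = new AtomicInteger();

        for (long x = 1; x <= 1000; x++) {
            events.offer(x);
            pushSum.addAndGet(x); // accumulate on the producer side
        }

        Long id;
        while ((id = events.poll()) != null) {
            popSum.addAndGet(id);      // accumulate on the consumer side
            numPops.incrementAndGet(); // count each consumed entry
        }

        System.out.println(numPops.get() == 1000);         // true
        System.out.println(pushSum.get() == popSum.get()); // true
    }
}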

From source file: com.spectralogic.ds3client.integration.Smoke_Test.java

@Test
public void eventHandlerTriggers() throws IOException, URISyntaxException, XmlProcessingException {
    final String bucketName = "eventBucket";

    try {

        final AtomicInteger counter = new AtomicInteger(0);

        HELPERS.ensureBucketExists(bucketName, envDataPolicyId);

        loadBookTestData(client, bucketName);

        final List<Ds3Object> objs = Lists.newArrayList(new Ds3Object("beowulf.txt"));

        final Ds3ClientHelpers.Job job = HELPERS.startReadJob(bucketName, objs);

        job.attachObjectCompletedListener(new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                LOG.info("finished getting: " + name);
                counter.incrementAndGet();
            }
        });

        job.transfer(new Ds3ClientHelpers.ObjectChannelBuilder() {
            @Override
            public SeekableByteChannel buildChannel(final String key) throws IOException {
                return new NullChannel();
            }
        });

        assertThat(counter.get(), is(1));
    } finally {
        deleteAllContents(client, bucketName);
    }
}

From source file: org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer.java

private DelegationTokenRenewer createNewDelegationTokenRenewer(Configuration conf,
        final AtomicInteger counter) {
    DelegationTokenRenewer renew = new DelegationTokenRenewer() {

        @Override
        protected ThreadPoolExecutor createNewThreadPoolService(Configuration conf) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(5, 5, 3L, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>()) {

                @Override
                protected void afterExecute(Runnable r, Throwable t) {
                    counter.decrementAndGet();
                    super.afterExecute(r, t);
                }

                @Override
                public void execute(Runnable command) {
                    counter.incrementAndGet();
                    super.execute(command);
                }
            };
            return pool;
        }
    };
    renew.setRMContext(TestUtils.getMockRMContext());
    return renew;
}
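
The renewer test pairs incrementAndGet() in execute() with decrementAndGet() in afterExecute(), so the counter always reflects the number of tasks submitted but not yet completed. The same bookkeeping in isolation (the pool sizes and demo task are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class InFlightCounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger inFlight = new AtomicInteger(0);

        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 3L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>()) {
            @Override
            public void execute(Runnable command) {
                inFlight.incrementAndGet(); // task submitted
                super.execute(command);
            }

            @Override
            protected void afterExecute(Runnable r, Throwable t) {
                inFlight.decrementAndGet(); // task finished
                super.afterExecute(r, t);
            }
        };

        pool.execute(() -> System.out.println("in flight: " + inFlight.get())); // at least 1
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println("after shutdown: " + inFlight.get()); // 0
    }
}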