Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicInteger.incrementAndGet().

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
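
For orientation before the full examples below, here is a minimal, self-contained sketch (the class and variable names are illustrative, not taken from any example on this page). It uses incrementAndGet() as an atomic replacement for ++ on a counter shared between threads:

import java.util.concurrent.atomic.AtomicInteger;

public class CounterSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet(); // atomically adds 1 and returns the updated value
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // Always prints 2000; with a plain int and counter++, increments could be lost.
        System.out.println(counter.get());
    }
}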

Usage

From source file:org.apache.hadoop.hbase.master.TestRegionPlacement.java

/**
 * Check whether regions are assigned to servers consistent with the explicit
 * hints that are persisted in the hbase:meta table.
 * Also keep track of the number of regions assigned to the
 * primary region server.
 * @return the number of regions assigned to the primary region server
 * @throws IOException
 */
private int getNumRegionisOnPrimaryRS() throws IOException {
    final AtomicInteger regionOnPrimaryNum = new AtomicInteger(0);
    final AtomicInteger totalRegionNum = new AtomicInteger(0);
    LOG.info("The start of region placement verification");
    MetaScannerVisitor visitor = new MetaScannerVisitor() {
        public boolean processRow(Result result) throws IOException {
            try {
                HRegionInfo info = MetaScanner.getHRegionInfo(result);
                if (info.getTable().getNamespaceAsString()
                        .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
                    return true;
                }
                byte[] server = result.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY,
                        FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER);
                // Add the favored nodes into assignment plan
                ServerName[] favoredServerList = FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes);
                favoredNodesAssignmentPlan.put(info, favoredServerList);

                Position[] positions = Position.values();
                if (info != null) {
                    totalRegionNum.incrementAndGet();
                    if (server != null) {
                        ServerName serverName = ServerName.valueOf(Bytes.toString(server), -1);
                        if (favoredNodes != null) {
                            String placement = "[NOT FAVORED NODE]";
                            for (int i = 0; i < favoredServerList.length; i++) {
                                if (favoredServerList[i].equals(serverName)) {
                                    placement = positions[i].toString();
                                    if (i == Position.PRIMARY.ordinal()) {
                                        regionOnPrimaryNum.incrementAndGet();
                                    }
                                    break;
                                }
                            }
                            LOG.info(info.getRegionNameAsString() + " on " + serverName + " " + placement);
                        } else {
                            LOG.info(info.getRegionNameAsString() + " running on " + serverName
                                    + " but there is no favored region server");
                        }
                    } else {
                        LOG.info(info.getRegionNameAsString() + " not assigned to any server");
                    }
                }
                return true;
            } catch (RuntimeException e) {
                LOG.error("Result=" + result);
                throw e;
            }
        }

        @Override
        public void close() throws IOException {
        }
    };
    MetaScanner.metaScan(TEST_UTIL.getConfiguration(), visitor);
    LOG.info("There are " + regionOnPrimaryNum.intValue() + " out of " + totalRegionNum.intValue()
            + " regions running on the primary" + " region servers");
    return regionOnPrimaryNum.intValue();
}

From source file:com.spectralogic.ds3client.integration.Smoke_Test.java

@Test
public void eventHandlerRegistrationAndDeregistration()
        throws IOException, URISyntaxException, XmlProcessingException {
    final String bucketName = "eventBucket";

    try {
        final AtomicInteger counter = new AtomicInteger(0);

        HELPERS.ensureBucketExists(bucketName, envDataPolicyId);

        loadBookTestData(client, bucketName);

        final List<Ds3Object> objs = Lists.newArrayList(new Ds3Object("beowulf.txt"));

        final Ds3ClientHelpers.Job job = HELPERS.startReadJob(bucketName, objs);

        final ObjectCompletedListener eventHandler = new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                LOG.info("finished getting: " + name);
                counter.incrementAndGet();
            }
        };

        job.attachObjectCompletedListener(eventHandler);

        job.removeObjectCompletedListener(eventHandler);
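        // Because the listener is removed before the transfer runs, no completion events should be counted.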

        job.transfer(new Ds3ClientHelpers.ObjectChannelBuilder() {
            @Override
            public SeekableByteChannel buildChannel(final String key) throws IOException {
                return new NullChannel();
            }
        });

        assertThat(counter.get(), is(0));
    } finally {
        deleteAllContents(client, bucketName);
    }
}

From source file:com.betfair.cougar.client.AbstractHttpExecutableTest.java

@Test
public void changeRemoteAddress() throws InterruptedException {
    generateEV(tsd, null);

    final int maxThreads = 5;
    Thread[] threads = new Thread[maxThreads];

    final OperationKey key = new OperationKey(TestServiceDefinition.TEST_GET, null);

    final AtomicInteger failureCount = new AtomicInteger(0);
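    // Any exception seen by a worker thread bumps failureCount; the test fails if it is non-zero at the end.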

    for (int threadCount = 0; threadCount < maxThreads; threadCount++) {
        final int threadId = threadCount;
        Thread t = new Thread(new Runnable() {
            @Override
            public void run() {
                int iterations = 100;
                ExecutionContext ec = createEC(null, null, false);
                do {
                    observer = new PassFailExecutionObserver(true, true) {
                        @Override
                        public void onResult(ExecutionResult executionResult) {

                        }
                    };

                    try {
                        client.execute(ec, key, new Object[] { TEST_TEXT }, observer, ev,
                                DefaultTimeConstraints.NO_CONSTRAINTS);
                        if (iterations % 5 == 0 && threadId == 0) {
                            client.setRemoteAddress("http://localhost:" + iterations + "/");
                        }
                    } catch (Exception ex) {
                        failureCount.incrementAndGet();
                    }
                } while (--iterations > 0);
            }
        });
        threads[threadCount] = t;
        t.start();

    }

    for (int i = 0; i < maxThreads; i++) {
        threads[i].join();
    }

    if (failureCount.get() > 0) {
        fail("An exception occurred during multithreaded test of remote address modification");
    }

}

From source file:com.twitter.distributedlog.auditor.DLAuditor.java

/**
 * Find leaked ledgers, phase 2: collect ledgers from the given URIs.
 */
private Set<Long> collectLedgersFromDL(List<URI> uris, List<List<String>> allocationPaths) throws IOException {
    final Set<Long> ledgers = new TreeSet<Long>();
    List<com.twitter.distributedlog.DistributedLogManagerFactory> factories = new ArrayList<com.twitter.distributedlog.DistributedLogManagerFactory>(
            uris.size());
    try {
        for (URI uri : uris) {
            factories.add(new com.twitter.distributedlog.DistributedLogManagerFactory(conf, uri));
        }
        final CountDownLatch doneLatch = new CountDownLatch(uris.size());
        final AtomicInteger numFailures = new AtomicInteger(0);
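        // Worker threads bump numFailures on error; the count is checked after the latch releases.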
        ExecutorService executor = Executors.newFixedThreadPool(uris.size());
        try {
            int i = 0;
            for (com.twitter.distributedlog.DistributedLogManagerFactory factory : factories) {
                final com.twitter.distributedlog.DistributedLogManagerFactory dlFactory = factory;
                final URI uri = uris.get(i);
                final List<String> aps = allocationPaths.get(i);
                i++;
                executor.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            logger.info("Collecting ledgers from {} : {}", uri, aps);
                            collectLedgersFromAllocator(uri, dlFactory, aps, ledgers);
                            synchronized (ledgers) {
                                logger.info("Collected {} ledgers from allocators for {} : {} ",
                                        new Object[] { ledgers.size(), uri, ledgers });
                            }
                            collectLedgersFromDL(uri, dlFactory, ledgers);
                        } catch (IOException e) {
                            numFailures.incrementAndGet();
                            logger.info("Error to collect ledgers from DL : ", e);
                        }
                        doneLatch.countDown();
                    }
                });
            }
            try {
                doneLatch.await();
                if (numFailures.get() > 0) {
                    throw new IOException(numFailures.get() + " errors while collecting ledgers from DL");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.warn("Interrupted on collecting ledgers from DL : ", e);
                throw new DLInterruptedException("Interrupted on collecting ledgers from DL : ", e);
            }
        } finally {
            executor.shutdown();
        }
    } finally {
        for (com.twitter.distributedlog.DistributedLogManagerFactory factory : factories) {
            factory.close();
        }
    }
    return ledgers;
}

From source file:org.apache.pulsar.broker.admin.impl.PersistentTopicsBase.java

protected void internalCreateSubscription(String subscriptionName, MessageIdImpl messageId,
        boolean authoritative) {
    if (topicName.isGlobal()) {
        validateGlobalNamespaceOwnership(namespaceName);
    }
    messageId = messageId == null ? (MessageIdImpl) MessageId.earliest : messageId;
    log.info("[{}][{}] Creating subscription {} at message id {}", clientAppId(), topicName, subscriptionName,
            messageId);

    PartitionedTopicMetadata partitionMetadata = getPartitionedTopicMetadata(topicName, authoritative);

    try {
        if (partitionMetadata.partitions > 0) {
            // Create the subscription on each partition
            PulsarAdmin admin = pulsar().getAdminClient();

            CountDownLatch latch = new CountDownLatch(partitionMetadata.partitions);
            AtomicReference<Throwable> exception = new AtomicReference<>();
            AtomicInteger failureCount = new AtomicInteger(0);

            for (int i = 0; i < partitionMetadata.partitions; i++) {
                admin.persistentTopics().createSubscriptionAsync(topicName.getPartition(i).toString(),
                        subscriptionName, messageId).handle((result, ex) -> {
                            if (ex != null) {
                                int c = failureCount.incrementAndGet();
                                // Fail the operation on an unknown exception, or if all the partitions
                                // failed due to subscription-already-exists
                                if (c == partitionMetadata.partitions
                                        || !(ex instanceof PulsarAdminException.ConflictException)) {
                                    exception.set(ex);
                                }
                            }
                            latch.countDown();
                            return null;
                        });
            }

            latch.await();
            if (exception.get() != null) {
                throw exception.get();
            }
        } else {
            validateAdminAccessForSubscriber(subscriptionName, authoritative);

            PersistentTopic topic = (PersistentTopic) getOrCreateTopic(topicName);

            if (topic.getSubscriptions().containsKey(subscriptionName)) {
                throw new RestException(Status.CONFLICT, "Subscription already exists for topic");
            }

            PersistentSubscription subscription = (PersistentSubscription) topic
                    .createSubscription(subscriptionName, InitialPosition.Latest).get();
            subscription.resetCursor(PositionImpl.get(messageId.getLedgerId(), messageId.getEntryId())).get();
            log.info("[{}][{}] Successfully created subscription {} at message id {}", clientAppId(), topicName,
                    subscriptionName, messageId);
        }
    } catch (Throwable e) {
        Throwable t = e.getCause();
        log.warn("[{}] [{}] Failed to create subscription {} at message id {}", clientAppId(), topicName,
                subscriptionName, messageId, e);
        if (t instanceof SubscriptionInvalidCursorPosition) {
            throw new RestException(Status.PRECONDITION_FAILED,
                    "Unable to find position for position specified: " + t.getMessage());
        } else {
            throw new RestException(e);
        }
    }
}

From source file:com.btoddb.fastpersitentqueue.flume.FpqChannelTest.java

@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    channel.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    channel.setMaxMemorySegmentSizeInBytes(10000000);
    channel.setMaxJournalFileSize(10000000);
    channel.setMaxJournalDurationInMs(30000);
    channel.setFlushPeriodInMs(1000);
    channel.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();
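    // pushSum and popSum must match at the end: every pushed value is popped exactly once.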

    channel.start();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        Transaction tx = channel.getTransaction();
                        tx.begin();
                        MyEvent event1 = new MyEvent();
                        event1.addHeader("x", String.valueOf(x)).setBody(new byte[entrySize - 8]); // leave room for the 8-byte long value
                        channel.put(event1);
                        tx.commit();
                        tx.close();

                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !channel.isEmpty()) {
                    try {
                        Transaction tx = channel.getTransaction();
                        tx.begin();

                        Event event;
                        int count = popBatchSize;
                        while (null != (event = channel.take()) && count-- > 0) {
                            popSum.addAndGet(Long.valueOf(event.getHeaders().get("x")));
                            numPops.incrementAndGet();
                        }

                        tx.commit();
                        tx.close();

                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(channel.isEmpty(), is(true));
    assertThat(pushSum.get(), is(popSum.get()));
}

From source file:jcuda.jcublas.kernel.TestMatrixOperations.java

@Test
public void testMultipleThreads() throws InterruptedException {
    int numThreads = 10;
    final INDArray array = Nd4j.rand(300, 300);
    final INDArray expected = array.dup().mmul(array).mmul(array).div(array).div(array);
    final AtomicInteger correct = new AtomicInteger();
    final CountDownLatch latch = new CountDownLatch(numThreads);
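    // Each thread increments 'correct' only if all of its repeated computations match the expected result.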
    System.out.println("Running on " + ContextHolder.getInstance().deviceNum());
    ExecutorService executors = ExecutorServiceProvider.getExecutorService();

    for (int x = 0; x < numThreads; x++) {
        executors.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    int total = 10;
                    int right = 0;
                    for (int x = 0; x < total; x++) {
                        StopWatch watch = new StopWatch();
                        watch.start();
                        INDArray actual = array.dup().mmul(array).mmul(array).div(array).div(array);
                        watch.stop();
                        if (expected.equals(actual))
                            right++;
                    }

                    if (total == right)
                        correct.incrementAndGet();
                } finally {
                    latch.countDown();
                }

            }
        });
    }

    latch.await();

    assertEquals(numThreads, correct.get());

}

From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java

/**
 * Tests the ability of the SegmentAggregator to reconcile AppendOperations (Cached/NonCached).
 */
@Test
public void testReconcileAppends() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    // Every write actually succeeds in Storage, but every failEvery-th write throws an injected error afterwards, so the writer believes it failed and must reconcile.
    AtomicInteger writeCount = new AtomicInteger();
    AtomicReference<Exception> setException = new AtomicReference<>();
    context.storage.setWriteInterceptor((segmentName, offset, data, length, storage) -> {
        if (writeCount.incrementAndGet() % failEvery == 0) {
            // Time to wreak some havoc.
            return storage.write(writeHandle(segmentName), offset, data, length, TIMEOUT).thenAccept(v -> {
                IntentionalException ex = new IntentionalException(
                        String.format("S=%s,O=%d,L=%d", segmentName, offset, length));
                setException.set(ex);
                throw ex;
            });
        } else {
            setException.set(null);
            return null;
        }
    });

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
    while (context.segmentAggregator.mustFlush()) {
        // Call flush() and inspect the result.
        FlushResult flushResult = null;

        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).get(TIMEOUT.toMillis(),
                    TimeUnit.MILLISECONDS);
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(),
                        ExceptionHelpers.getRealException(ex));
            } else {
                // Only expecting a BadOffsetException after our own injected exception.
                Throwable realEx = ExceptionHelpers.getRealException(ex);
                Assert.assertTrue("Unexpected exception thrown: " + realEx,
                        realEx instanceof BadOffsetException);
            }
        }

        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0,
                    flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0,
                    flushResult.getMergedBytes());
        }

        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join()
            .getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();

    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}

From source file:org.apache.bookkeeper.client.BookieInfoReader.java

Map<BookieSocketAddress, BookieInfo> getBookieInfo() throws BKException, InterruptedException {
    BookieClient bkc = bk.getBookieClient();
    final AtomicInteger totalSent = new AtomicInteger();
    final AtomicInteger totalCompleted = new AtomicInteger();
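    // The latch opens once totalCompleted catches up with totalSent, i.e. every bookie has responded.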
    final ConcurrentMap<BookieSocketAddress, BookieInfo> map = new ConcurrentHashMap<BookieSocketAddress, BookieInfo>();
    final CountDownLatch latch = new CountDownLatch(1);
    long requested = BookkeeperProtocol.GetBookieInfoRequest.Flags.TOTAL_DISK_CAPACITY_VALUE
            | BookkeeperProtocol.GetBookieInfoRequest.Flags.FREE_DISK_SPACE_VALUE;

    Collection<BookieSocketAddress> bookies;
    bookies = bk.bookieWatcher.getBookies();
    bookies.addAll(bk.bookieWatcher.getReadOnlyBookies());

    totalSent.set(bookies.size());
    for (BookieSocketAddress b : bookies) {
        bkc.getBookieInfo(b, requested, new GetBookieInfoCallback() {
            @Override
            public void getBookieInfoComplete(int rc, BookieInfo bInfo, Object ctx) {
                BookieSocketAddress b = (BookieSocketAddress) ctx;
                if (rc != BKException.Code.OK) {
                    if (LOG.isErrorEnabled()) {
                        LOG.error("Reading bookie info from bookie {} failed due to {}", b,
                                BKException.codeLogger(rc));
                    }
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Free disk space on bookie {} is {}.", b, bInfo.getFreeDiskSpace());
                    }
                    map.put(b, bInfo);
                }
                if (totalCompleted.incrementAndGet() == totalSent.get()) {
                    latch.countDown();
                }
            }
        }, b);
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        LOG.error("Received InterruptedException ", e);
        throw e;
    }
    return map;
}