Example usage for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicInteger.get(), collected from open source projects.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
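
Before the project examples below, here is a minimal, self-contained sketch of get() in action. The class and variable names are ours, chosen for illustration only: a worker thread increments a shared counter, and after join() the main thread reads the result with get(), which returns the current value with volatile memory effects.

import java.util.concurrent.atomic.AtomicInteger;

// Minimal sketch (illustrative, not taken from the projects below).
public class CounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger();

        Thread worker = new Thread(() -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet();
            }
        });
        worker.start();
        worker.join();

        // get() returns the current value; join() plus the volatile read
        // semantics of get() guarantee the worker's updates are visible here.
        System.out.println("final count = " + counter.get()); // prints 1000
    }
}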

Usage

From source file: org.apache.activemq.bugs.AMQ6131Test.java

@Test(timeout = 300000)
public void testDurableWithOnePendingAfterRestartAndIndexRecovery() throws Exception {
    final File persistentDir = getPersistentDir();

    broker.getBroker().addDestination(broker.getAdminConnectionContext(), new ActiveMQTopic("durable.sub"),
            false);

    ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection = (ActiveMQConnection) connectionFactory.createConnection();
    connection.setClientID("myId");
    connection.start();
    final Session jmsSession = connection.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);

    TopicSubscriber durable = jmsSession.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");
    final MessageProducer producer = jmsSession.createProducer(new ActiveMQTopic("durable.sub"));

    final int original = new ArrayList<File>(
            FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"), TrueFileFilter.INSTANCE))
                    .size();

    // 100KB message payload
    final byte[] data = new byte[100000];
    final Random random = new Random();
    random.nextBytes(data);

    // run test with enough messages to create a second journal file
    final AtomicInteger messageCount = new AtomicInteger();
    assertTrue("Should have added a journal file", Wait.waitFor(new Condition() {

        @Override
        public boolean isSatisified() throws Exception {
            final ActiveMQBytesMessage message = new ActiveMQBytesMessage();
            message.setContent(new ByteSequence(data));

            for (int i = 0; i < 100; i++) {
                producer.send(message);
                messageCount.getAndIncrement();
            }

            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() > original;
        }
    }));

    // Consume all but 1 message
    for (int i = 0; i < messageCount.get() - 1; i++) {
        durable.receive();
    }

    durable.close();

    // wait for the durable subscription to go inactive
    assertTrue("Subscription should go inactive", Wait.waitFor(new Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            return broker.getAdminView().getInactiveDurableTopicSubscribers().length == 1;
        }
    }));

    // force a GC of unneeded journal files
    getBroker().getPersistenceAdapter().checkpoint(true);

    // verify that no journal files have been garbage collected
    assertFalse("Should not have garbage collected", Wait.waitFor(new Wait.Condition() {

        @Override
        public boolean isSatisified() throws Exception {
            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() == original;
        }
    }, 5000, 500));

    // stop the broker so we can blow away the index
    getBroker().stop();
    getBroker().waitUntilStopped();

    // delete the index files so the durable subscriptions are gone from the index
    // The test passes if you take out this delete section
    for (File index : FileUtils.listFiles(persistentDir, new WildcardFileFilter("db.*"),
            TrueFileFilter.INSTANCE)) {
        FileUtils.deleteQuietly(index);
    }

    stopBroker();
    setUpBroker(false);

    assertEquals(1, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(0, broker.getAdminView().getDurableTopicSubscribers().length);

    ActiveMQConnectionFactory connectionFactory2 = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection2 = (ActiveMQConnection) connectionFactory2.createConnection();
    connection2.setClientID("myId");
    connection2.start();
    final Session jmsSession2 = connection2.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);

    TopicSubscriber durable2 = jmsSession2.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");

    assertEquals(0, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(1, broker.getAdminView().getDurableTopicSubscribers().length);

    assertNotNull(durable2.receive(5000));
}

From source file: com.indeed.lsmtree.recordlog.TestBlockCompressedRecordFile.java

public void testRandomWithReader() throws IOException {
    final BlockCompressedRecordFile<String> recordFile = createBlockCache();
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < 10000000; i++) {
                        int rand = r.nextInt(positions.size());
                        final RecordFile.Reader<String> reader = recordFile.reader(positions.get(rand));
                        assertTrue(reader.next());
                        assertEquals(reader.get(), strings.get(rand));
                        reader.close();
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
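    // spin until every reader thread has finished; get() reads the current
    // count with volatile semantics, making the workers' decrements visible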
    while (done.get() > 0) {
        Thread.yield();
    }
    recordFile.close();
}

From source file: org.apache.hadoop.hbase.client.TestAsyncTable.java

@Test
public void testCheckAndPut() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    AtomicInteger successCount = new AtomicInteger(0);
    AtomicInteger successIndex = new AtomicInteger(-1);
    int count = 10;
    CountDownLatch latch = new CountDownLatch(count);
    IntStream.range(0, count)
            .forEach(
                    i -> table
                            .checkAndPut(row, FAMILY, QUALIFIER, null,
                                    new Put(row).addColumn(FAMILY, QUALIFIER, concat(VALUE, i)))
                            .thenAccept(x -> {
                                if (x) {
                                    successCount.incrementAndGet();
                                    successIndex.set(i);
                                }
                                latch.countDown();
                            }));
    latch.await();
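    // only one of the racing checkAndPut calls can succeed; once all
    // callbacks have run, read the final tallies with get()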
    assertEquals(1, successCount.get());
    String actual = Bytes.toString(table.get(new Get(row)).get().getValue(FAMILY, QUALIFIER));
    assertTrue(actual.endsWith(Integer.toString(successIndex.get())));
}

From source file: com.netflix.curator.framework.recipes.cache.TestPathChildrenCache.java

@Test
public void testRebuildNode() throws Exception {
    PathChildrenCache cache = null;
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        client.create().creatingParentsIfNeeded().forPath("/test/one", "one".getBytes());

        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicInteger counter = new AtomicInteger();
        final Semaphore semaphore = new Semaphore(1);
        cache = new PathChildrenCache(client, "/test", true) {
            @Override
            void getDataAndStat(String fullPath) throws Exception {
                semaphore.acquire();
                counter.incrementAndGet();
                super.getDataAndStat(fullPath);
                latch.countDown();
            }
        };
        cache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);

        latch.await();

        int saveCounter = counter.get();
        client.setData().forPath("/test/one", "alt".getBytes());
        cache.rebuildNode("/test/one");
        Assert.assertEquals(cache.getCurrentData("/test/one").getData(), "alt".getBytes());
        Assert.assertEquals(saveCounter, counter.get());

        semaphore.release(1000);
    } finally {
        IOUtils.closeQuietly(cache);
        IOUtils.closeQuietly(client);
    }
}

From source file: com.indeed.lsmtree.recordlog.TestRecordLogDirectory.java

public void testRandomWithReader() throws Exception {
    final RecordLogDirectory<String> fileCache = createRecordLogDirectory();
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < 10000; i++) {
                        int rand = r.nextInt(positions.size());
                        final RecordFile.Reader<String> reader = fileCache.reader(positions.get(rand));
                        assertTrue(reader.next());
                        assertEquals(reader.get(), strings.get(rand));
                        reader.close();
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
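    // spin until every reader thread has finished, polling the countdown via get()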
    while (done.get() > 0) {
        Thread.yield();
    }
    fileCache.close();
}

From source file: org.apache.flume.channel.kafka.TestKafkaChannel.java

private List<Event> pullEvents(final KafkaChannel channel, ExecutorCompletionService<Void> submitterSvc,
        final int total, final boolean testRollbacks, final boolean retryAfterRollback) {
    final List<Event> eventsPulled = Collections.synchronizedList(new ArrayList<Event>(50));
    final CyclicBarrier barrier = new CyclicBarrier(5);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicInteger rolledBackCount = new AtomicInteger(0);
    final AtomicBoolean startedGettingEvents = new AtomicBoolean(false);
    final AtomicBoolean rolledBack = new AtomicBoolean(false);
    for (int k = 0; k < 5; k++) {
        final int index = k;
        submitterSvc.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Transaction tx = null;
                final List<Event> eventsLocal = Lists.newLinkedList();
                int takenByThisThread = 0;
                channel.registerThread();
                Thread.sleep(1000);
                barrier.await();
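                // keep taking events until the shared counter, read with get(),
                // accounts for every expected event (less any rolled-back batch)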
                while (counter.get() < (total - rolledBackCount.get())) {
                    if (tx == null) {
                        tx = channel.getTransaction();
                        tx.begin();
                    }
                    try {
                        Event e = channel.take();
                        if (e != null) {
                            startedGettingEvents.set(true);
                            eventsLocal.add(e);
                        } else {
                            if (testRollbacks && index == 4 && (!rolledBack.get())
                                    && startedGettingEvents.get()) {
                                tx.rollback();
                                tx.close();
                                tx = null;
                                rolledBack.set(true);
                                final int eventsLocalSize = eventsLocal.size();
                                eventsLocal.clear();
                                if (!retryAfterRollback) {
                                    rolledBackCount.set(eventsLocalSize);
                                    return null;
                                }
                            } else {
                                tx.commit();
                                tx.close();
                                tx = null;
                                eventsPulled.addAll(eventsLocal);
                                counter.getAndAdd(eventsLocal.size());
                                eventsLocal.clear();
                            }
                        }
                    } catch (Exception ex) {
                        eventsLocal.clear();
                        if (tx != null) {
                            tx.rollback();
                            tx.close();
                        }
                        tx = null;
                        ex.printStackTrace();
                    }
                }
                // Close txn.
                return null;
            }
        });
    }
    return eventsPulled;
}

From source file: com.spectralogic.ds3client.helpers.FileSystemHelper_Test.java

private void putObjectThenRunVerification(final FileSystemHelper fileSystemHelper,
        final ResultVerifier resultVerifier) throws IOException, URISyntaxException {
    try {
        final String DIR_NAME = "largeFiles/";
        final String[] FILE_NAMES = new String[] { "lesmis-copies.txt" };

        final Path dirPath = ResourceUtils.loadFileResource(DIR_NAME);

        final AtomicLong totalBookSizes = new AtomicLong(0);

        final List<String> bookTitles = new ArrayList<>();
        final List<Ds3Object> objects = new ArrayList<>();
        for (final String book : FILE_NAMES) {
            final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + book);
            final long bookSize = Files.size(objPath);
            totalBookSizes.getAndAdd(bookSize);
            final Ds3Object obj = new Ds3Object(book, bookSize);

            bookTitles.add(book);
            objects.add(obj);
        }

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 1;
        final int retryDelay = -1;
        final Ds3ClientHelpers ds3ClientHelpers = new Ds3ClientHelpersImpl(client, maxNumBlockAllocationRetries,
                maxNumObjectTransferAttempts, retryDelay, new SameThreadEventRunner(), fileSystemHelper);

        final AtomicInteger numTimesCallbackCalled = new AtomicInteger(0);

        final Ds3ClientHelpers.Job writeJob = ds3ClientHelpers.startWriteJob(BUCKET_NAME, objects);
        writeJob.attachObjectCompletedListener(new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                numTimesCallbackCalled.getAndIncrement();

                final ObjectStorageSpaceVerificationResult result = ds3ClientHelpers
                        .objectsFromBucketWillFitInDirectory(BUCKET_NAME, Arrays.asList(FILE_NAMES),
                                Paths.get("."));

                resultVerifier.verifyResult(result, totalBookSizes.get());
            }
        });

        writeJob.transfer(new FileObjectPutter(dirPath));

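        // the write job contains a single object, so the completion callback
        // should have fired exactly once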
        assertEquals(1, numTimesCallbackCalled.get());
    } finally {
        deleteAllContents(client, BUCKET_NAME);
    }
}

From source file: com.smoketurner.pipeline.application.core.MessageProcessor.java

/**
 * Stream an {@link S3Object} and process each line with the
 * processor.
 * 
 * @param object
 *            S3Object to download and process
 * @return number of events processed
 * @throws IOException
 *             if unable to stream the object
 */
private int streamObject(@Nonnull final S3Object object) throws IOException {

    final AtomicInteger eventCount = new AtomicInteger(0);
    try (S3ObjectInputStream input = object.getObjectContent()) {

        final BufferedReader reader;
        if (AmazonS3Downloader.isGZipped(object)) {
            reader = new BufferedReader(
                    new InputStreamReader(new StreamingGZIPInputStream(input), StandardCharsets.UTF_8));
        } else {
            reader = new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8));
        }

        // failed will be true if we did not successfully broadcast all
        // of the events because of no consumers
        final boolean failed = reader.lines().peek(event -> eventCount.incrementAndGet())
                .anyMatch(broadcaster::test);

        if (failed) {
            // abort the current S3 download
            input.abort();
            LOGGER.error("Partial events broadcast ({} sent) from key: {}/{}", eventCount.get(),
                    object.getBucketName(), object.getKey());
            throw new IOException("aborting download");
        }
    }
    return eventCount.get();
}

From source file: org.alfresco.bm.event.mongo.MongoResultServiceTest.java

/**
 * Create some results but then search for something that does not match
 */
@Test
public void getZeroResultsUsingHandler() {
    pumpRecords(10);
    long after = resultService.getLastResult().getStartTime() + TimeUnit.HOURS.toMillis(1); // Make sure the query window is out of range

    final AtomicInteger count = new AtomicInteger();
    resultService.getResults(new ResultHandler() {
        @Override
        public boolean processResult(long fromTime, long toTime,
                Map<String, DescriptiveStatistics> statsByEventName, Map<String, Integer> failuresByEventName)
                throws Throwable {
            // Check that we have a failure count for each event
            if (failuresByEventName.size() != statsByEventName.size()) {
                throw new RuntimeException("Didn't have a failure count matching stats count.");
            }
            // Increment
            count.incrementAndGet();
            return true;
        }
    }, after, 20L, 10L, false);

    // Check
    assertEquals(0, count.get());
}

From source file: org.alfresco.bm.event.mongo.MongoResultServiceTest.java

/**
 * Test the case where the reporting period is smaller than the stats window
 */
@Test
public void getCheckedResultsUsingHandler() {
    pumpRecords(10);

    final AtomicInteger count = new AtomicInteger();
    final Map<String, DescriptiveStatistics> lastStatsByEventName = new HashMap<String, DescriptiveStatistics>(
            17);

    resultService.getResults(new ResultHandler() {
        @Override
        public boolean processResult(long fromTime, long toTime,
                Map<String, DescriptiveStatistics> statsByEventName, Map<String, Integer> failuresByEventName)
                throws Throwable {
            // Always keep the last stats
            lastStatsByEventName.clear();
            lastStatsByEventName.putAll(statsByEventName);

            count.incrementAndGet();
            return true;
        }
    }, 0L, 200L, 10L, false);
    // Check
    assertEquals(10, count.get());

    // Now go through the last stats received
    // Check it against the last window size
    List<String> names = resultService.getEventNames();
    for (String eventName : names) {
        List<EventRecord> eventResults = resultService.getResults(eventName, 0, 1000);
        DescriptiveStatistics eventStats = new DescriptiveStatistics();
        for (EventRecord eventRecord : eventResults) {
            eventStats.addValue(eventRecord.getTime());
        }
        DescriptiveStatistics lastEventStats = lastStatsByEventName.get(eventName);
        assertNotNull("No last report for event '" + eventName + "'.", lastEventStats);
        // Now check that this matched the last report exactly
        assertEquals("Mean for '" + eventName + "' was not correct. ", (long) Math.floor(eventStats.getMean()),
                (long) Math.floor(lastStatsByEventName.get(eventName).getMean()));
    }
}