Example usage for java.util.concurrent.atomic AtomicInteger AtomicInteger

List of usage examples for java.util.concurrent.atomic AtomicInteger AtomicInteger

Introduction

On this page you can find example usage for the java.util.concurrent.atomic AtomicInteger constructor AtomicInteger(int initialValue).

Prototype

public AtomicInteger(int initialValue) 

Document

Creates a new AtomicInteger with the given initial value.
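
A minimal, self-contained sketch of this constructor in isolation (not taken from any of the projects below):

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerConstructorDemo {
    public static void main(String[] args) {
        // Start the counter at 5 rather than the default of 0.
        AtomicInteger counter = new AtomicInteger(5);

        System.out.println(counter.get());             // 5
        System.out.println(counter.incrementAndGet()); // 6 (atomic ++i)
        System.out.println(counter.getAndAdd(10));     // prints 6, value becomes 16
        System.out.println(counter.get());             // 16
    }
}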

Usage

From source file:edu.jhu.hlt.concrete.stanford.ConcreteStanfordRunner.java

public void run(Path inPath, Path outPath, Analytic<? extends TokenizedCommunication> analytic) {
    LOGGER.debug("Checking input and output directories.");
    try {
        prepareInputOutput(inPath, outPath);
    } catch (IOException e) {
        LOGGER.error("Caught IOException when checking input and output directories.", e);
    }

    String lowerInPathStr = inPath.toString().toLowerCase();
    try {
        sed.disable();

        // Outcomes based on the input path's extension:
        // - no recognized extension: program exit
        // - .concrete / .comm: annotate a single communication
        // - .tar: iterate over a tar archive
        // - .tar.gz / .tgz: iterate over a gzipped tar archive

        boolean isTarExt = lowerInPathStr.endsWith(".tar");
        boolean isTarGzExt = lowerInPathStr.endsWith(".tar.gz") || lowerInPathStr.endsWith(".tgz");
        boolean isConcreteExt = lowerInPathStr.endsWith(".concrete") || lowerInPathStr.endsWith(".comm");

        int nElementsInitPath = inPath.getNameCount();
        Path inputFileName = inPath.getName(nElementsInitPath - 1);

        // If no extension matches, exit.
        if (!isTarExt && !isTarGzExt && !isConcreteExt) {
            LOGGER.error("Input file extension was not '.concrete', '.comm', '.tar', '.tar.gz', or '.tgz'; exiting.");
            System.exit(1);
        } else if (isConcreteExt) {
            // IF .concrete, run single communication.
            LOGGER.info("Annotating single .concrete file at: {}", inPath.toString());
            try (InputStream in = Files.newInputStream(inPath);
                    BufferedInputStream bin = new BufferedInputStream(in, 1024 * 8 * 24);) {
                byte[] inputBytes = IOUtils.toByteArray(bin);
                Communication c = ser.fromBytes(inputBytes);
                WrappedCommunication annotated = analytic.annotate(c);
                Communication ar = annotated.getRoot();
                WritableCommunication wc = new WritableCommunication(ar);
                if (Files.isDirectory(outPath))
                    wc.writeToFile(outPath.resolve(inputFileName), true);
                else
                    wc.writeToFile(outPath, true);
            } catch (AnalyticException e) {
                LOGGER.error("Caught exception when running the analytic.", e);
            }
        } else {

            Path localOutPath;
            if (Files.isDirectory(outPath))
                // if directory, use same extension as input.
                localOutPath = outPath.resolve(inputFileName);
            else
                localOutPath = outPath;

            // Iterate over the archive.
            AutoCloseableIterator<byte[]> iter;
            try (InputStream is = Files.newInputStream(inPath);
                    BufferedInputStream bis = new BufferedInputStream(is, 1024 * 8 * 24);) {

                // open iterator based on file extension
                iter = isTarExt ? new TarArchiveEntryByteIterator(bis) : new TarGzArchiveEntryByteIterator(bis);
                try (OutputStream os = Files.newOutputStream(localOutPath);
                        BufferedOutputStream bos = new BufferedOutputStream(os, 1024 * 8 * 24);) {
                    TarArchiver archiver = isTarExt ? new TarArchiver(bos)
                            : new TarArchiver(new GzipCompressorOutputStream(bos));

                    final StopWatch sw = new StopWatch();
                    sw.start();

                    int docCtr = 0;
                    final AtomicInteger tokenCtr = new AtomicInteger(0);
                    LOGGER.info("Iterating over archive: {}", inPath.toString());
                    while (iter.hasNext()) {
                        Communication n = ser.fromBytes(iter.next());
                        LOGGER.info("Annotating communication: {}", n.getId());
                        try {
                            TokenizedCommunication a = analytic.annotate(n);
                            a.getTokenizations().parallelStream()
                                    .map(tkzToInt -> tkzToInt.getTokenList().getTokenListSize())
                                    .forEach(ct -> tokenCtr.addAndGet(ct));
                            archiver.addEntry(new ArchivableCommunication(a.getRoot()));
                            docCtr++;
                        } catch (AnalyticException | IOException | StringIndexOutOfBoundsException e) {
                            LOGGER.error("Caught exception processing document: " + n.getId(), e);
                        }
                    }

                    try {
                        archiver.close();
                        iter.close();
                    } catch (Exception e) {
                        // unlikely.
                        LOGGER.info("Caught exception closing iterator.", e);
                    }

                    sw.stop();
                    Duration rt = new Duration(sw.getTime());
                    Seconds st = rt.toStandardSeconds();
                    Minutes m = rt.toStandardMinutes();
                    int minutesInt = m.getMinutes();

                    LOGGER.info("Complete.");
                    LOGGER.info("Runtime: approximately {} minutes.", minutesInt);
                    LOGGER.info("Processed {} documents.", docCtr);
                    final int tokens = tokenCtr.get();
                    LOGGER.info("Processed {} tokens.", tokens);
                    if (docCtr > 0 && minutesInt > 0) {
                        final float minutesFloat = minutesInt;
                        float perMin = docCtr / minutesFloat;
                        LOGGER.info("Processed approximately {} documents/minute.", perMin);
                        LOGGER.info("Processed approximately {} tokens/second.",
                                st.getSeconds() / minutesFloat);
                    }
                }
            }
        }
    } catch (IOException | ConcreteException e) {
        LOGGER.error("Caught exception while running the analytic over archive.", e);
    }
}
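
In the example above, tokenCtr is an AtomicInteger because it is updated from a parallelStream(), where several threads add to it concurrently; a plain local int would not even compile inside the lambda, and a mutable box would lose updates. A minimal sketch of the same idiom with hypothetical data (not from the project):

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class ParallelCounterSketch {
    public static void main(String[] args) {
        List<Integer> tokenCounts = List.of(3, 5, 8, 13);

        // Shared counter, safe under concurrent addAndGet calls.
        AtomicInteger total = new AtomicInteger(0);
        tokenCounts.parallelStream().forEach(total::addAndGet);
        System.out.println(total.get()); // 29

        // Equivalent without shared mutable state, often preferable:
        int sum = tokenCounts.parallelStream().mapToInt(Integer::intValue).sum();
        System.out.println(sum); // 29
    }
}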

From source file:com.appleframework.monitor.model.MetricDog.java

private int incrementFireTimes(String projectName, String metricName) {
    String metricNotifyKey = projectName + "_" + metricName;
    metricFireTimes.putIfAbsent(metricNotifyKey, new AtomicInteger(0));
    return metricFireTimes.get(metricNotifyKey).incrementAndGet();
}
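
This putIfAbsent-then-get sequence is the classic pre-Java-8 way of keeping one AtomicInteger counter per key. A sketch of the same method using computeIfAbsent, assuming metricFireTimes is a ConcurrentMap (its actual declaration is not shown above); this variant avoids allocating a throwaway AtomicInteger when the key already exists:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class FireTimesSketch {
    private final ConcurrentMap<String, AtomicInteger> metricFireTimes = new ConcurrentHashMap<>();

    int incrementFireTimes(String projectName, String metricName) {
        String key = projectName + "_" + metricName;
        // computeIfAbsent only allocates the AtomicInteger when the key is new.
        return metricFireTimes.computeIfAbsent(key, k -> new AtomicInteger(0)).incrementAndGet();
    }
}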

From source file:de.undercouch.bson4jackson.BsonParserTest.java

/**
 * Tests reading a very large string using multiple threads. Refers to
 * issue #19. The failure does not reproduce deterministically, but occurs
 * with very high probability; you may have to run the unit tests several
 * times to really rule out multi-threading issues.
 * @throws Exception if something went wrong
 * @author endasb
 */
@Test
public void parseBigStringInThreads() throws Exception {
    final BSONObject o = new BasicBSONObject();
    final AtomicInteger fails = new AtomicInteger(0);
    StringBuilder bigStr = new StringBuilder();
    for (int i = 0; i < 80000; i++) {
        bigStr.append("abc");
    }
    o.put("String", bigStr.toString());

    ArrayList<Thread> threads = new ArrayList<Thread>();
    for (int i = 0; i < 50; i++) {
        threads.add(new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Map<?, ?> data = parseBsonObject(o);
                    data = parseBsonObject(o);
                    assertNotNull(data);
                } catch (Exception e) {
                    fail("Threading issue " + fails.incrementAndGet());
                }
            }
        }));
    }
    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    assertEquals(0, fails.get());
}
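
One detail worth noting: fail() inside a worker thread only aborts that thread, so JUnit never sees it directly. The test therefore also tallies failures in the AtomicInteger and asserts on it from the main thread after join(). A stripped-down sketch of that pattern (hypothetical work, not the BSON parsing above):

import java.util.concurrent.atomic.AtomicInteger;

public class WorkerFailureCountSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger fails = new AtomicInteger(0);
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                try {
                    // ... work that may throw ...
                } catch (Exception e) {
                    fails.incrementAndGet(); // recorded, checked later on the main thread
                }
            });
            workers[i].start();
        }
        for (Thread w : workers) {
            w.join();
        }
        if (fails.get() != 0) {
            throw new AssertionError(fails.get() + " worker(s) failed");
        }
    }
}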

From source file:org.dataconservancy.ui.it.support.CreateIdApiRequestIT.java

/**
 * Generates {@link #countToGenerate} ids and ensures that they are all unique. Uses multiple threads to
 * generate the ids.
 *
 * @throws Exception
 */
@Test
public void testGenerateUniqueIdsMultipleThreads() throws Exception {
    long start = Calendar.getInstance().getTimeInMillis();
    // The threads used to generate ids
    Thread[] threads = new Thread[5];

    // HttpClient requires a ThreadSafeClientConnectionManager
    final ThreadSafeClientConnManager conman = new ThreadSafeClientConnManager();
    conman.setMaxTotal(50);
    conman.setDefaultMaxPerRoute(5);
    hc = new DefaultHttpClient(conman);

    assertEquals("The number of threads (" + threads.length
            + ") must evenly divide into thenumber of ids to be " + "generated (" + countToGenerate + ")", 0,
            countToGenerate % threads.length);
    final int generatePerThread = countToGenerate / threads.length;

    // Launch a thread, with each thread being responsible for generating a portion of the total ids
    for (int j = 0; j < threads.length; j++) {
        threads[j] = new Thread(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < generatePerThread; i++) {
                    double seed = Math.random();
                    log.trace("Seed is {}", seed);
                    Types t = selectType(seed, Types.values());
                    log.trace("Selected type {} with seed value {}", t, seed);

                    if (log.isDebugEnabled()) {
                        idTypeDistribution.putIfAbsent(t, new AtomicInteger(0));
                        idTypeDistribution.get(t).getAndAdd(1);
                    }

                    try {
                        generatedIds.add(reqFactory.createIdApiRequest(t).execute(hc));
                    } catch (IOException e) {
                        fail(e.getMessage());
                    }
                }
            }
        }, "ID Generation Thread " + j);
        threads[j].start();
    }

    // Wait for threads to stop
    for (int j = 0; j < threads.length; j++) {
        threads[j].join();
    }

    if (log.isDebugEnabled()) {
        StringBuilder sb = new StringBuilder("ID distribution:\n");
        int totalGenerated = 0;
        for (Types t : Types.values()) {
            final Integer typeTotal = idTypeDistribution.get(t).intValue();
            totalGenerated += typeTotal;
            sb.append("Type: ").append(t).append(" Count: ").append(typeTotal).append("\n");
        }
        sb.append("Total generated: ").append(totalGenerated).append("\n");
        sb.append("Unique generated: ").append(generatedIds.size()).append("\n");
        sb.append("Number of threads: ").append(threads.length).append("\n");
        sb.append("Execution time: ").append(Calendar.getInstance().getTimeInMillis() - start).append(" ms\n");
        log.debug(sb.toString());
    }

    // The number of generated IDs (stored in the Set) should equal 'countToGenerate'
    assertEquals("Expected " + countToGenerate + " to be generated, but the Set contained "
            + generatedIds.size() + ".  Some ids may not have been unique.", countToGenerate,
            generatedIds.size());

}

From source file:org.glassfish.jersey.examples.sseitemstore.ItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link ItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<Queue<Integer>>(MAX_LISTENERS);
    final EventSource[] sources = new EventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<Integer>();
        indexQueues.add(indexes);

        es.register(new EventListener() {
            @SuppressWarnings("MagicNumber")
            @Override
            public void onEvent(InboundEvent inboundEvent) {
                try {
                    if (inboundEvent.getName() == null) {
                        final String data = inboundEvent.readData();
                        LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId()
                                + " data=" + data);
                        indexes.add(items.indexOf(data));
                    } else if ("size".equals(inboundEvent.getName())) {
                        sizeEventsCount.incrementAndGet();
                    }
                } catch (Exception ex) {
                    LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                    indexes.add(-999);
                } finally {
                    latch.countDown();
                }
            }
        });
    }

    try {
        open(sources);

        for (String item : items) {
            postItem(itemsTarget, item);
        }

        assertTrue("Waiting to receive all events has timed out.",
                latch.await(
                        (1000 + MAX_LISTENERS * EventSource.RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    for (String item : items) {
        assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item));
    }

    int queueId = 0;
    for (Queue<Integer> indexes : indexQueues) {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId,
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId, items.size(),
                indexes.size());
        queueId++;
    }

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}

From source file:com.ning.arecibo.collector.persistent.TestDefaultTimelineDAO.java

@Test(groups = "slow")
public void testGetSampleKindsByHostName() throws Exception {
    final TimelineDAO dao = new DefaultTimelineDAO(helper.getDBI(), sampleCoder);
    final DateTime startTime = new DateTime(DateTimeZone.UTC);
    final DateTime endTime = startTime.plusSeconds(2);

    // Create the host
    final String hostName = UUID.randomUUID().toString();
    final Integer hostId = dao.getOrAddHost(hostName);
    Assert.assertNotNull(hostId);

    // Create an event category id (needed for the join in the dashboard query)
    final Integer eventCategoryId = 123;

    // Create the samples
    final String sampleOne = UUID.randomUUID().toString();
    final Integer sampleOneId = dao.getOrAddSampleKind(hostId, eventCategoryId, sampleOne);
    Assert.assertNotNull(sampleOneId);
    final String sampleTwo = UUID.randomUUID().toString();
    final Integer sampleTwoId = dao.getOrAddSampleKind(hostId, eventCategoryId, sampleTwo);
    Assert.assertNotNull(sampleTwoId);

    // Basic retrieval tests
    final BiMap<Integer, CategoryIdAndSampleKind> sampleKinds = dao.getSampleKinds();
    Assert.assertEquals(sampleKinds.size(), 2);
    Assert.assertEquals(sampleKinds.get(sampleOneId).getEventCategoryId(), (int) eventCategoryId);
    Assert.assertEquals(sampleKinds.get(sampleOneId).getSampleKind(), sampleOne);
    Assert.assertEquals(sampleKinds.get(sampleTwoId).getEventCategoryId(), (int) eventCategoryId);
    Assert.assertEquals(sampleKinds.get(sampleTwoId).getSampleKind(), sampleTwo);
    Assert.assertEquals(dao.getCategoryIdAndSampleKind(sampleOneId).getEventCategoryId(),
            (int) eventCategoryId);
    Assert.assertEquals(dao.getCategoryIdAndSampleKind(sampleOneId).getSampleKind(), sampleOne);
    Assert.assertEquals(dao.getCategoryIdAndSampleKind(sampleTwoId).getEventCategoryId(),
            (int) eventCategoryId);
    Assert.assertEquals(dao.getCategoryIdAndSampleKind(sampleTwoId).getSampleKind(), sampleTwo);

    // No samples yet
    Assert.assertEquals(ImmutableList.<Integer>copyOf(dao.getSampleKindIdsByHostId(hostId)).size(), 0);

    dao.insertTimelineChunk(new TimelineChunk(sampleCoder, 0, hostId, sampleOneId, startTime, endTime,
            new byte[0], new byte[0], 0));
    final ImmutableList<Integer> firstFetch = ImmutableList
            .<Integer>copyOf(dao.getSampleKindIdsByHostId(hostId));
    Assert.assertEquals(firstFetch.size(), 1);
    Assert.assertEquals(firstFetch.get(0), sampleOneId);

    dao.insertTimelineChunk(new TimelineChunk(sampleCoder, 0, hostId, sampleTwoId, startTime, endTime,
            new byte[0], new byte[0], 0));
    final ImmutableList<Integer> secondFetch = ImmutableList
            .<Integer>copyOf(dao.getSampleKindIdsByHostId(hostId));
    Assert.assertEquals(secondFetch.size(), 2);
    Assert.assertTrue(secondFetch.contains(sampleOneId));
    Assert.assertTrue(secondFetch.contains(sampleTwoId));

    // Random sampleKind for random host
    dao.insertTimelineChunk(new TimelineChunk(sampleCoder, 0, Integer.MAX_VALUE - 100, Integer.MAX_VALUE,
            startTime, endTime, new byte[0], new byte[0], 0));
    final ImmutableList<Integer> thirdFetch = ImmutableList
            .<Integer>copyOf(dao.getSampleKindIdsByHostId(hostId));
    Assert.assertEquals(thirdFetch.size(), 2);
    Assert.assertTrue(thirdFetch.contains(sampleOneId));
    Assert.assertTrue(thirdFetch.contains(sampleTwoId));

    // Test dashboard query
    final AtomicInteger chunksSeen = new AtomicInteger(0);
    dao.getSamplesByHostIdsAndSampleKindIds(ImmutableList.<Integer>of(hostId),
            ImmutableList.<Integer>of(sampleOneId, sampleTwoId), startTime, startTime.plusSeconds(2),
            new TimelineChunkConsumer() {
                @Override
                public void processTimelineChunk(final TimelineChunk chunk) {
                    chunksSeen.incrementAndGet();
                    Assert.assertEquals((Integer) chunk.getHostId(), hostId);
                    Assert.assertTrue(
                            chunk.getSampleKindId() == sampleOneId || chunk.getSampleKindId() == sampleTwoId);
                }
            });
    Assert.assertEquals(chunksSeen.get(), 2);

    // Dummy queries
    dao.getSamplesByHostIdsAndSampleKindIds(ImmutableList.<Integer>of(Integer.MAX_VALUE), null, startTime,
            startTime.plusDays(1), FAIL_CONSUMER);
    dao.getSamplesByHostIdsAndSampleKindIds(ImmutableList.<Integer>of(hostId),
            ImmutableList.<Integer>of(Integer.MAX_VALUE), startTime, startTime.plusDays(1), FAIL_CONSUMER);
    dao.getSamplesByHostIdsAndSampleKindIds(ImmutableList.<Integer>of(hostId),
            ImmutableList.<Integer>of(sampleOneId, sampleTwoId), startTime.plusDays(1), startTime.plusDays(2),
            FAIL_CONSUMER);
}

From source file:com.ery.estorm.util.Threads.java

/**
 * Returns a {@link java.util.concurrent.ThreadFactory} that names each created thread uniquely, with a common prefix.
 *
 * @param prefix
 *            The prefix of every created Thread's name
 * @return a {@link java.util.concurrent.ThreadFactory} that names threads
 */
public static ThreadFactory getNamedThreadFactory(final String prefix) {
    SecurityManager s = System.getSecurityManager();
    final ThreadGroup threadGroup = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();

    return new ThreadFactory() {
        final AtomicInteger threadNumber = new AtomicInteger(1);
        private final int poolNumber = Threads.poolNumber.getAndIncrement();
        final ThreadGroup group = threadGroup;

        @Override
        public Thread newThread(Runnable r) {
            final String name = prefix + "-pool" + poolNumber + "-t" + threadNumber.getAndIncrement();
            return new Thread(group, r, name);
        }
    };
}
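
A usage sketch for the factory above (hypothetical caller; the exact pool number in the thread name depends on Threads.poolNumber, which is not shown here):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class NamedFactoryDemo {
    public static void main(String[] args) {
        // Threads come out named e.g. "worker-pool1-t1", "worker-pool1-t2", ...
        // which makes thread dumps and logs much easier to read.
        ExecutorService pool = Executors.newFixedThreadPool(2, Threads.getNamedThreadFactory("worker"));
        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}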

From source file:com.tango.elasticsearch.rest.action.unique.UniqueTermsAction.java

private void submitSearchRequests(final RestRequest request, final RestChannel channel,
        Map<SearchRequest, String> searchRequestsToCacheKeyMap, final List<TermsResult> cachedResponses) {
    int indexIndex = 0;
    final AtomicArray<Throwable> searchErrors = new AtomicArray<Throwable>(searchRequestsToCacheKeyMap.size());
    final AtomicInteger counter = new AtomicInteger(searchRequestsToCacheKeyMap.size());
    final AtomicArray<TermsResult> searchResults = new AtomicArray<TermsResult>(
            searchRequestsToCacheKeyMap.size());
    for (Map.Entry<SearchRequest, String> searchRequestEntry : searchRequestsToCacheKeyMap.entrySet()) {
        final String requestKey = searchRequestEntry.getValue();
        final SearchRequest searchRequest = searchRequestEntry.getKey();
        final int index = indexIndex;
        client.search(searchRequest, new ActionListener<SearchResponse>() {
            @Override
            public void onResponse(SearchResponse response) {
                try {
                    TermsResult result = extractTermsResult(response);
                    searchResults.set(index, result);
                    if (requestKey.length() > 0 && result != null && result.getOtherCount() == 0) {
                        putToCache(requestKey, result);
                    }
                    if (counter.decrementAndGet() == 0) {
                        Throwable throwable = checkErrors(searchErrors);
                        if (throwable != null) {
                            processFailure(throwable, channel, request);
                        } else {
                            Collections.addAll(cachedResponses,
                                    searchResults.toArray(new TermsResult[searchResults.length()]));
                            aggregateResults(cachedResponses, request, channel);
                        }
                    }
                } catch (Exception e) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to execute search (building response)", e);
                    }
                    onFailure(e);
                }
            }

            private Throwable checkErrors(AtomicArray<Throwable> searchErrors) {
                Throwable result = null;
                for (int i = 0; i < searchErrors.length(); i++) {
                    Throwable th = searchErrors.get(i);
                    if (th != null) {
                        result = th;
                        break;
                    }
                }
                return result;
            }

            @Override
            public void onFailure(Throwable e) {
                searchErrors.set(index, e);
                if (counter.decrementAndGet() <= 0) {
                    processFailure(e, channel, request);
                }
            }
        });
        indexIndex++;
    }
}
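
The counter here acts as a completion latch over asynchronous callbacks: it starts at the number of outstanding search requests, and whichever callback decrements it to zero performs the final aggregation. A generic sketch of the idiom (names illustrative, not the Elasticsearch API above):

import java.util.concurrent.atomic.AtomicInteger;

public class FanOutSketch {
    static void runAll(Runnable[] tasks, Runnable whenAllDone) {
        final AtomicInteger remaining = new AtomicInteger(tasks.length);
        for (Runnable task : tasks) {
            new Thread(() -> {
                task.run();
                // decrementAndGet() is atomic, so exactly one thread
                // observes the transition to zero and runs the completion step.
                if (remaining.decrementAndGet() == 0) {
                    whenAllDone.run();
                }
            }).start();
        }
    }
}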

From source file:org.uimafit.factory.ExternalResourceFactoryTest.java

@Test
public void testDirectInjectionAutowire() throws Exception {
    // Create analysis engine description
    AnalysisEngineDescription desc = createPrimitiveDescription(DummyAE2.class);

    // Bind external resources for DummyAE
    bindResources(desc);

    // Create a custom resource manager that allows injecting any Java object
    // as an external dependency
    final Map<String, Object> externalContext = new HashMap<String, Object>();
    externalContext.put(DummyAE2.RES_INJECTED_POJO1, "Just an injected POJO");
    externalContext.put(DummyAE2.RES_INJECTED_POJO2, new AtomicInteger(5));

    SimpleNamedResourceManager resMgr = new SimpleNamedResourceManager();
    resMgr.setExternalContext(externalContext);
    resMgr.setAutoWireEnabled(true);
    assertTrue(resMgr.isAutoWireEnabled());

    AnalysisEngine ae = UIMAFramework.produceAnalysisEngine(desc, resMgr, null);
    assertNotNull(ae);

    ae.process(ae.newJCas());
}
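
Here the AtomicInteger is not used for concurrency at all: it is simply a convenient mutable Integer-like object to inject as a POJO. The same "mutable int box" role shows up when a lambda needs to update a local counter, since locals captured by lambdas must be effectively final. A small unrelated sketch:

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class MutableBoxSketch {
    public static void main(String[] args) {
        AtomicInteger lineNo = new AtomicInteger(1);
        List.of("alpha", "beta", "gamma")
                .forEach(s -> System.out.println(lineNo.getAndIncrement() + ": " + s));
    }
}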

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessReadWriteLock.java

@Test
public void testBasic() throws Exception {
    final int CONCURRENCY = 8;
    final int ITERATIONS = 100;

    final Random random = new Random();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    final AtomicInteger maxConcurrentCount = new AtomicInteger(0);
    final AtomicInteger writeCount = new AtomicInteger(0);
    final AtomicInteger readCount = new AtomicInteger(0);

    List<Future<Void>> futures = Lists.newArrayList();
    ExecutorService service = Executors.newCachedThreadPool();
    for (int i = 0; i < CONCURRENCY; ++i) {
        Future<Void> future = service.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                        new RetryOneTime(1));
                client.start();
                try {
                    InterProcessReadWriteLock lock = new InterProcessReadWriteLock(client, "/lock");
                    for (int i = 0; i < ITERATIONS; ++i) {
                        if (random.nextInt(100) < 10) {
                            doLocking(lock.writeLock(), concurrentCount, maxConcurrentCount, random, 1);
                            writeCount.incrementAndGet();
                        } else {
                            doLocking(lock.readLock(), concurrentCount, maxConcurrentCount, random,
                                    Integer.MAX_VALUE);
                            readCount.incrementAndGet();
                        }
                    }
                } finally {
                    IOUtils.closeQuietly(client);
                }
                return null;
            }
        });
        futures.add(future);
    }

    for (Future<Void> future : futures) {
        future.get();
    }

    System.out.println("Writes: " + writeCount.get() + " - Reads: " + readCount.get() + " - Max Reads: "
            + maxConcurrentCount.get());

    Assert.assertTrue(writeCount.get() > 0);
    Assert.assertTrue(readCount.get() > 0);
    Assert.assertTrue(maxConcurrentCount.get() > 1);
}