Example usage for java.util.concurrent.atomic AtomicInteger addAndGet

List of usage examples for java.util.concurrent.atomic AtomicInteger addAndGet

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicInteger addAndGet.

Prototype

public final int addAndGet(int delta) 

Document

Atomically adds the given value to the current value, with memory effects as specified by VarHandle#getAndAdd.
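
For quick reference, here is a minimal, self-contained sketch (not drawn from the projects below) showing what addAndGet returns:

import java.util.concurrent.atomic.AtomicInteger;

public class AddAndGetExample {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(10);
        // Atomically add 5 and receive the updated value.
        int updated = counter.addAndGet(5);    // updated == 15
        // A negative delta subtracts.
        int decreased = counter.addAndGet(-3); // decreased == 12
        System.out.println(updated + " " + decreased); // prints: 15 12
    }
}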

Usage

From source file:byps.test.servlet.MyServerIF.java

@Override
public int callClientParallel(int nbOfCalls) throws RemoteException {
    if (log.isDebugEnabled())
        log.debug("callClientParallel(" + nbOfCalls);
    final ClientIF clientIF = getClientIF();
    final AtomicInteger ret = new AtomicInteger(0);
    ExecutorService tpool = Executors.newCachedThreadPool();
    for (int i = 0; i < nbOfCalls; i++) {
        Runnable run = new Runnable() {
            public void run() {
                try {
                    if (log.isDebugEnabled())
                        log.debug("clientIF.incrementInt(");
                    int v = clientIF.incrementInt(0);
                    if (log.isDebugEnabled())
                        log.debug(")clientIF.incrementInt");
                    ret.addAndGet(v);
                } catch (Exception e) {
                    log.error(e);
                }
            }
        };
        tpool.execute(run);
    }
    tpool.shutdown();
    try {
        tpool.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new BException(BExceptionC.CANCELLED, e.toString(), e);
    }
    if (log.isDebugEnabled())
        log.debug(")callClientParallel");
    return ret.get();
}

From source file:edu.msu.cme.rdp.kmer.cli.KmerCoverage.java

public void printCovereage(OutputStream coverage_out, OutputStream abundance_out) throws IOException {
    adjustCount();
    // print out the weighted kmer coverage
    // we found mean coverage matched the previous biological observation
    PrintStream coverage_outStream = new PrintStream(coverage_out);
    coverage_outStream.println("#total reads: " + totalReads.intValue());
    coverage_outStream.println("#use mean_cov to adjust the contig abundance, not median_cov ");
    coverage_outStream.println("#seqid\tmean_cov\tmedian_cov\ttotal_pos\tcovered_pos\tcovered_ratio");

    for (Contig contig : contigMap.values()) {
        ArrayList<Double> counts = new ArrayList<Double>();
        int coveredPos = 0;
        for (int pos = 0; pos < contig.coverage.length; pos++) {
            if (contig.coverage[pos] > 0) {
                coveredPos++;
            }
            counts.add(contig.coverage[pos]);
        }
        if (coveredPos > 0) {
            coverage_outStream.println(contig.name + "\t" + String.format(dformat, StdevCal.calMean(counts))
                    + "\t" + String.format(dformat, (StdevCal.calMedian(counts))) + "\t" + counts.size() + "\t"
                    + coveredPos + "\t"
                    + String.format(dformat, (double) coveredPos / (double) contig.coverage.length));
        } else { // no coverage
            coverage_outStream.println(
                    contig.name + "\t" + 0 + "\t" + 0 + "\t" + contig.coverage.length + "\t" + 0 + "\t" + 0);
        }
    }
    coverage_outStream.close();

    // print kmer abundance
    HashMap<Integer, Integer> abundanceCountMap = new HashMap<Integer, Integer>(); // the frequency of the kmer abundance
    PrintStream abundance_outStream = new PrintStream(abundance_out);
    // need to merge the counts from forward and reverse together.
    HashSet<Kmer> kmerSet = new HashSet<Kmer>();
    kmerSet.addAll(kmerMaps[0].keySet());
    for (Kmer kmer : kmerSet) {
        AtomicInteger abundance = kmerMaps[0].get(kmer).count;

        String reverseKmerStr = IUBUtilities.reverseComplement(kmer.decodeLong(kmer.getLongKmers()));
        Kmer reverseKmer = (new NuclKmerGenerator(reverseKmerStr, this.kmerSize)).next();
        KmerAbund kmerAbund = kmerMaps[1].get(reverseKmer);

        if (kmerAbund != null) {
            abundance.addAndGet(kmerAbund.count.get());
        }

        Integer count = abundanceCountMap.get(abundance.get());
        if (count == null) {
            abundanceCountMap.put(abundance.get(), 1);
        } else {
            abundanceCountMap.put(abundance.get(), count + 1);
        }
    }

    abundance_outStream.println("kmer_abundance\tfrequency");
    for (Integer abundance : abundanceCountMap.keySet()) {
        abundance_outStream.println(abundance + "\t" + abundanceCountMap.get(abundance));
    }
    abundance_outStream.close();
}

From source file:org.apache.hadoop.hbase.regionserver.TestRegionReplicaFailover.java

/**
 * Tests the case where there are 3 region replicas and the primary is continuously accepting
 * new writes while one of the secondaries is killed. Verification is done for both of the
 * secondary replicas.
 */
@Test(timeout = 120000)
public void testSecondaryRegionKillWhilePrimaryIsAcceptingWrites() throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
            Table table = connection.getTable(htd.getTableName());
            Admin admin = connection.getAdmin()) {
        // start a thread to do the loading of primary
        HTU.loadNumericRows(table, fam, 0, 1000); // start with some base
        admin.flush(table.getName());
        HTU.loadNumericRows(table, fam, 1000, 2000);

        final AtomicReference<Throwable> ex = new AtomicReference<Throwable>(null);
        final AtomicBoolean done = new AtomicBoolean(false);
        final AtomicInteger key = new AtomicInteger(2000);

        Thread loader = new Thread() {
            @Override
            public void run() {
                while (!done.get()) {
                    try {
                        HTU.loadNumericRows(table, fam, key.get(), key.get() + 1000);
                        key.addAndGet(1000);
                    } catch (Throwable e) {
                        ex.compareAndSet(null, e);
                    }
                }
            }
        };
        loader.start();

        Thread aborter = new Thread() {
            @Override
            public void run() {
                try {
                    boolean aborted = false;
                    for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
                        for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) {
                            if (r.getRegionInfo().getReplicaId() == 1) {
                                LOG.info("Aborting region server hosting secondary region replica");
                                rs.getRegionServer().abort("for test");
                                aborted = true;
                            }
                        }
                    }
                    assertTrue(aborted);
                } catch (Throwable e) {
                    ex.compareAndSet(null, e);
                }
            };
        };

        aborter.start();
        aborter.join();
        done.set(true);
        loader.join();

        assertNull(ex.get());

        assertTrue(key.get() > 1000); // assert that the test is working as designed
        LOG.info("Loaded up to key :" + key.get());
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 0, 30000);
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 1, 30000);
        verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 2, 30000);
    }

    // restart the region server
    HTU.getMiniHBaseCluster().startRegionServer();
}

From source file:org.dita.dost.AbstractIntegrationTest.java

/**
 * @param id      old ID value
 * @param idMap   ID map
 * @param counter counter
 * @param pattern pattern to test
 */
private void rewriteId(final String id, final Map<String, String> idMap, final AtomicInteger counter,
        final Pattern pattern) {
    final Matcher m = pattern.matcher(id);
    if (m.matches()) {
        if (!idMap.containsKey(id)) {
            final int i = counter.addAndGet(1);
            idMap.put(id, "gen-id-" + Integer.toString(i));
        }
    }
}

From source file:com.blacklocus.jres.request.index.JresUpdateDocumentScriptTest.java

@Test
public void testRetryOnConflict() throws InterruptedException {
    final String index = "JresUpdateDocumentScriptTest.testRetryOnConflict".toLowerCase();
    final String type = "test";
    final String id = "warzone";

    final AtomicInteger total = new AtomicInteger();
    final AtomicReference<String> error = new AtomicReference<String>();
    final Random random = new Random(System.currentTimeMillis());

    final int numThreads = 16, numIterations = 100;

    ExecutorService x = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        x.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    for (int j = 0; j < numIterations; j++) {
                        int increment = random.nextInt(5);
                        total.addAndGet(increment);
                        JresUpdateDocumentScript req = new JresUpdateDocumentScript(index, type, id,
                                "ctx._source.value += increment", ImmutableMap.of("increment", increment),
                                ImmutableMap.of("value", increment), null);
                        req.setRetryOnConflict(numIterations * 10);
                        jres.quest(req);
                    }
                } catch (Exception e) {
                    error.set(e.getMessage());
                }
            }
        });
    }
    x.shutdown();
    x.awaitTermination(1, TimeUnit.MINUTES);

    Assert.assertNull("With so many retries, all of these should have gotten through without conflict error",
            error.get());
    jres.quest(new JresRefresh(index));
    JresGetDocumentReply getReply = jres.quest(new JresGetDocument(index, type, id));
    Map<String, Integer> doc = getReply.getSourceAsType(new TypeReference<Map<String, Integer>>() {
    });
    Assert.assertEquals("All increments should have gotten committed", (Object) total.get(), doc.get("value"));
    Assert.assertEquals("Should have been numThreads * numIterations versions committed",
            (Object) (numThreads * numIterations), getReply.getVersion());
}

From source file:edu.jhu.hlt.concrete.stanford.ConcreteStanfordRunner.java

public void run(Path inPath, Path outPath, Analytic<? extends TokenizedCommunication> analytic) {
    LOGGER.debug("Checking input and output directories.");
    try {
        prepareInputOutput(inPath, outPath);
    } catch (IOException e) {
        LOGGER.error("Caught IOException when checking input and output directories.", e);
    }

    String lowerOutPathStr = inPath.toString().toLowerCase();
    try {
        sed.disable();

        // Outcomes of outPathStr ending:
        // No valid ending (program exit)
        // Ends with .concrete (first if)
        // Ends with .tar (else, first if)
        // Ends with .tar.gz (else, second if)

        boolean isTarExt = lowerOutPathStr.endsWith(".tar");
        boolean isTarGzExt = lowerOutPathStr.endsWith(".tar.gz") || lowerOutPathStr.endsWith(".tgz");
        boolean isConcreteExt = lowerOutPathStr.endsWith(".concrete") || lowerOutPathStr.endsWith(".comm");

        int nElementsInitPath = inPath.getNameCount();
        Path inputFileName = inPath.getName(nElementsInitPath - 1);

        // If no extension matches, exit.
        if (!isTarExt && !isTarGzExt && !isConcreteExt) {
            LOGGER.error("Input file extension was not '.concrete', '.comm', '.tar', or '.tar.gz'; exiting.");
            System.exit(1);
        } else if (isConcreteExt) {
            // IF .concrete, run single communication.
            LOGGER.info("Annotating single .concrete file at: {}", inPath.toString());
            try (InputStream in = Files.newInputStream(inPath);
                    BufferedInputStream bin = new BufferedInputStream(in, 1024 * 8 * 24);) {
                byte[] inputBytes = IOUtils.toByteArray(bin);
                Communication c = ser.fromBytes(inputBytes);
                WrappedCommunication annotated = analytic.annotate(c);
                Communication ar = annotated.getRoot();
                WritableCommunication wc = new WritableCommunication(ar);
                if (Files.isDirectory(outPath))
                    wc.writeToFile(outPath.resolve(inputFileName), true);
                else
                    wc.writeToFile(outPath, true);
            } catch (AnalyticException e) {
                LOGGER.error("Caught exception when running the analytic.", e);
            }
        } else {

            Path localOutPath;
            if (Files.isDirectory(outPath))
                // if directory, use same extension as input.
                localOutPath = outPath.resolve(inputFileName);
            else
                localOutPath = outPath;

            // Iterate over the archive.
            AutoCloseableIterator<byte[]> iter;
            try (InputStream is = Files.newInputStream(inPath);
                    BufferedInputStream bis = new BufferedInputStream(is, 1024 * 8 * 24);) {

                // open iterator based on file extension
                iter = isTarExt ? new TarArchiveEntryByteIterator(bis) : new TarGzArchiveEntryByteIterator(bis);
                try (OutputStream os = Files.newOutputStream(localOutPath);
                        BufferedOutputStream bos = new BufferedOutputStream(os, 1024 * 8 * 24);) {
                    TarArchiver archiver = isTarExt ? new TarArchiver(bos)
                            : new TarArchiver(new GzipCompressorOutputStream(bos));

                    final StopWatch sw = new StopWatch();
                    sw.start();

                    int docCtr = 0;
                    final AtomicInteger tokenCtr = new AtomicInteger(0);
                    LOGGER.info("Iterating over archive: {}", inPath.toString());
                    while (iter.hasNext()) {
                        Communication n = ser.fromBytes(iter.next());
                        LOGGER.info("Annotating communication: {}", n.getId());
                        try {
                            TokenizedCommunication a = analytic.annotate(n);
                            a.getTokenizations().parallelStream()
                                    .map(tkzToInt -> tkzToInt.getTokenList().getTokenListSize())
                                    .forEach(ct -> tokenCtr.addAndGet(ct));
                            archiver.addEntry(new ArchivableCommunication(a.getRoot()));
                            docCtr++;
                        } catch (AnalyticException | IOException | StringIndexOutOfBoundsException e) {
                            LOGGER.error("Caught exception processing document: " + n.getId(), e);
                        }
                    }

                    try {
                        archiver.close();
                        iter.close();
                    } catch (Exception e) {
                        // unlikely.
                        LOGGER.info("Caught exception closing iterator.", e);
                    }

                    sw.stop();
                    Duration rt = new Duration(sw.getTime());
                    Seconds st = rt.toStandardSeconds();
                    Minutes m = rt.toStandardMinutes();
                    int minutesInt = m.getMinutes();

                    LOGGER.info("Complete.");
                    LOGGER.info("Runtime: approximately {} minutes.", minutesInt);
                    LOGGER.info("Processed {} documents.", docCtr);
                    final int tokens = tokenCtr.get();
                    LOGGER.info("Processed {} tokens.", tokens);
                    if (docCtr > 0 && minutesInt > 0) {
                        final float minutesFloat = minutesInt;
                        float perMin = docCtr / minutesFloat;
                        LOGGER.info("Processed approximately {} documents/minute.", perMin);
                        LOGGER.info("Processed approximately {} tokens/second.",
                                st.getSeconds() / minutesFloat);
                    }
                }
            }
        }
    } catch (IOException | ConcreteException e) {
        LOGGER.error("Caught exception while running the analytic over archive.", e);
    }
}

From source file:org.apache.hedwig.server.delivery.TestThrottlingDelivery.java

private void throttleWithFilter(Publisher pub, final Subscriber sub, ByteString topic, ByteString subid,
        final int X) throws Exception {
    // publish numbers with header (so only 3 messages would be delivered)
    publishNums(pub, topic, 0, 3 * X, X);

    // subscribe the topic with filter
    PubSubProtocol.Map userOptions = PubSubProtocol.Map.newBuilder().addEntries(PubSubProtocol.Map.Entry
            .newBuilder().setKey(OPT_MOD).setValue(ByteString.copyFromUtf8(String.valueOf(X)))).build();
    SubscriptionOptions opts = SubscriptionOptions.newBuilder().setCreateOrAttach(CreateOrAttach.ATTACH)
            .setOptions(userOptions).setMessageFilter(ModMessageFilter.class.getName()).build();
    sub.subscribe(topic, subid, opts);

    final AtomicInteger expected = new AtomicInteger(X);
    final CountDownLatch latch = new CountDownLatch(1);
    sub.startDelivery(topic, subid, new MessageHandler() {
        @Override
        public synchronized void deliver(ByteString topic, ByteString subscriberId, Message msg,
                Callback<Void> callback, Object context) {
            try {
                int value = Integer.valueOf(msg.getBody().toStringUtf8());
                logger.debug("Received message {},", value);

                if (value == expected.get()) {
                    expected.addAndGet(X);
                } else {
                    // error condition
                    logger.error("Did not receive expected value, expected {}, got {}", expected.get(), value);
                    expected.set(0);
                    latch.countDown();
                }
                if (value == 3 * X) {
                    latch.countDown();
                }
                callback.operationFinished(context, null);
                sub.consume(topic, subscriberId, msg.getMsgId());
            } catch (Exception e) {
                logger.error("Received bad message", e);
                latch.countDown();
            }
        }
    });

    assertTrue("Timed out waiting for messages " + 3 * X, latch.await(10, TimeUnit.SECONDS));
    assertEquals("Should be expected message with " + 4 * X, 4 * X, expected.get());

    sub.stopDelivery(topic, subid);
    sub.closeSubscription(topic, subid);
}

From source file:org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFilesSplitRecovery.java

/**
 * This test splits a table and attempts to bulk load.  The bulk import files
 * should be split before atomically importing.
 */
@Test
public void testGroupOrSplitPresplit() throws Exception {
    final String table = "groupOrSplitPresplit";
    setupTable(table, 10);
    populateTable(table, 1);
    assertExpectedTable(table, ROWCOUNT, 1);
    forceSplit(table);

    final AtomicInteger countedLqis = new AtomicInteger();
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
        protected List<LoadQueueItem> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups,
                final LoadQueueItem item, final HTable htable, final Pair<byte[][], byte[][]> startEndKeys)
                throws IOException {
            List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
            if (lqis != null) {
                countedLqis.addAndGet(lqis.size());
            }
            return lqis;
        }
    };

    // create HFiles for different column families
    Path bulk = buildBulkFiles(table, 2);
    HTable ht = new HTable(util.getConfiguration(), Bytes.toBytes(table));
    lih.doBulkLoad(bulk, ht);

    assertExpectedTable(table, ROWCOUNT, 2);
    assertEquals(20, countedLqis.get());
}

From source file:org.apache.hedwig.server.filter.TestMessageFilter.java

private void receiveNumModM(final ByteString topic, final ByteString subid, final String filterClassName,
        final ClientMessageFilter filter, final int start, final int num, final int M, final boolean consume)
        throws Exception {
    PubSubProtocol.Map userOptions = PubSubProtocol.Map.newBuilder().addEntries(PubSubProtocol.Map.Entry
            .newBuilder().setKey(OPT_MOD).setValue(ByteString.copyFromUtf8(String.valueOf(M)))).build();
    SubscriptionOptions.Builder optionsBuilder = SubscriptionOptions.newBuilder()
            .setCreateOrAttach(CreateOrAttach.ATTACH).setOptions(userOptions);
    if (null != filterClassName) {
        optionsBuilder.setMessageFilter(filterClassName);
    }
    subscriber.subscribe(topic, subid, optionsBuilder.build());

    final int base = start + M - start % M;

    final AtomicInteger expected = new AtomicInteger(base);
    final CountDownLatch latch = new CountDownLatch(1);
    MessageHandler msgHandler = new MessageHandler() {
        synchronized public void deliver(ByteString topic, ByteString subscriberId, Message msg,
                Callback<Void> callback, Object context) {
            try {
                int value = Integer.valueOf(msg.getBody().toStringUtf8());
                // duplicated messages received, ignore them
                if (value > start) {
                    if (value == expected.get()) {
                        expected.addAndGet(M);
                    } else {
                        logger.error("Did not receive expected value, expected {}, got {}", expected.get(),
                                value);
                        expected.set(0);
                        latch.countDown();
                    }
                    if (expected.get() == (base + num * M)) {
                        latch.countDown();
                    }
                }
                callback.operationFinished(context, null);
                if (consume) {
                    subscriber.consume(topic, subid, msg.getMsgId());
                }
            } catch (Exception e) {
                logger.error("Received bad message", e);
                latch.countDown();
            }
        }
    };
    if (null != filter) {
        subscriber.startDeliveryWithFilter(topic, subid, msgHandler, filter);
    } else {
        subscriber.startDelivery(topic, subid, msgHandler);
    }
    assertTrue("Timed out waiting for messages mod " + M + " expected is " + expected.get(),
            latch.await(10, TimeUnit.SECONDS));
    assertEquals("Should be expected message with " + (base + num * M), (base + num * M), expected.get());
    subscriber.stopDelivery(topic, subid);
    subscriber.closeSubscription(topic, subid);
}

From source file:org.apache.hadoop.hbase.procedure2.TestProcedureSchedulerConcurrency.java

private void testConcurrentWaitWake(final boolean useWakeBatch) throws Exception {
    final int WAIT_THRESHOLD = 2500;
    final int NPROCS = 20;
    final int NRUNS = 500;

    final ProcedureScheduler sched = procSched;
    for (long i = 0; i < NPROCS; ++i) {
        sched.addBack(new TestProcedureWithEvent(i));
    }

    final Thread[] threads = new Thread[4];
    final AtomicInteger waitCount = new AtomicInteger(0);
    final AtomicInteger wakeCount = new AtomicInteger(0);

    final ConcurrentSkipListSet<TestProcedureWithEvent> waitQueue = new ConcurrentSkipListSet<TestProcedureWithEvent>();
    threads[0] = new Thread() {
        @Override
        public void run() {
            long lastUpdate = 0;
            while (true) {
                final int oldWakeCount = wakeCount.get();
                if (useWakeBatch) {
                    ProcedureEvent[] ev = new ProcedureEvent[waitQueue.size()];
                    for (int i = 0; i < ev.length; ++i) {
                        ev[i] = waitQueue.pollFirst().getEvent();
                        LOG.debug("WAKE BATCH " + ev[i] + " total=" + wakeCount.get());
                    }
                    sched.wakeEvents(ev.length, ev);
                    wakeCount.addAndGet(ev.length);
                } else {
                    int size = waitQueue.size();
                    while (size-- > 0) {
                        ProcedureEvent ev = waitQueue.pollFirst().getEvent();
                        sched.wakeEvent(ev);
                        LOG.debug("WAKE " + ev + " total=" + wakeCount.get());
                        wakeCount.incrementAndGet();
                    }
                }
                if (wakeCount.get() != oldWakeCount) {
                    lastUpdate = System.currentTimeMillis();
                } else if (wakeCount.get() >= NRUNS
                        && (System.currentTimeMillis() - lastUpdate) > WAIT_THRESHOLD) {
                    break;
                }
                Threads.sleepWithoutInterrupt(25);
            }
        }
    };

    for (int i = 1; i < threads.length; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                while (true) {
                    TestProcedureWithEvent proc = (TestProcedureWithEvent) sched.poll();
                    if (proc == null)
                        continue;

                    sched.suspendEvent(proc.getEvent());
                    waitQueue.add(proc);
                    sched.waitEvent(proc.getEvent(), proc);
                    LOG.debug("WAIT " + proc.getEvent());
                    if (waitCount.incrementAndGet() >= NRUNS) {
                        break;
                    }
                }
            }
        };
    }

    for (int i = 0; i < threads.length; ++i) {
        threads[i].start();
    }
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }

    sched.clear();
}