Example usage for java.util.concurrent.atomic AtomicInteger addAndGet

Introduction

This page presents usage examples for java.util.concurrent.atomic.AtomicInteger.addAndGet, collected from a variety of open-source projects.

Prototype

public final int addAndGet(int delta) 

Document

Atomically adds the given value to the current value, with memory effects as specified by VarHandle#getAndAdd.
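
The method returns the updated value; the related getAndAdd returns the value before the addition. A minimal sketch (not taken from the sources below) contrasting the two:

import java.util.concurrent.atomic.AtomicInteger;

public class AddAndGetDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(5);
        int updated = counter.addAndGet(3);  // atomically computes 5 + 3 and returns the new value, 8
        int previous = counter.getAndAdd(3); // atomically computes 8 + 3 but returns the old value, 8
        System.out.println(updated + " " + previous + " " + counter.get()); // prints "8 8 11"
    }
}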

Usage

From source file:Main.java

public static void main(String[] argv) throws Exception {
    AtomicInteger atomicInteger = new AtomicInteger(); // initial value is 0
    atomicInteger.addAndGet(10); // atomically adds 10 and returns the updated value, 10
    System.out.println(atomicInteger.get()); // prints 10
}

From source file:alluxio.cli.MiniBenchmark.java

/**
 * @param args there are no arguments needed
 * @throws Exception if error occurs during tests
 */
public static void main(String[] args) throws Exception {
    if (!parseInputArgs(args)) {
        usage();
        System.exit(-1);
    }
    if (sHelp) {
        usage();
        System.exit(0);
    }

    CommonUtils.warmUpLoop();

    for (int i = 0; i < sIterations; ++i) {
        final AtomicInteger count = new AtomicInteger(0);
        final CyclicBarrier barrier = new CyclicBarrier(sConcurrency);
        ExecutorService executorService = Executors.newFixedThreadPool(sConcurrency);
        final AtomicLong runtime = new AtomicLong(0);
        for (int j = 0; j < sConcurrency; ++j) {
            switch (sType) {
            case READ:
                executorService.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            readFile(barrier, runtime, count.addAndGet(1));
                        } catch (Exception e) {
                            LOG.error("Failed to read file.", e);
                            System.exit(-1);
                        }
                    }
                });
                break;
            case WRITE:
                executorService.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            writeFile(barrier, runtime, count.addAndGet(1));
                        } catch (Exception e) {
                            LOG.error("Failed to write file.", e);
                            System.exit(-1);
                        }
                    }
                });
                break;
            default:
                throw new RuntimeException("Unsupported type.");
            }
        }
        executorService.shutdown();
        Preconditions.checkState(executorService.awaitTermination(1, TimeUnit.HOURS));
        double time = runtime.get() * 1.0 / sConcurrency / Constants.SECOND_NANO;
        System.out.printf("Iteration: %d; Duration: %f seconds; Aggregated throughput: %f GB/second.%n", i,
                time, sConcurrency * 1.0 * sFileSize / time / Constants.GB);
    }
}
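
Here count.addAndGet(1), which is equivalent to incrementAndGet(), lets each concurrently running task atomically claim a distinct id between 1 and sConcurrency before calling readFile or writeFile.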

From source file:com.khartec.waltz.jobs.sample.MeasurablesGenerator.java

private static void generateRegions(DSLContext dsl) throws IOException {
    List<String> lines = readLines(OrgUnitGenerator.class.getResourceAsStream("/regions.csv"));

    System.out.println("Deleting existing Regions & Countries ...");
    int deletedCount = dsl.deleteFrom(MEASURABLE)
            .where(MEASURABLE.MEASURABLE_CATEGORY_ID
                    .in(DSL.select(MEASURABLE_CATEGORY.ID).from(MEASURABLE_CATEGORY)
                            .where(MEASURABLE_CATEGORY.EXTERNAL_ID.eq(REGION_CATEGORY_EXTERNAL_ID))))
            .and(MEASURABLE.PROVENANCE.eq("demo")).execute();
    System.out.println("Deleted: " + deletedCount + " existing Regions & Countries");

    Map<String, Map<String, Set<String>>> regionHierarchy = lines.stream().skip(1)
            .map(line -> StringUtils.splitPreserveAllTokens(line, ","))
            .filter(cells -> notEmpty(cells[0]) && notEmpty(cells[6]) && notEmpty(cells[5]))
            .map(cells -> Tuple.tuple(cells[0], cells[6], cells[5]))
            .collect(groupingBy(t -> t.v3, groupingBy(t -> t.v2, mapping(t -> t.v1, toSet()))));

    final long measurableCategoryId = dsl.select(MEASURABLE_CATEGORY.ID).from(MEASURABLE_CATEGORY)
            .where(MEASURABLE_CATEGORY.EXTERNAL_ID.eq(REGION_CATEGORY_EXTERNAL_ID)).fetchAny().value1();

    AtomicInteger insertCount = new AtomicInteger(0);
    regionHierarchy.forEach((region, subRegionMap) -> {
        final long regionId = dsl.insertInto(MEASURABLE)
                .set(createRegion(null, region, measurableCategoryId, false)).returning(MEASURABLE.ID)
                .fetchOne().getId();
        insertCount.incrementAndGet();

        subRegionMap.forEach((subRegion, countries) -> {
            final long subRegionId = dsl.insertInto(MEASURABLE)
                    .set(createRegion(regionId, subRegion, measurableCategoryId, true)).returning(MEASURABLE.ID)
                    .fetchOne().getId();
            insertCount.incrementAndGet();

            insertCount.addAndGet(dsl.batchInsert(countries.stream()
                    .map(country -> createRegion(subRegionId, country, measurableCategoryId, true))
                    .collect(toList())).execute().length);
        });
    });

    System.out.println("Inserted: " + insertCount + " Regions & Countries");
}
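
This example mixes the two increment styles: incrementAndGet() for the single region and sub-region rows, and addAndGet(...) to fold the length of each batch-insert result (one entry per country) into the same running total in a single atomic step.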

From source file:org.apache.hadoop.hdfs.server.datanode.TestBatchIbr.java

static void runIbrTest(final long ibrInterval) throws Exception {
    final ExecutorService executor = createExecutor();
    final Random ran = new Random();

    final Configuration conf = newConf(ibrInterval);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();

    try {
        final String dirPathString = "/dir";
        final Path dir = new Path(dirPathString);
        dfs.mkdirs(dir);

        // start testing
        final long testStartTime = Time.monotonicNow();
        final ExecutorCompletionService<Path> createService = new ExecutorCompletionService<>(executor);
        final AtomicLong createFileTime = new AtomicLong();
        final AtomicInteger numBlockCreated = new AtomicInteger();

        // create files
        for (int i = 0; i < NUM_FILES; i++) {
            createService.submit(new Callable<Path>() {
                @Override
                public Path call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        final long seed = ran.nextLong();
                        final int numBlocks = ran.nextInt(MAX_BLOCK_NUM) + 1;
                        numBlockCreated.addAndGet(numBlocks);
                        return createFile(dir, numBlocks, seed, dfs);
                    } finally {
                        createFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }

        // verify files
        final ExecutorCompletionService<Boolean> verifyService = new ExecutorCompletionService<>(executor);
        final AtomicLong verifyFileTime = new AtomicLong();
        for (int i = 0; i < NUM_FILES; i++) {
            final Path file = createService.take().get();
            verifyService.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        return verifyFile(file, dfs);
                    } finally {
                        verifyFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }
        for (int i = 0; i < NUM_FILES; i++) {
            Assert.assertTrue(verifyService.take().get());
        }
        final long testEndTime = Time.monotonicNow();

        LOG.info("ibrInterval=" + ibrInterval + " ("
                + toConfString(DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_KEY, conf) + "), numBlockCreated="
                + numBlockCreated);
        LOG.info("duration=" + toSecondString(testEndTime - testStartTime) + ", createFileTime="
                + toSecondString(createFileTime.get()) + ", verifyFileTime="
                + toSecondString(verifyFileTime.get()));
        LOG.info("NUM_FILES=" + NUM_FILES + ", MAX_BLOCK_NUM=" + MAX_BLOCK_NUM + ", BLOCK_SIZE=" + BLOCK_SIZE
                + ", NUM_THREADS=" + NUM_THREADS + ", NUM_DATANODES=" + NUM_DATANODES);
        logIbrCounts(cluster.getDataNodes());
    } finally {
        executor.shutdown();
        cluster.shutdown();
    }
}
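
numBlockCreated.addAndGet(numBlocks) and the AtomicLong timers accumulate totals across callables running on a shared executor, with no explicit locking.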

From source file:org.kududb.client.BaseKuduTest.java

/**
 * Counts the rows from the {@code scanner} until exhaustion. It doesn't require the scanner to
 * be new, so it can be used to finish scanning a previously-started scan.
 */
protected static int countRowsInScan(AsyncKuduScanner scanner) throws Exception {
    final AtomicInteger counter = new AtomicInteger();

    Callback<Object, RowResultIterator> cb = new Callback<Object, RowResultIterator>() {
        @Override
        public Object call(RowResultIterator arg) throws Exception {
            if (arg == null)
                return null;
            counter.addAndGet(arg.getNumRows());
            return null;
        }
    };

    while (scanner.hasMoreRows()) {
        Deferred<RowResultIterator> data = scanner.nextRows();
        data.addCallbacks(cb, defaultErrorCB);
        data.join(DEFAULT_SLEEP);
    }

    Deferred<RowResultIterator> closer = scanner.close();
    closer.addCallbacks(cb, defaultErrorCB);
    closer.join(DEFAULT_SLEEP);
    return counter.get();
}

From source file:jenkins.plugins.itemstorage.s3.S3UploadAllCallable.java

private void waitForUploads(AtomicInteger count, Uploads uploads) {
    count.addAndGet(uploads.count());

    try {
        uploads.finishUploading();
    } catch (InterruptedException ie) {
        // clean up, then restore the interrupt flag so callers can observe it
        uploads.cleanup();
        Thread.currentThread().interrupt();
    }
}

From source file:org.apache.camel.impl.DefaultInflightRepository.java

public void remove(Exchange exchange) {
    int count = totalCount.decrementAndGet();
    if (LOG.isTraceEnabled()) {
        LOG.trace("Total " + count + " inflight exchanges. Last removed: " + exchange.getExchangeId());
    }

    if (exchange.getFromEndpoint() == null) {
        return;
    }

    String key = exchange.getFromEndpoint().getEndpointKey();
    AtomicInteger existing = endpointCount.get(key);
    if (existing != null) {
        existing.addAndGet(-1);
    }
}
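
existing.addAndGet(-1) decrements the per-endpoint counter; it is equivalent to existing.decrementAndGet().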

From source file:org.apache.camel.impl.DefaultInflightRepository.java

public void add(Exchange exchange) {
    int count = totalCount.incrementAndGet();
    if (LOG.isTraceEnabled()) {
        LOG.trace("Total " + count + " inflight exchanges. Last added: " + exchange.getExchangeId());
    }

    if (exchange.getFromEndpoint() == null) {
        return;
    }

    String key = exchange.getFromEndpoint().getEndpointKey();
    AtomicInteger existing = endpointCount.putIfAbsent(key, new AtomicInteger(1));
    if (existing != null) {
        existing.addAndGet(1);
    }
}
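
The putIfAbsent sequence above (install a counter at 1, or bump the one already present) is the pre-Java 8 idiom for a per-key counter. On Java 8+ the same effect can be written more compactly; a sketch, assuming endpointCount is a ConcurrentMap<String, AtomicInteger>:

// computeIfAbsent returns the existing counter or atomically installs a fresh one
endpointCount.computeIfAbsent(key, k -> new AtomicInteger(0)).incrementAndGet();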

From source file:com.github.brandtg.switchboard.LogRegionResource.java

private void handleData(String target, List<LogRegion> logRegions, LogRegionResponse response)
        throws Exception {
    if (target != null) {
        final AtomicInteger contentLength = new AtomicInteger();
        for (LogRegion logRegion : logRegions) {
            contentLength.addAndGet((int) (logRegion.getNextFileOffset() - logRegion.getFileOffset()));
        }
        String[] hostPort = target.split(":");
        InetSocketAddress socketAddress = new InetSocketAddress(hostPort[0], Integer.valueOf(hostPort[1]));
        bootstrap.connect(socketAddress).addListener(new LogFileSender(logRegions, target));
        response.setDataSize(contentLength.get());
    } else {
        Map<Long, String> data = new HashMap<Long, String>(logRegions.size());
        for (LogRegion logRegion : logRegions) {
            data.put(logRegion.getIndex(), Base64.encodeBase64String(logReader.read(logRegion)));
        }
        response.setData(data);
    }
}
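
Here contentLength is only ever touched by the one thread running the loop, so the AtomicInteger serves as a mutable int holder rather than providing cross-thread visibility; a plain int accumulator would work equally well.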

From source file:org.orbisgis.sos.LeqStats.java

public void addLeq(double leq) {
    leqMin = Math.min(leqMin, leq);
    leqMax = Math.max(leqMax, leq);
    rmsSum += Math.pow(10., leq / 10.);
    int key = (int) (leq / classStep);
    AtomicInteger leqCounter = leqClass.get(key);
    if (leqCounter == null) {
        leqCounter = new AtomicInteger(0);
        leqClass.put(key, leqCounter);
    }
    leqCounter.addAndGet(1);
    rmsSumCount++;
}
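
Note that the get/null-check/put sequence around leqClass is not itself atomic, so despite the AtomicInteger values this method relies on being called from a single thread or under external synchronization.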