Example usage for java.util.concurrent.atomic AtomicLong addAndGet

List of usage examples for java.util.concurrent.atomic AtomicLong addAndGet

Introduction

On this page you can find example usages of java.util.concurrent.atomic AtomicLong addAndGet.

Prototype

public final long addAndGet(long delta) 

Document

Atomically adds the given value to the current value, with memory effects as specified by VarHandle#getAndAdd.
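
For a quick feel of the contract, here is a minimal sketch (the class and variable names are illustrative, not taken from the examples below):

import java.util.concurrent.atomic.AtomicLong;

public class AddAndGetDemo {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(10);
        long updated = counter.addAndGet(5);        // atomically adds 5 and returns the new value
        System.out.println(updated);                // 15
        System.out.println(counter.addAndGet(-3));  // a negative delta subtracts: prints 12
    }
}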

Usage

From source file:ubicrypt.core.util.OnSubscribeInputStreamTest.java

@Test
public void testBig2() throws Exception {
    final AtomicLong count = new AtomicLong();
    final CountDownLatch cd = new CountDownLatch(1);
    Observable
            .create(new OnSubscribeInputStream(
                    new ByteArrayInputStream(StringUtils.repeat('a', (2 << 16) - 1).getBytes()), 1 << 16))
            .doOnCompleted(cd::countDown).subscribe(next -> count.addAndGet(next.length));
    assertThat(cd.await(2, TimeUnit.SECONDS)).isTrue();
    assertThat(count.get()).isEqualTo((2 << 16) - 1);
}
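
Here the input holds (2 << 16) - 1 = 131071 bytes and OnSubscribeInputStream emits chunks of at most 1 << 16 = 65536 bytes, so addAndGet accumulates two chunk lengths (65536 + 65535) before the completion latch releases the assertion.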

From source file:org.archive.crawler.admin.StatisticsTracker.java

/**
 * Increment a counter for a key in a given HashMap by an arbitrary amount.
 * Used for various aggregate data. The increment amount can be negative.
 *
 * @param map
 *            The Map or ConcurrentMap
 * @param key
 *            The key for the counter to be incremented; if it does not exist,
 *            it will be added (initialized to <code>increment</code>).
 *            If null, the counter "unknown" is incremented.
 * @param increment
 *            The amount by which to increment the counter for the <code>key</code>.
 */
protected static void incrementMapCount(ConcurrentMap<String, AtomicLong> map, String key, long increment) {
    if (key == null) {
        key = "unknown";
    }
    AtomicLong lw = map.get(key);
    if (lw == null) {
        lw = new AtomicLong();
        AtomicLong prevVal = map.putIfAbsent(key, lw);
        if (prevVal != null) {
            lw = prevVal;
        }
    }
    lw.addAndGet(increment);
}
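
On Java 8 and later, the get/putIfAbsent dance above can be collapsed into a single call with ConcurrentMap.computeIfAbsent. A minimal sketch of that alternative (not taken from the StatisticsTracker source):

protected static void incrementMapCount(ConcurrentMap<String, AtomicLong> map, String key, long increment) {
    // computeIfAbsent atomically installs a fresh counter on first use
    map.computeIfAbsent(key == null ? "unknown" : key, k -> new AtomicLong()).addAndGet(increment);
}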

From source file:it.polimi.diceH2020.SPACE4CloudWS.core.CoarseGrainedOptimizer.java

void hillClimbing(Solution solution) {
    logger.info(
            String.format("---------- Starting hill climbing for instance %s ----------", solution.getId()));
    Technology technology = solverChecker.enforceSolverSettings(solution.getLstSolutions());

    List<SolutionPerJob> lst = solution.getLstSolutions();
    Stream<SolutionPerJob> strm = settings.isParallel() ? lst.parallelStream() : lst.stream();
    AtomicLong executionTime = new AtomicLong();
    boolean overallSuccess = strm.map(s -> {
        Instant first = Instant.now();
        boolean success = hillClimbing(s, technology);
        Instant after = Instant.now();
        executionTime.addAndGet(Duration.between(first, after).toMillis());
        return success;
    }).reduce(true, Boolean::logicalAnd);

    if (!overallSuccess)
        stateHandler.sendEvent(Events.STOP);
    else {
        solution.setEvaluated(false);
        evaluator.evaluate(solution);

        Phase phase = new Phase();
        phase.setId(PhaseID.OPTIMIZATION);
        phase.setDuration(executionTime.get());
        solution.addPhase(phase);
    }
}
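
Note the design choice here: a plain local long could not be used inside the lambda, because captured locals must be effectively final, and the stream may also run in parallel. AtomicLong solves both problems. A minimal self-contained sketch of the same accumulation pattern (processItem is a hypothetical stand-in for the per-item work):

import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;

public class TimingDemo {
    public static void main(String[] args) {
        AtomicLong elapsedMillis = new AtomicLong();
        Arrays.asList("a", "b", "c").parallelStream().forEach(item -> {
            Instant start = Instant.now();
            processItem(item);
            // safe to call from several worker threads at once
            elapsedMillis.addAndGet(Duration.between(start, Instant.now()).toMillis());
        });
        System.out.println("total work time: " + elapsedMillis.get() + " ms");
    }

    private static void processItem(String item) {
        // placeholder for real work
    }
}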

From source file:com.cloudera.lib.service.instrumentation.InstrumentationService.java

@Override
public void incr(String group, String name, long count) {
    AtomicLong counter = getToAdd(group, name, AtomicLong.class, counterLock, counters);
    counter.addAndGet(count);
}

From source file:org.apache.hadoop.hbase.client.TestAsyncTable.java

@Test
public void testIncrement() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    int count = 100;
    CountDownLatch latch = new CountDownLatch(count);
    AtomicLong sum = new AtomicLong(0L);
    IntStream.range(0, count)
            .forEach(i -> table.incrementColumnValue(row, FAMILY, QUALIFIER, 1).thenAccept(x -> {
                sum.addAndGet(x);
                latch.countDown();
            }));
    latch.await();
    assertEquals(count, Bytes
            .toLong(table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER)));
    assertEquals((1 + count) * count / 2, sum.get());
}
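
Each incrementColumnValue call returns the post-increment value, so across the 100 calls the callbacks observe 1, 2, ..., count in some order; their sum is count * (count + 1) / 2, which is exactly what the final assertion checks.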

From source file:com.joyent.manta.benchmark.Benchmark.java

/**
 * Method used to run a multi-threaded benchmark.
 *
 * @param method to measure
 * @param path path to store benchmarking test data
 * @param iterations number of iterations to run
 * @param concurrency number of threads to run
 * @throws IOException thrown when we can't communicate with the server
 */
private static void multithreadedBenchmark(final String method, final String path, final int iterations,
        final int concurrency) throws IOException {
    final AtomicLong fullAggregation = new AtomicLong(0L);
    final AtomicLong serverAggregation = new AtomicLong(0L);
    final AtomicLong count = new AtomicLong(0L);
    final long perThreadCount = perThreadCount(iterations, concurrency);

    System.out.printf("Running %d iterations per thread\n", perThreadCount);

    final long testStart = System.nanoTime();

    Runtime.getRuntime().addShutdownHook(new Thread(Benchmark::cleanUp));

    final Callable<Void> worker = () -> {
        for (int i = 0; i < perThreadCount; i++) {
            Duration[] durations;

            if (method.equals("put")) {
                durations = measurePut(sizeInBytesOrNoOfDirs);
            } else if (method.equals("putDir")) {
                durations = measurePutDir(sizeInBytesOrNoOfDirs);
            } else {
                durations = measureGet(path);
            }

            long fullLatency = durations[0].toMillis();
            long serverLatency = durations[1].toMillis();
            fullAggregation.addAndGet(fullLatency);
            serverAggregation.addAndGet(serverLatency);

            System.out.printf("%s %d full=%dms, server=%dms, thread=%s\n", method, count.getAndIncrement(),
                    fullLatency, serverLatency, Thread.currentThread().getName());
        }

        return null;
    };

    final Thread.UncaughtExceptionHandler handler = (t, e) -> LOG.error("Error when executing benchmark", e);

    final AtomicInteger threadCounter = new AtomicInteger(0);
    ThreadFactory threadFactory = r -> {
        Thread t = new Thread(r);
        t.setDaemon(true);
        t.setUncaughtExceptionHandler(handler);
        t.setName(String.format("benchmark-%d", threadCounter.incrementAndGet()));

        return t;
    };

    ExecutorService executor = Executors.newFixedThreadPool(concurrency, threadFactory);

    List<Callable<Void>> workers = new ArrayList<>(concurrency);
    for (int i = 0; i < concurrency; i++) {
        workers.add(worker);
    }

    try {
        List<Future<Void>> futures = executor.invokeAll(workers);

        boolean completed = false;
        while (!completed) {
            try (Stream<Future<Void>> stream = futures.stream()) {
                completed = stream.allMatch((f) -> f.isDone() || f.isCancelled());

                if (!completed) {
                    Thread.sleep(CHECK_INTERVAL);
                }
            }
        }

    } catch (InterruptedException e) {
        return;
    } finally {
        System.err.println("Shutting down the thread pool");
        executor.shutdown();
    }

    final long testEnd = System.nanoTime();

    // floating-point division avoids integer truncation before rounding
    final long fullAverage = Math.round((double) fullAggregation.get() / iterations);
    final long serverAverage = Math.round((double) serverAggregation.get() / iterations);
    final long totalTime = Duration.ofNanos(testEnd - testStart).toMillis();

    System.out.printf("Average full latency: %d ms\n", fullAverage);
    System.out.printf("Average server latency: %d ms\n", serverAverage);
    System.out.printf("Total test time: %d ms\n", totalTime);
    System.out.printf("Total invocations: %d\n", count.get());
}

From source file:org.apache.hadoop.hbase.client.TestAsyncTable.java

@Test
public void testAppend() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    int count = 10;
    CountDownLatch latch = new CountDownLatch(count);
    char suffix = ':';
    AtomicLong suffixCount = new AtomicLong(0L);
    IntStream.range(0, count).forEachOrdered(i -> table
            .append(new Append(row).add(FAMILY, QUALIFIER, Bytes.toBytes("" + i + suffix))).thenAccept(r -> {
                suffixCount.addAndGet(
                        Bytes.toString(r.getValue(FAMILY, QUALIFIER)).chars().filter(x -> x == suffix).count());
                latch.countDown();
            }));
    latch.await();
    assertEquals((1 + count) * count / 2, suffixCount.get());
    String value = Bytes
            .toString(table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER));
    int[] actual = Arrays.asList(value.split("" + suffix)).stream().mapToInt(Integer::parseInt).sorted()
            .toArray();
    assertArrayEquals(IntStream.range(0, count).toArray(), actual);
}

From source file:net.arp7.HdfsPerfTest.WriteFile.java

private static void writeFiles(final Configuration conf, final FileIoStats stats)
        throws InterruptedException, IOException {
    final FileSystem fs = FileSystem.get(conf);
    final AtomicLong filesLeft = new AtomicLong(params.getNumFiles());
    final long runId = abs(rand.nextLong());
    final byte[] data = new byte[params.getIoSize()];
    Arrays.fill(data, (byte) 65);

    // Start the writers.
    final ExecutorService executor = Executors.newFixedThreadPool((int) params.getNumThreads());
    final CompletionService<Object> ecs = new ExecutorCompletionService<>(executor);
    LOG.info("NumFiles=" + params.getNumFiles() + ", FileSize="
            + FileUtils.byteCountToDisplaySize(params.getFileSize()) + ", IoSize="
            + FileUtils.byteCountToDisplaySize(params.getIoSize()) + ", BlockSize="
            + FileUtils.byteCountToDisplaySize(params.getBlockSize()) + ", ReplicationFactor="
            + params.getReplication() + ", isThrottled=" + (params.maxWriteBps() > 0));
    LOG.info("Starting " + params.getNumThreads() + " writer thread" + (params.getNumThreads() > 1 ? "s" : "")
            + ".");
    final long startTime = System.nanoTime();
    for (long t = 0; t < params.getNumThreads(); ++t) {
        final long threadIndex = t;
        Callable<Object> c = new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                long fileIndex = 0;
                while (filesLeft.addAndGet(-1) >= 0) { // atomically claim one file; negative means no files remain
                    final String fileName = "WriteFile-" + runId + "-" + (threadIndex + 1) + "-"
                            + (++fileIndex);
                    writeOneFile(new Path(params.getOutputDir(), fileName), fs, data, stats);
                }
                return null;
            }
        };
        ecs.submit(c);
    }

    // And wait for all writers to complete.
    for (long t = 0; t < params.getNumThreads(); ++t) {
        ecs.take();
    }
    final long endTime = System.nanoTime();
    stats.setElapsedTime(endTime - startTime);
    executor.shutdown();
}
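
The filesLeft.addAndGet(-1) >= 0 loop above is a handy work-claiming idiom: each call atomically takes one unit of remaining work, and a negative result tells the thread that another worker already claimed the last unit. A stripped-down, runnable sketch of the same idiom (the task count and printout are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class WorkClaimDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong remaining = new AtomicLong(10);
        Runnable worker = () -> {
            // addAndGet(-1) both claims a task and reports how many were left
            while (remaining.addAndGet(-1) >= 0) {
                System.out.println(Thread.currentThread().getName() + " processed one task");
            }
        };
        Thread a = new Thread(worker, "worker-a");
        Thread b = new Thread(worker, "worker-b");
        a.start();
        b.start();
        a.join();
        b.join();
    }
}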

From source file:org.polymap.p4.data.importer.ImportPanel.java

@Override
public void uploadStarted(ClientFile clientFile, InputStream in) throws Exception {
    log.info(clientFile.getName() + " - " + clientFile.getType() + " - " + clientFile.getSize());

    uploadProgress(resultSection.getBody(), "Uploading ...");

    // upload file
    assert clientFile.getName() != null : "Null client file name is not supported yet.";
    File f = new File(tempDir, clientFile.getName());
    try (OutputStream out = new FileOutputStream(f)) {
        Timer timer = new Timer();
        byte[] buf = new byte[4096];
        AtomicLong count = new AtomicLong();
        for (int c = in.read(buf); c > -1; c = in.read(buf)) {
            out.write(buf, 0, c);
            count.addAndGet(c);

            if (timer.elapsedTime() > 2000) {
                Composite parent = resultSection.getBody();
                if (parent.isDisposed()) {
                    break; // stop uploading
                } else {
                    uploadProgress(resultSection.getBody(),
                            "Uploading ..." + byteCountToDisplaySize(count.get()));
                    timer.start();
                }
            }
        }
        uploadProgress(resultSection.getBody(), "Upload ...complete.");
    } catch (Exception e) {
        uploadProgress(resultSection.getBody(), "Upload ...failed.");
        async(() -> site().toolkit().createSnackbar(Appearance.FadeIn, "Unable to upload file."));
        return;
    }

    async(() -> {
        // fires event which triggers UI update in ImportsContentProvider
        context.addContextOut(f);
    });
}

From source file:jduagui.Controller.java

public static long getSize(String startPath, Map<String, Long> dirs, Map<String, Long> files)
        throws IOException {
    final AtomicLong size = new AtomicLong(0);
    final AtomicLong subdirs = new AtomicLong(0);
    final AtomicLong fs = new AtomicLong(0);
    final File f = new File(startPath);
    Path path = Paths.get(startPath);

    Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
            subdirs.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            fs.incrementAndGet();
            size.addAndGet(attrs.size());
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
            fs.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }
    });
    // the walk counted the start directory itself; exclude it, but never go below zero
    if (subdirs.decrementAndGet() == -1)
        subdirs.incrementAndGet();

    if (f.isDirectory()) {
        dirs.put(startPath, subdirs.get());
        files.put(startPath, fs.get());
    }
    return size.get();
}
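
A note on the design: Files.walkFileTree invokes the visitor callbacks on the calling thread, so no atomicity is actually required here; the AtomicLongs serve as effectively-final mutable boxes that the anonymous visitor class is allowed to update.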