Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page shows example usages of java.util.concurrent.atomic.AtomicInteger.incrementAndGet().

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd, and returns the updated value.
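
Before looking at the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of the basic contract: incrementAndGet() adds one atomically and returns the updated value, so several threads can share a counter without external locking.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Ten threads, each incrementing the shared counter 1,000 times.
        Thread[] threads = new Thread[10];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                for (int j = 0; j < 1_000; j++) {
                    counter.incrementAndGet(); // atomic; returns the new value
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }

        // Always prints 10000, regardless of how the threads interleave.
        System.out.println(counter.get());
    }
}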

Usage

From source file:com.joyent.manta.benchmark.Benchmark.java

/**
 * Method used to run a multi-threaded benchmark.
 *
 * @param method to measure
 * @param path path to store benchmarking test data
 * @param iterations number of iterations to run
 * @param concurrency number of threads to run
 * @throws IOException thrown when we can't communicate with the server
 */
private static void multithreadedBenchmark(final String method, final String path, final int iterations,
        final int concurrency) throws IOException {
    final AtomicLong fullAggregation = new AtomicLong(0L);
    final AtomicLong serverAggregation = new AtomicLong(0L);
    final AtomicLong count = new AtomicLong(0L);
    final long perThreadCount = perThreadCount(iterations, concurrency);

    System.out.printf("Running %d iterations per thread\n", perThreadCount);

    final long testStart = System.nanoTime();

    Runtime.getRuntime().addShutdownHook(new Thread(Benchmark::cleanUp));

    final Callable<Void> worker = () -> {
        for (int i = 0; i < perThreadCount; i++) {
            Duration[] durations;

            if (method.equals("put")) {
                durations = measurePut(sizeInBytesOrNoOfDirs);
            } else if (method.equals("putDir")) {
                durations = measurePutDir(sizeInBytesOrNoOfDirs);
            } else {
                durations = measureGet(path);
            }

            long fullLatency = durations[0].toMillis();
            long serverLatency = durations[1].toMillis();
            fullAggregation.addAndGet(fullLatency);
            serverAggregation.addAndGet(serverLatency);

            System.out.printf("%s %d full=%dms, server=%dms, thread=%s\n", method, count.getAndIncrement(),
                    fullLatency, serverLatency, Thread.currentThread().getName());
        }

        return null;
    };

    final Thread.UncaughtExceptionHandler handler = (t, e) -> LOG.error("Error when executing benchmark", e);

    final AtomicInteger threadCounter = new AtomicInteger(0);
    ThreadFactory threadFactory = r -> {
        Thread t = new Thread(r);
        t.setDaemon(true);
        t.setUncaughtExceptionHandler(handler);
        t.setName(String.format("benchmark-%d", threadCounter.incrementAndGet()));

        return t;
    };

    ExecutorService executor = Executors.newFixedThreadPool(concurrency, threadFactory);

    List<Callable<Void>> workers = new ArrayList<>(concurrency);
    for (int i = 0; i < concurrency; i++) {
        workers.add(worker);
    }

    try {
        List<Future<Void>> futures = executor.invokeAll(workers);

        boolean completed = false;
        while (!completed) {
            try (Stream<Future<Void>> stream = futures.stream()) {
                completed = stream.allMatch((f) -> f.isDone() || f.isCancelled());

                if (!completed) {
                    Thread.sleep(CHECK_INTERVAL);
                }
            }
        }

    } catch (InterruptedException e) {
        return;
    } finally {
        System.err.println("Shutting down the thread pool");
        executor.shutdown();
    }

    final long testEnd = System.nanoTime();

    final long fullAverage = Math.round(fullAggregation.get() / iterations);
    final long serverAverage = Math.round(serverAggregation.get() / iterations);
    final long totalTime = Duration.ofNanos(testEnd - testStart).toMillis();

    System.out.printf("Average full latency: %d ms\n", fullAverage);
    System.out.printf("Average server latency: %d ms\n", serverAverage);
    System.out.printf("Total test time: %d ms\n", totalTime);
    System.out.printf("Total invocations: %d\n", count.get());
}
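
The benchmark above uses both flavours of atomic increment: count.getAndIncrement() when logging each invocation and threadCounter.incrementAndGet() when naming worker threads. The only difference is the return value, as this small illustrative snippet (not part of the original source) shows:

AtomicInteger n = new AtomicInteger(0);
int a = n.getAndIncrement(); // a == 0, n is now 1 (returns the previous value)
int b = n.incrementAndGet(); // b == 2, n is now 2 (returns the updated value)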

From source file:net.sourceforge.pmd.PMD.java

/**
 * This method is the main entry point for command line usage.
 *
 * @param configuration
 *            the configuration to use
 * @return number of violations found.
 */
public static int doPMD(PMDConfiguration configuration) {

    // Load the RuleSets
    RuleSetFactory ruleSetFactory = RulesetsFactoryUtils.getRulesetFactory(configuration);
    RuleSets ruleSets = RulesetsFactoryUtils.getRuleSetsWithBenchmark(configuration.getRuleSets(),
            ruleSetFactory);
    if (ruleSets == null) {
        return 0;
    }

    Set<Language> languages = getApplicableLanguages(configuration, ruleSets);
    List<DataSource> files = getApplicableFiles(configuration, languages);

    long reportStart = System.nanoTime();
    try {
        Renderer renderer = configuration.createRenderer();
        List<Renderer> renderers = Collections.singletonList(renderer);

        renderer.setWriter(IOUtil.createWriter(configuration.getReportFile()));
        renderer.start();

        Benchmarker.mark(Benchmark.Reporting, System.nanoTime() - reportStart, 0);

        RuleContext ctx = new RuleContext();
        final AtomicInteger violations = new AtomicInteger(0);
        ctx.getReport().addListener(new ReportListener() {
            @Override
            public void ruleViolationAdded(RuleViolation ruleViolation) {
                violations.incrementAndGet();
            }

            @Override
            public void metricAdded(Metric metric) {
            }
        });

        processFiles(configuration, ruleSetFactory, files, ctx, renderers);

        reportStart = System.nanoTime();
        renderer.end();
        renderer.flush();
        return violations.get();
    } catch (Exception e) {
        String message = e.getMessage();
        if (message != null) {
            LOG.severe(message);
        } else {
            LOG.log(Level.SEVERE, "Exception during processing", e);
        }
        LOG.log(Level.FINE, "Exception during processing", e);
        LOG.info(PMDCommandLineInterface.buildUsageText());
        return 0;
    } finally {
        Benchmarker.mark(Benchmark.Reporting, System.nanoTime() - reportStart, 0);
    }
}

From source file:org.apache.cassandra.db.commitlog.CommitLog.java

public static int recover(File[] clogs) throws IOException {
    final Set<Table> tablesRecovered = new HashSet<Table>();
    List<Future<?>> futures = new ArrayList<Future<?>>();
    byte[] bytes = new byte[4096];
    Map<Integer, AtomicInteger> invalidMutations = new HashMap<Integer, AtomicInteger>();

    // count the number of replayed mutations. We don't really care about atomicity, but we need it to be a reference.
    final AtomicInteger replayedCount = new AtomicInteger();

    // compute per-CF and global replay positions
    final Map<Integer, ReplayPosition> cfPositions = new HashMap<Integer, ReplayPosition>();
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
        // it's important to call RP.gRP per-cf, before aggregating all the positions w/ the Ordering.min call
        // below: gRP will return NONE if there are no flushed sstables, which is important to have in the
        // list (otherwise we'll just start replay from the first flush position that we do have, which is not correct).
        ReplayPosition rp = ReplayPosition.getReplayPosition(cfs.getSSTables());
        cfPositions.put(cfs.metadata.cfId, rp);
    }
    final ReplayPosition globalPosition = Ordering.from(ReplayPosition.comparator).min(cfPositions.values());

    for (final File file : clogs) {
        final long segment = CommitLogSegment.idFromFilename(file.getName());

        int bufferSize = (int) Math.min(Math.max(file.length(), 1), 32 * 1024 * 1024);
        BufferedRandomAccessFile reader = new BufferedRandomAccessFile(new File(file.getAbsolutePath()), "r",
                bufferSize, true);
        assert reader.length() <= Integer.MAX_VALUE;

        try {
            int replayPosition;
            if (globalPosition.segment < segment)
                replayPosition = 0;
            else if (globalPosition.segment == segment)
                replayPosition = globalPosition.position;
            else
                replayPosition = (int) reader.length();

            if (replayPosition < 0 || replayPosition >= reader.length()) {
                // replayPosition > reader.length() can happen if some data gets flushed before it is written to the commitlog
                // (see https://issues.apache.org/jira/browse/CASSANDRA-2285)
                logger.debug("skipping replay of fully-flushed {}", file);
                continue;
            }

            reader.seek(replayPosition);

            if (logger.isDebugEnabled())
                logger.debug("Replaying " + file + " starting at " + reader.getFilePointer());

            /* read the logs populate RowMutation and apply */
            while (!reader.isEOF()) {
                if (logger.isDebugEnabled())
                    logger.debug("Reading mutation at " + reader.getFilePointer());

                long claimedCRC32;
                Checksum checksum = new CRC32();
                int serializedSize;
                try {
                    // any of the reads may hit EOF
                    serializedSize = reader.readInt();
                    // RowMutation must be at LEAST 10 bytes:
                    // 3 each for a non-empty Table and Key (including the 2-byte length from
                    // writeUTF/writeWithShortLength) and 4 bytes for column count.
                    // This prevents the CRC from being fooled by special-case garbage in the file; see CASSANDRA-2128
                    if (serializedSize < 10)
                        break;
                    long claimedSizeChecksum = reader.readLong();
                    checksum.update(serializedSize);
                    if (checksum.getValue() != claimedSizeChecksum)
                        break; // entry wasn't synced correctly/fully.  that's ok.

                    if (serializedSize > bytes.length)
                        bytes = new byte[(int) (1.2 * serializedSize)];
                    reader.readFully(bytes, 0, serializedSize);
                    claimedCRC32 = reader.readLong();
                } catch (EOFException eof) {
                    break; // last CL entry didn't get completely written.  that's ok.
                }

                checksum.update(bytes, 0, serializedSize);
                if (claimedCRC32 != checksum.getValue()) {
                    // this entry must not have been fsynced.  probably the rest is bad too,
                    // but just in case there is no harm in trying them (since we still read on an entry boundary)
                    continue;
                }

                /* deserialize the commit log entry */
                ByteArrayInputStream bufIn = new ByteArrayInputStream(bytes, 0, serializedSize);
                RowMutation rm = null;
                try {
                    // assuming version here. We've gone to lengths to make sure what gets written to the CL is in
                    // the current version.  so do make sure the CL is drained prior to upgrading a node.
                    rm = RowMutation.serializer().deserialize(new DataInputStream(bufIn),
                            MessagingService.version_, false);
                } catch (UnserializableColumnFamilyException ex) {
                    AtomicInteger i = invalidMutations.get(ex.cfId);
                    if (i == null) {
                        i = new AtomicInteger(1);
                        invalidMutations.put(ex.cfId, i);
                    } else
                        i.incrementAndGet();
                    continue;
                }

                if (logger.isDebugEnabled())
                    logger.debug(String.format("replaying mutation for %s.%s: %s", rm.getTable(),
                            ByteBufferUtil.bytesToHex(rm.key()),
                            "{" + StringUtils.join(rm.getColumnFamilies(), ", ") + "}"));

                final long entryLocation = reader.getFilePointer();
                final RowMutation frm = rm;
                Runnable runnable = new WrappedRunnable() {
                    public void runMayThrow() throws IOException {
                        if (DatabaseDescriptor.getKSMetaData(frm.getTable()) == null)
                            return;
                        final Table table = Table.open(frm.getTable());
                        RowMutation newRm = new RowMutation(frm.getTable(), frm.key());

                        // Rebuild the row mutation, omitting column families that a) have already been flushed,
                        // b) are part of a cf that was dropped. Keep in mind that the cf.name() is suspect. do every
                        // thing based on the cfid instead.
                        for (ColumnFamily columnFamily : frm.getColumnFamilies()) {
                            if (CFMetaData.getCF(columnFamily.id()) == null)
                                // null means the cf has been dropped
                                continue;

                            ReplayPosition rp = cfPositions.get(columnFamily.id());

                            // replay if current segment is newer than last flushed one or, if it is the last known
                            // segment, if we are after the replay position
                            if (segment > rp.segment
                                    || (segment == rp.segment && entryLocation > rp.position)) {
                                newRm.add(columnFamily);
                                replayedCount.incrementAndGet();
                            }
                        }
                        if (!newRm.isEmpty()) {
                            Table.open(newRm.getTable()).apply(newRm, false);
                            tablesRecovered.add(table);
                        }
                    }
                };
                futures.add(StageManager.getStage(Stage.MUTATION).submit(runnable));
                if (futures.size() > MAX_OUTSTANDING_REPLAY_COUNT) {
                    FBUtilities.waitOnFutures(futures);
                    futures.clear();
                }
            }
        } finally {
            FileUtils.closeQuietly(reader);
            logger.info("Finished reading " + file);
        }
    }

    for (Map.Entry<Integer, AtomicInteger> entry : invalidMutations.entrySet())
        logger.info(String.format("Skipped %d mutations from unknown (probably removed) CF with id %d",
                entry.getValue().intValue(), entry.getKey()));

    // wait for all the writes to finish on the mutation stage
    FBUtilities.waitOnFutures(futures);
    logger.debug("Finished waiting on mutations from recovery");

    // flush replayed tables
    futures.clear();
    for (Table table : tablesRecovered)
        futures.addAll(table.flush());
    FBUtilities.waitOnFutures(futures);

    return replayedCount.get();
}

From source file:ai.grakn.graql.GraqlShell.java

private static void sendBatchRequest(BatchMutatorClient batchMutatorClient, String graqlPath,
        Optional<Integer> activeTasks, Optional<Integer> batchSize) throws IOException {
    AtomicInteger numberBatchesCompleted = new AtomicInteger(0);

    activeTasks.ifPresent(batchMutatorClient::setNumberActiveTasks);
    batchSize.ifPresent(batchMutatorClient::setBatchSize);

    batchMutatorClient.setTaskCompletionConsumer((json) -> {
        TaskStatus status = TaskStatus.valueOf(json.at("status").asString());

        numberBatchesCompleted.incrementAndGet();
        System.out.println(format("Status of batch: %s", status));
        System.out.println(format("Number batches completed: %s", numberBatchesCompleted.get()));
        System.out.println(format("Approximate queries executed: %s",
                numberBatchesCompleted.get() * batchMutatorClient.getBatchSize()));
    });

    String queries = loadQuery(graqlPath);

    Graql.parseList(queries).forEach(batchMutatorClient::add);

    batchMutatorClient.waitToFinish();
}

From source file:com.ikanow.aleph2.example.flume_harvester.utils.FlumeUtils.java

/** Auto-generates the flume config from an input block
 *  If it's in test mode it also deletes the trackerDir (so this can be used for purging)
 * @param bucket_config
 * @param morphlines_config_path
 * @param test_mode
 * @return
 */
public static FlumeBucketConfigBean createAutoFlumeConfig(final DataBucketBean bucket,
        final FlumeBucketConfigBean bucket_config, final boolean test_mode) {
    //TODO (ALEPH-10): eventually add support for additional shortcuts here
    //TODO (ALEPH-10): security

    final Collection<SpoolDirConfig> dirs = getSpoolDirs(bucket_config);
    final AtomicInteger counter = new AtomicInteger(0);

    if (!dirs.isEmpty()) {
        final ImmutableMap<String, String> new_flume_builder = dirs.stream()
                .reduce(ImmutableMap.<String, String>builder()
                        // defaults
                        .put("channels", "mem").put("channels:mem:capacity", "1000")
                        .put("channels:mem:transactionCapacity", "100").put("channels:mem:type", "memory"),
                        (acc, v) -> {
                            final int count = counter.incrementAndGet();

                            // (some tidy up that occurs in test mode)
                            return Optional.<ImmutableMap.Builder<String, String>>of(acc
                                    .put("sources:file_in_" + count + ":type", "spooldir")
                                    .put("sources:file_in_" + count + ":channels", "mem")
                                    .put("sources:file_in_" + count + ":trackerDir",
                                            getTrackingDirSuffix(bucket))
                                    .put("sources:file_in_" + count + ":deletePolicy",
                                            (v.delete_on_ingest() ? "immediate" : "never"))
                                    .put("sources:file_in_" + count + ":spoolDir",
                                            test_mode ? v.path() + "/" + getTestDirSuffix(bucket) : v.path())
                                    .put("sources:file_in_" + count + ":ignorePattern",
                                            Optional.ofNullable(v.ignore_pattern()).orElse("^$")))
                                    // Some optional fields
                                    .map(acc2 -> {
                                        return Optional.ofNullable(v.append_basename_field()).map(field -> acc2
                                                .put("sources:file_in_" + count + ":basenameHeader", "true")
                                                .put("sources:file_in_" + count + ":basenameHeaderKey", field))
                                                .orElse(acc);
                                    }).map(acc2 -> {
                                        return Optional.ofNullable(v.append_path_field()).map(field -> acc2
                                                .put("sources:file_in_" + count + ":fileHeader", "true")
                                                .put("sources:file_in_" + count + ":fileHeaderKey", field))
                                                .orElse(acc);
                                    }).get();
                        }, (acc1, acc2) -> acc1 // (can't happen in practice)   
                ).put("sources", StreamUtils.zipWithIndex(dirs.stream())
                        .map(i -> ("file_in_" + (1 + i.getIndex()))).collect(Collectors.joining(" ")))
                .build();

        // Clone the config with the new flume config
        return BeanTemplateUtils.clone(bucket_config)
                .with(FlumeBucketConfigBean::flume_config, new_flume_builder).done();
    } else { // Leave unchanged
        return bucket_config;
    }
}

From source file:net.sf.jasperreports.engine.fill.DelayedFillActions.java

private static int assignId(BaseReportFiller reportFiller) {
    AtomicInteger counter = (AtomicInteger) reportFiller.fillContext.getFillCache(FILL_CACHE_KEY_ID);
    if (counter == null) {
        // we just need a mutable integer, there's no actual concurrency here
        counter = new AtomicInteger();
        reportFiller.fillContext.setFillCache(FILL_CACHE_KEY_ID, counter);
    }

    return counter.incrementAndGet();
}
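
As the comment in this example notes, AtomicInteger is sometimes used purely as a mutable int holder rather than for thread safety, for example when a counter must be updated from inside a lambda or anonymous class, where captured local variables have to be effectively final. A hypothetical sketch of that pattern:

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class LambdaCounterDemo {
    public static void main(String[] args) {
        List<String> words = List.of("alpha", "beta", "gamma");

        // A plain local int could not be reassigned inside the lambda below,
        // so a single-element holder such as AtomicInteger stands in for it.
        AtomicInteger index = new AtomicInteger(0);
        words.forEach(w -> System.out.println(index.incrementAndGet() + ": " + w));
    }
}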

From source file:org.talend.dataprep.command.CommandHelper.java

/**
 * Return a Publisher of type T out of the hystrix command.
 *
 * @param clazz the wanted stream type.
 * @param mapper the object mapper used to parse objects.
 * @param command the hystrix command to deal with.
 * @param <T> the type of objects to stream.
 * @return a Publisher<T> out of the hystrix command response body.
 */
public static <T> Publisher<T> toPublisher(final Class<T> clazz, final ObjectMapper mapper,
        final HystrixCommand<InputStream> command) {
    AtomicInteger count = new AtomicInteger(0);
    return Flux.create(sink -> {
        final Observable<InputStream> observable = command.toObservable();
        observable.map(i -> {
            try {
                return mapper.readerFor(clazz).<T>readValues(i);
            } catch (IOException e) {
                throw new TDPException(CommonErrorCodes.UNEXPECTED_EXCEPTION, e);
            }
        }) //
                .doOnCompleted(() -> LOGGER.debug("Completed command '{}' (emits '{}') with '{}' records.",
                        command.getClass().getName(), clazz.getName(), count.get())) //
                .toBlocking() //
                .forEach(s -> {
                    while (s.hasNext()) {
                        sink.next(s.next());
                        count.incrementAndGet();
                    }
                    sink.complete();
                });
    }, FluxSink.OverflowStrategy.BUFFER);
}

From source file:fr.landel.utils.assertor.utils.AssertorIterable.java

private static <I extends Iterable<T>, T> boolean hasInOrder(final I iterable1, final Iterable<T> iterable2,
        final boolean not, final EnumAnalysisMode analysisMode) {

    long found = 0;
    final int size1 = IterableUtils.size(iterable1);
    final int size2 = IterableUtils.size(iterable2);

    if (size1 < size2) {
        return not;
    } else if (size1 == size2) {
        return not ^ iterable1.equals(iterable2);
    }

    if (EnumAnalysisMode.STANDARD.equals(analysisMode)) {
        final Iterator<T> iterator1 = iterable1.iterator();
        Iterator<T> iterator2 = iterable2.iterator();

        // not empty pre-check, so we call next directly
        T value2 = iterator2.next();
        while (iterator1.hasNext() && found < size2) {
            if (Objects.equals(iterator1.next(), value2)) {
                ++found;
                if (iterator2.hasNext()) {
                    value2 = iterator2.next();
                }
            } else if (found > 0) {
                found = 0;
                iterator2 = iterable2.iterator();
                value2 = iterator2.next();
            }
        }
    } else {
        final AtomicInteger count = new AtomicInteger(0);

        final List<T> list2 = IterableUtils.toList(iterable2);

        StreamSupport.stream(iterable1.spliterator(), EnumAnalysisMode.PARALLEL.equals(analysisMode))
                .forEachOrdered(o -> {
                    int inc = count.get();
                    if (inc < size2) {
                        if (Objects.equals(o, list2.get(inc))) {
                            count.incrementAndGet();
                        } else if (inc > 0) {
                            count.set(0);
                        }
                    }
                });

        found = count.get();
    }

    return not ^ (found == size2);
}

From source file:com.mcleodmoores.mvn.natives.defaults.Defaults.java

private static void saveMultiple(final Properties properties, final String prefix, final String key,
        final SourceDefaults[] sources, final AtomicInteger identifier) {
    if (sources != null) {
        final String[] values = new String[sources.length];
        for (int i = 0; i < sources.length; i++) {
            sources[i].saveImpl(properties, values[i] = new StringBuilder().append(key.charAt(0))
                    .append(identifier.incrementAndGet()).toString(), identifier);
        }
        setSingle(properties, prefix, key, StringUtils.join(values, ';'));
    }
}

From source file:org.apache.hadoop.hbase.client.TestAsyncProcess.java

static MultiResponse createMultiResponse(final MultiAction<Row> multi, AtomicInteger nbMultiResponse,
        AtomicInteger nbActions, ResponseGenerator gen) {
    final MultiResponse mr = new MultiResponse();
    nbMultiResponse.incrementAndGet();
    for (Map.Entry<byte[], List<Action<Row>>> entry : multi.actions.entrySet()) {
        byte[] regionName = entry.getKey();
        for (Action<Row> a : entry.getValue()) {
            nbActions.incrementAndGet();
            gen.addResponse(mr, regionName, a);
        }
    }
    return mr;
}