Example usage for java.util.concurrent.atomic AtomicInteger getAndAdd

Introduction

On this page you can find usage examples for java.util.concurrent.atomic AtomicInteger getAndAdd.

Prototype

public final int getAndAdd(int delta) 

Document

Atomically adds the given value to the current value, with memory effects as specified by VarHandle#getAndAdd.
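
As a quick illustration of the return-value semantics (a minimal sketch, not taken from the examples below): getAndAdd returns the value before the addition, whereas addAndGet returns the value after it.

import java.util.concurrent.atomic.AtomicInteger;

public class GetAndAddDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(10);
        int previous = counter.getAndAdd(5); // returns the old value: 10
        System.out.println(previous);        // prints 10
        System.out.println(counter.get());   // prints 15
    }
}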

Usage

From source file: org.languagetool.AnalyzedTokenReadings.java

/**
 * @since 2.3
 */
@Override
public Iterator<AnalyzedToken> iterator() {
    AtomicInteger i = new AtomicInteger(0);
    return new Iterator<AnalyzedToken>() {
        @Override
        public boolean hasNext() {
            return i.get() < getReadingsLength();
        }

        @Override
        public AnalyzedToken next() {
            try {
                return anTokReadings[i.getAndAdd(1)];
            } catch (ArrayIndexOutOfBoundsException e) {
                throw new NoSuchElementException(
                        "No such element: " + i + ", element count: " + anTokReadings.length);
            }
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
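
In this iterator, getAndAdd(1) acts as an atomic post-increment on an index captured by the anonymous inner class; a plain int local could not be mutated there because captured locals must be effectively final. A minimal sketch of the same idiom, with hypothetical names:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;

public class CapturedCounterDemo {
    public static void main(String[] args) {
        // AtomicInteger stands in for a mutable int inside the lambda below,
        // where an ordinary local variable could not be reassigned.
        AtomicInteger index = new AtomicInteger(0);
        Stream.of("a", "b", "c")
                .forEach(s -> System.out.println(index.getAndAdd(1) + ": " + s));
        // prints 0: a, 1: b, 2: c
    }
}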

From source file: org.moe.gradle.tasks.Launchers.java

private static void setupDevicesAndSimulators(@NotNull MoePlugin plugin, @NotNull Project project,
        @NotNull List<String> devices, @NotNull List<String> simulators, @NotNull Options options,
        @Nullable XcodeBuild xcodeBuildDev, @Nullable XcodeBuild xcodeBuildSim, @NotNull Task task,
        boolean test) {
    Require.nonNull(plugin);
    Require.nonNull(project);
    Require.nonNull(devices);
    Require.nonNull(simulators);
    Require.nonNull(options);
    Require.nonNull(task);

    final AtomicInteger numFailedTests = new AtomicInteger();
    final File testOutputDir = project.getBuildDir().toPath().resolve(Paths.get(MoePlugin.MOE, "reports"))
            .toFile();
    if (test) {
        try {
            FileUtils.deleteFileOrFolder(testOutputDir);
        } catch (IOException e) {
            throw new GradleException(e.getMessage(), e);
        }
        if (!testOutputDir.exists() && !testOutputDir.mkdirs()) {
            throw new GradleException("Failed to create directory " + testOutputDir);
        }
        if (testOutputDir.exists() && !testOutputDir.isDirectory()) {
            throw new GradleException("Expected directory at " + testOutputDir);
        }
    }

    for (String udid : devices) {
        if (!options.launch && !options.installOnTarget) {
            continue;
        }
        task.getActions().add(t -> {
            // Get proper Xcode settings
            final Map<String, String> settings;
            if (xcodeBuildDev.getDidWork()) {
                settings = xcodeBuildDev.getXcodeBuildSettings();
            } else {
                settings = xcodeBuildDev.getCachedXcodeBuildSettings();
            }

            // Get app path
            String productName = settings.get("FULL_PRODUCT_NAME");
            if (settings.get("FULL_PRODUCT_NAME").endsWith("Tests.xctest")) {
                productName = productName.replace("Tests.xctest", ".app");
            }
            final File appPath = new File(settings.get("BUILT_PRODUCTS_DIR"), productName);

            final JUnitTestCollector testCollector;
            if (test && !options.rawTestOutput && options.launch) {
                testCollector = new JUnitTestCollector();
            } else {
                testCollector = null;
            }

            TaskUtils.javaexec(project, exec -> {
                // Create device launcher
                final DeviceLauncherBuilder builder = new DeviceLauncherBuilder()
                        .setWaitForDevice(options.waitForDevice);
                if (udid != null) {
                    builder.setUDID(udid);
                }
                if (options.debug != null) {
                    builder.setDebug(options.debug.local, options.debug.remote);
                }
                if (options.installOnTarget && !options.launch) {
                    builder.setInstallMode(InstallMode.UPGRADE_ONLY);
                } else if (!options.installOnTarget && options.launch) {
                    builder.setInstallMode(InstallMode.RUN_ONLY);
                }
                options.envs.forEach(builder::putEnvVar);
                options.vmargs.forEach(builder::addLaunchArgs);
                builder.addLaunchArgs("-args");
                options.args.forEach(builder::addLaunchArgs);
                options.proxies.forEach(p -> builder.addProxyPort(p.local, p.remote));
                builder.setAppPath(appPath).build(plugin, exec);

                if (testCollector != null) {
                    final JUnitTestCollectorWriter writer = new JUnitTestCollectorWriter(testCollector);
                    exec.setStandardOutput(writer);
                    exec.setErrorOutput(writer);
                } else {
                    exec.setStandardOutput(new StreamToLogForwarder(LOG, false));
                    exec.setErrorOutput(new StreamToLogForwarder(LOG, true));
                }
            });

            if (testCollector != null) {
                numFailedTests.getAndAdd(testCollector.getNumFailures() + testCollector.getNumErrors());
                writeJUnitReport(udid == null ? "unknown-device" : udid, testCollector, testOutputDir);
            }
        });
    }

    for (String udid : simulators) {
        if (!options.launch) {
            continue;
        }
        task.getActions().add(t -> {
            // Get proper Xcode settings
            final Map<String, String> settings;
            if (xcodeBuildSim.getDidWork()) {
                settings = xcodeBuildSim.getXcodeBuildSettings();
            } else {
                settings = xcodeBuildSim.getCachedXcodeBuildSettings();
            }

            // Get app path
            String productName = settings.get("FULL_PRODUCT_NAME");
            if (settings.get("FULL_PRODUCT_NAME").endsWith("Tests.xctest")) {
                productName = productName.replace("Tests.xctest", ".app");
            }
            final File appPath = new File(settings.get("BUILT_PRODUCTS_DIR"), productName);

            final JUnitTestCollector testCollector;
            if (test && !options.rawTestOutput) {
                testCollector = new JUnitTestCollector();
            } else {
                testCollector = null;
            }

            TaskUtils.exec(project, exec -> {
                // Create simulator launcher
                final SimulatorLauncherBuilder builder = new SimulatorLauncherBuilder();
                if (udid != null) {
                    builder.setUDID(udid);
                }
                if (options.debug != null) {
                    builder.setDebug(options.debug.local);
                }
                options.envs.forEach(builder::putEnvVar);
                options.vmargs.forEach(builder::addLaunchArgs);
                builder.addLaunchArgs("-args");
                options.args.forEach(builder::addLaunchArgs);
                builder.setAppPath(appPath).build(plugin, exec);

                if (testCollector != null) {
                    final JUnitTestCollectorWriter writer = new JUnitTestCollectorWriter(testCollector);
                    exec.setStandardOutput(writer);
                    exec.setErrorOutput(writer);
                } else {
                    exec.setStandardOutput(new StreamToLogForwarder(LOG, false));
                    exec.setErrorOutput(new StreamToLogForwarder(LOG, true));
                }
            });

            if (testCollector != null) {
                numFailedTests.getAndAdd(testCollector.getNumFailures() + testCollector.getNumErrors());
                writeJUnitReport(udid == null ? "unknown-simulator" : udid, testCollector, testOutputDir);
            }
        });
    }

    if (test) {
        task.getActions().add(t -> {
            if (numFailedTests.get() > 0) {
                throw new GradleException(numFailedTests.get()
                        + " tests failed on all targets combined, reports can be found here: " + testOutputDir);
            }
        });
    }
}
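
Here numFailedTests serves two purposes: it is mutated inside lambdas registered as deferred task actions, where a plain local could not be reassigned, and getAndAdd accumulates failure counts safely even if those actions were ever to run concurrently. The previous value returned by getAndAdd is discarded, so addAndGet would work equally well.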

From source file: org.sakaiproject.tool.assessment.facade.ItemHashUtil.java

/**
 * Bit of a hack to allow reuse between {@link ItemFacadeQueries} and {@link PublishedItemFacadeQueries}.
 * Arguments are rather arbitrary extension points to support what we happen to <em>know</em> are the differences
 * between item and published item processing, as well as the common utilities/service dependencies.
 *
 * @param batchSize
 * @param hqlQueries
 * @param concreteType
 * @param hashAndAssignCallback
 * @param hibernateTemplate
 * @return a {@link BackfillItemHashResult} summarizing counts, timings, and any errors from the run
 */
BackfillItemHashResult backfillItemHashes(int batchSize, Map<String, String> hqlQueries,
        Class<? extends ItemDataIfc> concreteType, Function<ItemDataIfc, ItemDataIfc> hashAndAssignCallback,
        HibernateTemplate hibernateTemplate) {

    final long startTime = System.currentTimeMillis();
    log.debug("Hash backfill starting for items of type [" + concreteType.getSimpleName() + "]");

    if (batchSize <= 0) {
        batchSize = 100;
    }
    final int flushSize = batchSize;

    final AtomicInteger totalItems = new AtomicInteger(0);
    final AtomicInteger totalItemsNeedingBackfill = new AtomicInteger(0);
    final AtomicInteger batchNumber = new AtomicInteger(0);
    final AtomicInteger recordsRead = new AtomicInteger(0);
    final AtomicInteger recordsUpdated = new AtomicInteger(0);
    final Map<Long, Throwable> hashingErrors = new TreeMap<>();
    final Map<Integer, Throwable> otherErrors = new TreeMap<>();
    final List<Long> batchElapsedTimes = new ArrayList<>();
    // Always needed as a printable average per-batch timing value, so just store it as a string, and
    // cache it at this scope because we sometimes need to print a single calculation multiple times,
    // e.g. in the last batch and at method exit
    final AtomicReference<String> currentAvgBatchElapsedTime = new AtomicReference<>("0.00");
    final AtomicBoolean areMoreItems = new AtomicBoolean(true);

    // Get the item totals up front since a) we know any questions created while the job is running will be
    // assigned hashes and thus won't need to be handled by the job and b) it makes bookkeeping within the
    // job much easier
    hibernateTemplate.execute(session -> {
        session.setDefaultReadOnly(true);
        totalItems.set(countItems(hqlQueries, session));
        totalItemsNeedingBackfill.set(countItemsNeedingHashBackfill(hqlQueries, session));
        log.debug("Hash backfill required for [" + totalItemsNeedingBackfill + "] of [" + totalItems
                + "] items of type [" + concreteType.getSimpleName() + "]");
        return null;
    });

    while (areMoreItems.get()) {
        long batchStartTime = System.currentTimeMillis();
        batchNumber.getAndIncrement();
        final AtomicInteger itemsHashedInBatch = new AtomicInteger(0);
        final AtomicInteger itemsReadInBatch = new AtomicInteger(0);
        final AtomicReference<Throwable> failure = new AtomicReference<>(null);

        // Idea here is a) avoid very long running transactions and b) avoid reading all items into memory
        // and c) avoid weirdness, e.g. duplicate results, when paginating complex hibernate objects. So
        // there's a per-batch transaction, and each batch re-runs the same two item lookup queries, one to
        // get the list of IDs for the next page of items, and one to resolve those IDs to items
        try {
            new TransactionTemplate(transactionManager, requireNewTransaction()).execute(status -> {
                hibernateTemplate.execute(session -> {
                    List<ItemDataIfc> itemsInBatch = null;
                    try { // resource cleanup block
                        session.setFlushMode(FlushMode.MANUAL);
                        try { // initial read block (failures here are fatal)

                            // set up the actual result set for this batch of items. use error count to skip over failed items
                            final List<Long> itemIds = itemIdsNeedingHashBackfill(hqlQueries, flushSize,
                                    hashingErrors.size(), session);
                            itemsInBatch = itemsById(itemIds, hqlQueries, session);

                        } catch (RuntimeException e) {
                            // Panic on failure to read counts and/or the actual items in the batch.
                            // Otherwise would potentially loop indefinitely since this design has no way to
                            // skip this page of results.
                            log.error("Failed to read batch of hashable items. Giving up at record ["
                                    + recordsRead + "] of [" + totalItemsNeedingBackfill + "] Type: ["
                                    + concreteType.getSimpleName() + "]", e);
                            areMoreItems.set(false); // force overall loop to exit
                            throw e; // force txn to give up
                        }

                        for (ItemDataIfc item : itemsInBatch) {
                            recordsRead.getAndIncrement();
                            itemsReadInBatch.getAndIncrement();

                            // Assign the item's hash/es
                            try {
                                log.debug("Backfilling hash for item [" + recordsRead + "] of ["
                                        + totalItemsNeedingBackfill + "] Type: [" + concreteType.getSimpleName()
                                        + "] ID: [" + item.getItemId() + "]");
                                hashAndAssignCallback.apply(item);
                                itemsHashedInBatch.getAndIncrement();
                            } catch (Throwable t) {
                                // Failures considered ignorable here... probably some unexpected item state
                                // that prevented hash calculation.
                                //
                                // Re the log statement... yes, the caller probably logs exceptions, but likely
                                // without stack traces, and we'd like to advertise failures as quickly as possible,
                                // so we go ahead and emit an error log here.
                                log.error("Item hash calculation failed for item [" + recordsRead + "] of ["
                                        + totalItemsNeedingBackfill + "] Type: [" + concreteType.getSimpleName()
                                        + "] ID: [" + (item == null ? "?" : item.getItemId()) + "]", t);
                                hashingErrors.put(item.getItemId(), t);
                            }

                        }
                        if (itemsHashedInBatch.get() > 0) {
                            session.flush();
                            recordsUpdated.getAndAdd(itemsHashedInBatch.get());
                        }
                        areMoreItems.set(itemsInBatch.size() >= flushSize);

                    } finally {
                        quietlyClear(session); // potentially very large, so clear aggressively
                    }
                    return null;
                }); // end session
                return null;
            }); // end transaction
        } catch (Throwable t) {
            // We're still in the loop over all batches, but something caused the current batch (and its
            // transaction) to exit abnormally. Logging of both success and failure cases is quite detailed,
            // and needs the same timing calcs, so is consolidated into the 'finally' block below.
            failure.set(t);
            otherErrors.put(batchNumber.get(), t);
        } finally {
            // Detailed batch-level reporting
            final long batchElapsed = (System.currentTimeMillis() - batchStartTime);
            batchElapsedTimes.add(batchElapsed);
            currentAvgBatchElapsedTime.set(new DecimalFormat("#.00")
                    .format(batchElapsedTimes.stream().collect(Collectors.averagingLong(l -> l))));
            if (failure.get() == null) {
                log.debug("Item hash backfill batch flushed to database. Type: [" + concreteType.getSimpleName()
                        + "] Batch number: [" + batchNumber + "] Items attempted in batch: [" + itemsReadInBatch
                        + "] Items succeeded in batch: [" + itemsHashedInBatch + "] Total items attempted: ["
                        + recordsRead + "] Total items succeeded: [" + recordsUpdated
                        + "] Total attemptable items: [" + totalItemsNeedingBackfill + "] Elapsed batch time: ["
                        + batchElapsed + "ms] Avg time/batch: [" + currentAvgBatchElapsedTime + "ms]");
            } else {
                // yes, caller probably logs exceptions later, but probably without stack traces, and we'd
                // like to advertise failures as quickly as possible, so we go ahead and emit an error log
                // here.
                log.error("Item hash backfill failed. Type: [" + concreteType.getSimpleName()
                        + "] Batch number: [" + batchNumber + "] Items attempted in batch: [" + itemsReadInBatch
                        + "] Items flushable (but failed) in batch: [" + itemsHashedInBatch
                        + "] Total items attempted: [" + recordsRead + "] Total items succeeded: ["
                        + recordsUpdated + "] Total attemptable items: [" + totalItemsNeedingBackfill
                        + "] Elapsed batch time: [" + batchElapsed + "ms] Avg time/batch: ["
                        + currentAvgBatchElapsedTime + "ms]", failure.get());
            }
        }
    } // end loop over all batches

    final long elapsedTime = System.currentTimeMillis() - startTime;
    log.debug("Hash backfill completed for items of type [" + concreteType.getSimpleName()
            + "]. Total items attempted: [" + recordsRead + "] Total items succeeded: [" + recordsUpdated
            + "] Target attemptable items: [" + totalItemsNeedingBackfill + "] Total elapsed time: ["
            + elapsedTime + "ms] Total batches: [" + batchNumber + "] Avg time/batch: ["
            + currentAvgBatchElapsedTime + "ms]");

    return new BackfillItemHashResult(elapsedTime, totalItems.get(), totalItemsNeedingBackfill.get(),
            recordsRead.get(), recordsUpdated.get(), flushSize, hashingErrors, otherErrors);
}
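
In this backfill job, getAndAdd folds the per-batch itemsHashedInBatch counter into the running recordsUpdated total after each successful flush. AtomicIntegers are used for all of the counters because they are mutated inside the Hibernate session and transaction callbacks, where captured locals must be effectively final.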

From source file: org.springframework.amqp.rabbit.connection.CachingConnectionFactoryTests.java

@Test
public void testWithConnectionListener() throws Exception {

    com.rabbitmq.client.ConnectionFactory mockConnectionFactory = mock(
            com.rabbitmq.client.ConnectionFactory.class);
    com.rabbitmq.client.Connection mockConnection1 = mock(com.rabbitmq.client.Connection.class);
    when(mockConnection1.toString()).thenReturn("conn1");
    com.rabbitmq.client.Connection mockConnection2 = mock(com.rabbitmq.client.Connection.class);
    when(mockConnection2.toString()).thenReturn("conn2");
    Channel mockChannel = mock(Channel.class);

    when(mockConnectionFactory.newConnection(any(ExecutorService.class), anyString()))
            .thenReturn(mockConnection1, mockConnection2);
    when(mockConnection1.isOpen()).thenReturn(true);
    when(mockChannel.isOpen()).thenReturn(true);
    when(mockConnection1.createChannel()).thenReturn(mockChannel);
    when(mockConnection2.createChannel()).thenReturn(mockChannel);

    final AtomicReference<Connection> created = new AtomicReference<Connection>();
    final AtomicReference<Connection> closed = new AtomicReference<Connection>();
    final AtomicInteger timesClosed = new AtomicInteger(0);
    AbstractConnectionFactory connectionFactory = createConnectionFactory(mockConnectionFactory);
    connectionFactory.addConnectionListener(new ConnectionListener() {

        @Override
        public void onCreate(Connection connection) {
            created.set(connection);
        }

        @Override
        public void onClose(Connection connection) {
            closed.set(connection);
            timesClosed.getAndAdd(1);
        }
    });
    ((CachingConnectionFactory) connectionFactory).setChannelCacheSize(1);

    Connection con = connectionFactory.createConnection();
    Channel channel = con.createChannel(false);
    assertSame(con, created.get());
    channel.close();

    con.close();
    verify(mockConnection1, never()).close();

    Connection same = connectionFactory.createConnection();
    channel = con.createChannel(false);
    assertSame(con, same);
    channel.close();
    com.rabbitmq.client.Connection conDelegate = targetDelegate(con);

    when(mockConnection1.isOpen()).thenReturn(false);
    when(mockChannel.isOpen()).thenReturn(false); // force a connection refresh
    channel.basicCancel("foo");
    channel.close();
    assertEquals(1, timesClosed.get());

    Connection notSame = connectionFactory.createConnection();
    assertNotSame(conDelegate, targetDelegate(notSame));
    assertSame(con, closed.get());
    assertSame(notSame, created.get());

    connectionFactory.destroy();
    verify(mockConnection2, atLeastOnce()).close(anyInt());
    assertSame(notSame, closed.get());
    assertEquals(2, timesClosed.get());

    verify(mockConnectionFactory, times(2)).newConnection(any(ExecutorService.class), anyString());
}
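
In this test, timesClosed.getAndAdd(1) simply counts onClose callbacks; since the previous value returned by getAndAdd is discarded, getAndIncrement() or incrementAndGet() would be equivalent here.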

From source file: org.springframework.yarn.am.allocate.DefaultAllocateCountTracker.java

/**
 * Gets the allocate counts which should be used
 * to create allocate requests.
 *
 * @return the allocate counts
 */
public AllocateCountInfo getAllocateCounts() {
    AllocateCountInfo info = new AllocateCountInfo();
    HashMap<String, Integer> allocateCountMap = new HashMap<String, Integer>();

    int total = 0;

    // flush pending hosts from incoming to outgoing
    Iterator<Entry<String, AtomicInteger>> iterator = pendingHosts.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, AtomicInteger> entry = iterator.next();
        int value = entry.getValue().getAndSet(0);
        allocateCountMap.put(entry.getKey(), value);
        AtomicInteger out = requestedHosts.get(entry.getKey());
        if (out == null) {
            out = new AtomicInteger(value);
            requestedHosts.put(entry.getKey(), out);
        } else {
            out.getAndAdd(value);
        }
        total += out.get();
    }
    info.hostsInfo = allocateCountMap;

    allocateCountMap = new HashMap<String, Integer>();
    // flush pending racks from incoming to outgoing
    iterator = pendingRacks.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, AtomicInteger> entry = iterator.next();
        int value = entry.getValue().getAndSet(0);
        allocateCountMap.put(entry.getKey(), value);
        AtomicInteger out = requestedRacks.get(entry.getKey());
        if (out == null) {
            out = new AtomicInteger(value);
            requestedRacks.put(entry.getKey(), out);
        } else {
            out.getAndAdd(value);
        }
        total += out.get();
    }
    info.racksInfo = allocateCountMap;

    allocateCountMap = new HashMap<String, Integer>();
    // This is the point where the allocation request gets tricky. Allocation will not happen
    // until "*" is sent as a hostname, and the count for "*" has to match the total of hosts and
    // racks to be requested. The count also needs to include any general "*" requests.
    // We try to calculate as accurate a number as we can so we don't request too much
    // garbage if the user is ramping up requests throughout the AM lifecycle.
    int value = requestedAny.addAndGet(pendingAny.getAndSet(0));
    total += value;
    allocateCountMap.put("*", total);
    info.anysInfo = allocateCountMap;

    return info;
}
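
The getAndSet(0) followed by getAndAdd(value) seen above is a drain pattern: it atomically takes and clears a pending count, then folds it into a running total. A stripped-down sketch of the same move, assuming hypothetical pending and requested maps keyed by hostname:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class DrainDemo {
    static final ConcurrentMap<String, AtomicInteger> pending = new ConcurrentHashMap<>();
    static final ConcurrentMap<String, AtomicInteger> requested = new ConcurrentHashMap<>();

    static void drain(String key) {
        AtomicInteger in = pending.get(key);
        if (in == null) {
            return; // nothing pending for this key
        }
        int value = in.getAndSet(0); // atomically take and reset the pending count
        AtomicInteger out = requested.get(key);
        if (out == null) {
            requested.put(key, new AtomicInteger(value));
        } else {
            out.getAndAdd(value); // fold into the existing total
        }
    }

    public static void main(String[] args) {
        pending.put("host-1", new AtomicInteger(3));
        drain("host-1");
        drain("host-1"); // second drain adds nothing; the pending count was reset
        System.out.println(requested.get("host-1")); // prints 3
    }
}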