Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
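
Before the real-world snippets below, here is a minimal, self-contained sketch (not taken from any of the sources listed) showing the atomicity guarantee: four threads increment a shared counter concurrently, and the final value always equals the total number of calls, which a plain int++ would not guarantee.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger();
        Thread[] threads = new Thread[4];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet(); // atomic ++counter; returns the updated value
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
        System.out.println(counter.get()); // always prints 4000
    }
}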

Usage

From source file:oz.hadoop.yarn.api.core.LocalApplicationLaunchTests.java

@Test //(timeout=10000)
public void validateWithReplyListener() throws Exception {
    final AtomicInteger repliesCounter = new AtomicInteger();
    YarnApplication<DataProcessor> yarnApplication = YarnAssembly
            .forApplicationContainer(SimpleEchoContainer.class).containerCount(2).memory(512)
            .withApplicationMaster().maxAttempts(2).priority(2).build("sample-yarn-application");
    yarnApplication.registerReplyListener(new ContainerReplyListener() {
        @Override
        public void onReply(ByteBuffer replyData) {
            repliesCounter.incrementAndGet();
        }
    });

    DataProcessor dataProcessor = yarnApplication.launch();

    assertEquals(2, dataProcessor.containers());

    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < dataProcessor.containers(); j++) {
            dataProcessor.process(ByteBuffer.wrap(("Hello Yarn!-" + i).getBytes()));
        }
    }
    assertTrue(yarnApplication.isRunning());
    yarnApplication.shutDown();
    assertEquals(repliesCounter.get(), dataProcessor.completedSinceStart());
    assertFalse(yarnApplication.isRunning());
}

From source file:com.netflix.discovery.shared.Applications.java

/**
 * Populates the provided instance count map.  The instance count map is used as part of the general
 * app list synchronization mechanism.
 * @param instanceCountMap the map to populate
 */
public void populateInstanceCountMap(TreeMap<String, AtomicInteger> instanceCountMap) {
    for (Application app : this.getRegisteredApplications()) {
        for (InstanceInfo info : app.getInstancesAsIsFromEureka()) {
            AtomicInteger instanceCount = instanceCountMap.get(info.getStatus().name());
            if (instanceCount == null) {
                instanceCount = new AtomicInteger(0);
                instanceCountMap.put(info.getStatus().name(), instanceCount);
            }
            instanceCount.incrementAndGet();
        }
    }
}
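
On Java 8 and later, the null-check-then-put pattern above can be written more compactly with Map#computeIfAbsent; a hedged equivalent of the same method, assuming the same surrounding types:

public void populateInstanceCountMap(TreeMap<String, AtomicInteger> instanceCountMap) {
    for (Application app : this.getRegisteredApplications()) {
        for (InstanceInfo info : app.getInstancesAsIsFromEureka()) {
            // Creates the counter on the first sighting of a status, then increments it.
            instanceCountMap.computeIfAbsent(info.getStatus().name(), k -> new AtomicInteger(0))
                    .incrementAndGet();
        }
    }
}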

From source file:org.apache.solr.client.solrj.TestSolrJErrorHandling.java

void doThreads(final HttpSolrClient client, final int numThreads, final int numRequests) throws Exception {
    final AtomicInteger tries = new AtomicInteger(0);

    List<Thread> threads = new ArrayList<>();

    for (int i = 0; i < numThreads; i++) {
        final int threadNum = i;
        threads.add(new Thread() {
            int reqLeft = numRequests;

            @Override
            public void run() {
                try {
                    while (--reqLeft >= 0) {
                        tries.incrementAndGet();
                        doSingle(client, threadNum);
                    }
                } catch (Throwable e) {
                    // Allow thread to exit, we should have already recorded the exception.
                }
            }
        });
    }

    for (Thread thread : threads) {
        thread.start();
    }
    for (Thread thread : threads) {
        thread.join();
    }

    showExceptions();

    int count = getCount(client);
    if (count > tries.get()) {
        fail("Number of requests was " + tries.get() + " but final count was " + count);
    }

    assertEquals(tries.get(), getCount(client));

    assertTrue("got unexpected exceptions. ", unexpected.isEmpty());
}

From source file:com.ikanow.aleph2.analytics.services.TestGraphBuilderEnrichmentService.java

@Test
public void test_delegation() {
    final AtomicInteger wrapper_counter = new AtomicInteger(0);
    final AtomicInteger emit_counter = new AtomicInteger(0);
    final AtomicInteger init_counter = new AtomicInteger(0);
    final AtomicInteger done_counter = new AtomicInteger(0);

    final Streamable<Tuple2<Long, IBatchRecord>> test_stream = Streamable
            .of(Arrays.asList(_mapper.createObjectNode()))
            .<Tuple2<Long, IBatchRecord>>map(j -> Tuples._2T(0L, new BatchRecordUtils.JsonBatchRecord(j)));

    final IEnrichmentBatchModule delegate = Mockito.mock(IEnrichmentBatchModule.class);
    Mockito.doAnswer(__ -> {
        init_counter.incrementAndGet();
        return null;
    }).when(delegate).onStageInitialize(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(),
            Mockito.any());
    Mockito.doAnswer(in -> {
        @SuppressWarnings("unchecked")
        final Stream<Tuple2<Long, IBatchRecord>> stream = (Stream<Tuple2<Long, IBatchRecord>>) in
                .getArguments()[0];
        stream.forEach(t2 -> emit_counter.incrementAndGet());
        return null;
    }).when(delegate).onObjectBatch(Mockito.any(), Mockito.any(), Mockito.any());
    Mockito.doAnswer(__ -> {
        done_counter.incrementAndGet();
        return null;
    }).when(delegate).onStageComplete(Mockito.anyBoolean());
    Mockito.when(delegate.cloneForNewGrouping()).thenReturn(delegate);
    Mockito.when(delegate.validateModule(Mockito.any(), Mockito.any(), Mockito.any()))
            .thenReturn(Arrays.asList(ErrorUtils.buildErrorMessage("", "", "")));

    final IGraphService throwing_graph_service = Mockito.mock(IGraphService.class);
    Mockito.when(throwing_graph_service.getUnderlyingPlatformDriver(Mockito.any(), Mockito.any()))
            .thenReturn(Optional.of(delegate));
    final MockServiceContext mock_service_context = new MockServiceContext();
    mock_service_context.addService(IGraphService.class, Optional.empty(), throwing_graph_service);
    final IEnrichmentModuleContext enrich_context = Mockito.mock(IEnrichmentModuleContext.class);
    Mockito.when(enrich_context.getServiceContext()).thenReturn(mock_service_context);
    Mockito.when(enrich_context.emitImmutableObject(Mockito.anyLong(), Mockito.any(), Mockito.any(),
            Mockito.any(), Mockito.any())).thenAnswer(invocation -> {
                wrapper_counter.incrementAndGet();
                return null;
            });

    final GraphSchemaBean graph_schema = BeanTemplateUtils.build(GraphSchemaBean.class).done().get();
    final DataBucketBean bucket = BeanTemplateUtils
            .build(DataBucketBean.class).with(DataBucketBean::data_schema, BeanTemplateUtils
                    .build(DataSchemaBean.class).with(DataSchemaBean::graph_schema, graph_schema).done().get())
            .done().get();
    final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
            .done().get();

    final GraphBuilderEnrichmentService under_test = new GraphBuilderEnrichmentService();
    under_test.onStageInitialize(enrich_context, bucket, control, Tuples._2T(null, null), Optional.empty());
    under_test.onObjectBatch(test_stream.stream(), Optional.empty(), Optional.empty());
    under_test.onStageComplete(true);
    assertEquals(delegate, under_test.cloneForNewGrouping());
    assertEquals(1, under_test.validateModule(enrich_context, bucket, control).size());
    assertEquals(1, emit_counter.getAndSet(0));
    assertEquals(1, init_counter.getAndSet(0));
    assertEquals(1, done_counter.getAndSet(0));
    assertEquals(1, wrapper_counter.getAndSet(0));
}

From source file:com.github.brandtg.switchboard.TestMysqlLogServer.java

@Test
public void testMysqlEventListener() throws Exception {
    try (Connection conn = DriverManager.getConnection(jdbc, "root", "")) {
        // Write some rows, so we have binlog entries
        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO simple VALUES(?, ?)");
        for (int i = 0; i < 10; i++) {
            pstmt.setInt(1, i);
            pstmt.setInt(2, i);
            pstmt.execute();
        }
    }

    final AtomicInteger insertCount = new AtomicInteger();
    final AtomicInteger beginCount = new AtomicInteger();
    final AtomicInteger commitCount = new AtomicInteger();
    final AtomicInteger rollbackCount = new AtomicInteger();
    InetSocketAddress sourceAddress = new InetSocketAddress(8080);
    InetSocketAddress sinkAddress = new InetSocketAddress(9090);
    MysqlEventListener eventListener = new MysqlEventListener("test", sourceAddress, sinkAddress) {
        @Override
        public void onBegin(UUID sourceId, long transactionId) {
            beginCount.incrementAndGet();
        }

        @Override
        public void onInsert(List<Row> rows) {
            insertCount.incrementAndGet();
        }

        @Override
        public void onUpdate(List<Pair<Row>> rows) {

        }

        @Override
        public void onDelete(List<Row> rows) {

        }

        @Override
        public void onCommit() {
            commitCount.incrementAndGet();
        }

        @Override
        public void onRollback() {
            rollbackCount.incrementAndGet();
        }
    };

    try {
        eventListener.start();

        long startTime = System.currentTimeMillis();
        long currentTime = startTime;
        do {
            // Once we've seen all writes, check expected state
            if (insertCount.get() == 10) {
                Assert.assertEquals(beginCount.get(), 10);
                Assert.assertEquals(commitCount.get(), 10);
                Assert.assertEquals(rollbackCount.get(), 0);
                return;
            }
            Thread.sleep(pollMillis);
            currentTime = System.currentTimeMillis();
        } while (currentTime - startTime < timeoutMillis);
    } finally {
        eventListener.shutdown();
    }

    Assert.fail("Timed out while polling");
}

From source file:org.apache.hadoop.hbase.client.TestAsyncTable.java

@Test
public void testCheckAndDelete() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    int count = 10;
    CountDownLatch putLatch = new CountDownLatch(count + 1);
    table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).thenRun(() -> putLatch.countDown());
    IntStream.range(0, count)
            .forEach(i -> table.put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE))
                    .thenRun(() -> putLatch.countDown()));
    putLatch.await();

    AtomicInteger successCount = new AtomicInteger(0);
    AtomicInteger successIndex = new AtomicInteger(-1);
    CountDownLatch deleteLatch = new CountDownLatch(count);
    IntStream.range(0, count).forEach(i -> table
            .checkAndDelete(row, FAMILY, QUALIFIER, VALUE,
                    new Delete(row).addColumn(FAMILY, QUALIFIER).addColumn(FAMILY, concat(QUALIFIER, i)))
            .thenAccept(x -> {
                if (x) {
                    successCount.incrementAndGet();
                    successIndex.set(i);
                }
                deleteLatch.countDown();
            }));
    deleteLatch.await();
    assertEquals(1, successCount.get());
    Result result = table.get(new Get(row)).get();
    IntStream.range(0, count).forEach(i -> {
        if (i == successIndex.get()) {
            assertFalse(result.containsColumn(FAMILY, concat(QUALIFIER, i)));
        } else {
            assertArrayEquals(VALUE, result.getValue(FAMILY, concat(QUALIFIER, i)));
        }
    });
}

From source file:org.apache.cassandra.db.commitlog.CommitLogReplayer.java

/**
 * Deserializes and replays a commit log entry.
 */
void replayMutation(byte[] inputBuffer, int size, final long entryLocation, final CommitLogDescriptor desc)
        throws IOException {

    final Mutation mutation;
    try (FastByteArrayInputStream bufIn = new FastByteArrayInputStream(inputBuffer, 0, size)) {
        mutation = Mutation.serializer.deserialize(new DataInputStream(bufIn), desc.getMessagingVersion(),
                ColumnSerializer.Flag.LOCAL);
        // doublecheck that what we read is [still] valid for the current schema
        for (ColumnFamily cf : mutation.getColumnFamilies())
            for (Cell cell : cf)
                cf.getComparator().validate(cell.name());
    } catch (UnknownColumnFamilyException ex) {
        if (ex.cfId == null)
            return;
        AtomicInteger i = invalidMutations.get(ex.cfId);
        if (i == null) {
            i = new AtomicInteger(1);
            invalidMutations.put(ex.cfId, i);
        } else
            i.incrementAndGet();
        return;
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        File f = File.createTempFile("mutation", "dat");

        try (DataOutputStream out = new DataOutputStream(new FileOutputStream(f))) {
            out.write(inputBuffer, 0, size);
        }

        // Checksum passed so this error can't be permissible.
        handleReplayError(false, "Unexpected error deserializing mutation; saved to %s.  "
                + "This may be caused by replaying a mutation against a table with the same name but incompatible schema.  "
                + "Exception follows: %s", f.getAbsolutePath(), t);
        return;
    }

    if (logger.isTraceEnabled())
        logger.trace("replaying mutation for {}.{}: {}", mutation.getKeyspaceName(),
                ByteBufferUtil.bytesToHex(mutation.key()),
                "{" + StringUtils.join(mutation.getColumnFamilies().iterator(), ", ") + "}");

    Runnable runnable = new WrappedRunnable() {
        public void runMayThrow() throws IOException {
            if (Schema.instance.getKSMetaData(mutation.getKeyspaceName()) == null)
                return;
            if (pointInTimeExceeded(mutation))
                return;

            final Keyspace keyspace = Keyspace.open(mutation.getKeyspaceName());

            // Rebuild the mutation, omitting column families that
            //    a) the user has requested that we ignore,
            //    b) have already been flushed,
            // or c) are part of a cf that was dropped.
            // Keep in mind that cf.name() is suspect; do everything based on the cfid instead.
            Mutation newMutation = null;
            for (ColumnFamily columnFamily : replayFilter.filter(mutation)) {
                if (Schema.instance.getCF(columnFamily.id()) == null)
                    continue; // dropped

                ReplayPosition rp = cfPositions.get(columnFamily.id());

                // replay if current segment is newer than last flushed one or,
                // if it is the last known segment, if we are after the replay position
                if (desc.id > rp.segment || (desc.id == rp.segment && entryLocation > rp.position)) {
                    if (newMutation == null)
                        newMutation = new Mutation(mutation.getKeyspaceName(), mutation.key());
                    newMutation.add(columnFamily);
                    replayedCount.incrementAndGet();
                }
            }
            if (newMutation != null) {
                assert !newMutation.isEmpty();
                Keyspace.open(newMutation.getKeyspaceName()).apply(newMutation, false);
                keyspacesRecovered.add(keyspace);
            }
        }
    };
    futures.add(StageManager.getStage(Stage.MUTATION).submit(runnable));
    if (futures.size() > MAX_OUTSTANDING_REPLAY_COUNT) {
        FBUtilities.waitOnFutures(futures);
        futures.clear();
    }
}

From source file:org.apache.hadoop.hbase.client.TestClientOperationInterrupt.java

@Test
public void testInterrupt50Percent() throws IOException, InterruptedException {
    final AtomicInteger noEx = new AtomicInteger(0);
    final AtomicInteger badEx = new AtomicInteger(0);
    final AtomicInteger noInt = new AtomicInteger(0);
    final AtomicInteger done = new AtomicInteger(0);
    List<Thread> threads = new ArrayList<Thread>();

    final int nbThread = 100;

    for (int i = 0; i < nbThread; i++) {
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    HTable ht = new HTable(conf, tableName);
                    Result r = ht.get(new Get(row1));
                    noEx.incrementAndGet();
                } catch (IOException e) {
                    LOG.info("exception", e);
                    if (!(e instanceof InterruptedIOException) || (e instanceof SocketTimeoutException)) {
                        badEx.incrementAndGet();
                    } else {
                        if (Thread.currentThread().isInterrupted()) {
                            noInt.incrementAndGet();
                            LOG.info("The thread should NOT be with the 'interrupt' status.");
                        }
                    }
                } finally {
                    done.incrementAndGet();
                }
            }
        };
        t.setName("TestClientOperationInterrupt #" + i);
        threads.add(t);
        t.start();
    }

    for (int i = 0; i < nbThread / 2; i++) {
        threads.get(i).interrupt();
    }

    boolean stillAlive = true;
    while (stillAlive) {
        stillAlive = false;
        for (Thread t : threads) {
            if (t.isAlive()) {
                stillAlive = true;
            }
        }
        Threads.sleep(10);
    }

    Assert.assertFalse(Thread.currentThread().isInterrupted());

    Assert.assertTrue(" noEx: " + noEx.get() + ", badEx=" + badEx.get() + ", noInt=" + noInt.get(),
            noEx.get() == nbThread / 2 && badEx.get() == 0);

    // The problem here is that we need the server to free its handlers to handle all operations
    while (done.get() != nbThread) {
        Thread.sleep(1);
    }

    HTable ht = new HTable(conf, tableName);
    Result r = ht.get(new Get(row1));
    Assert.assertFalse(r.isEmpty());
}

From source file:co.cask.cdap.security.server.ExternalAuthenticationServer.java

@Override
protected Executor executor(State state) {
    final AtomicInteger id = new AtomicInteger();
    //noinspection NullableProblems
    final Thread.UncaughtExceptionHandler h = new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
        }
    };
    return new Executor() {
        @Override
        public void execute(Runnable runnable) {
            Thread t = new Thread(runnable,
                    String.format("ExternalAuthenticationServer-%d", id.incrementAndGet()));
            t.setUncaughtExceptionHandler(h);
            t.start();
        }
    };
}
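
The counter-named-threads idiom above is also what a java.util.concurrent.ThreadFactory typically looks like; a minimal standalone sketch (an illustration, not CDAP's code), where incrementAndGet() hands each new thread a unique, gap-free sequence number:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedThreadFactoryDemo {
    public static void main(String[] args) {
        final AtomicInteger id = new AtomicInteger();
        ThreadFactory factory = r -> new Thread(r,
                String.format("worker-%d", id.incrementAndGet()));
        ExecutorService pool = Executors.newFixedThreadPool(2, factory);
        pool.execute(() -> System.out.println(Thread.currentThread().getName())); // worker-1 or worker-2
        pool.execute(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}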

From source file:com.pepaproch.gtswsdl.client.RateLimitTest.java

private synchronized void addTask(AtomicInteger cc, ScheduledExecutorService scheduler, RateLimit rate,
        Instant[] end) {

    Callable<Integer> callable = cc::get;
    ListenableFutureTask<Integer> request = new ListenableFutureTask<>(callable);

    scheduler.schedule(() -> {
        if (!request.isCancelled() && !request.isDone()) {
            request.run();
        }
    }, rate.consumeSlot(), TimeUnit.MILLISECONDS);

    request.addCallback(new ListenableFutureCallback<Integer>() {

        @Override
        public void onSuccess(Integer result) {
            cc.incrementAndGet();
            end[0] = Instant.now();
            System.out.println("FINISHEDLISTENBALE: " + result + end[0].toString());
        }

        @Override
        public void onFailure(Throwable ex) {
            System.out.println("FAILURE");
        }
    });

}