Example usage for java.util.concurrent.atomic AtomicInteger get

List of usage examples for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find example usage of java.util.concurrent.atomic.AtomicInteger.get(), collected from real-world open-source projects.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
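
Example

As a quick, self-contained sketch (not taken from any of the projects listed under Usage; the class and variable names are illustrative), get() reads the current value of a shared counter that other threads update:

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared counter updated by several worker threads.
        final AtomicInteger counter = new AtomicInteger(0);

        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // get() is a volatile read, so after join() the total written by the
        // workers is guaranteed to be visible here.
        System.out.println("count = " + counter.get()); // prints: count = 4000
    }
}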

Usage

From source file:org.nd4j.linalg.api.test.NDArrayTests.java

@Test
public void testVectorDimension() {
    INDArray test = Nd4j.create(Nd4j.linspace(1, 4, 4).data(), new int[] { 2, 2 });
    final AtomicInteger count = new AtomicInteger(0);
    //row wise
    test.iterateOverDimension(1, new SliceOp() {

        /**
         * Operates on an ndarray slice
         *
         * @param nd the result to operate on
         */
        @Override
        public void operate(INDArray nd) {
            INDArray test = nd;
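            // count.get() is 0 only for the first slice; the counter is incremented after each slice is verified.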
            if (count.get() == 0) {
                INDArray firstDimension = Nd4j.create(new float[] { 1, 2 }, new int[] { 2 });
                assertEquals(firstDimension, test);
            } else {
                INDArray firstDimension = Nd4j.create(new float[] { 3, 4 }, new int[] { 2 });
                assertEquals(firstDimension, test);

            }

            count.incrementAndGet();
        }

    }, false);

    count.set(0);

    //columnwise
    test.iterateOverDimension(0, new SliceOp() {

        /**
         * Operates on an ndarray slice
         *
         * @param nd the result to operate on
         */
        @Override
        public void operate(INDArray nd) {
            log.info("Operator " + nd);
            INDArray test = nd;
            if (count.get() == 0) {
                INDArray firstDimension = Nd4j.create(new float[] { 1, 3 }, new int[] { 2 });
                assertEquals(firstDimension, test);
            } else {
                INDArray firstDimension = Nd4j.create(new float[] { 2, 4 }, new int[] { 2 });
                assertEquals(firstDimension, test);
                firstDimension.data().destroy();

            }

            count.incrementAndGet();
        }

    }, false);

    test.data().destroy();

}

From source file:com.btoddb.fastpersitentqueue.InMemorySegmentMgrTest.java

@Test
public void testThreading() throws IOException, ExecutionException {
    final int entrySize = 1000;
    final int numEntries = 3000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxSegmentSizeInBytes(10000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        pushSum.addAndGet(x);
                        FpqEntry entry = new FpqEntry(x, new byte[entrySize]);
                        mgr.push(entry);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
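                // Keep popping until every pusher has finished (pusherFinishCount.get() reaches numPushers) and the queue is empty.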
                while (pusherFinishCount.get() < numPushers || !mgr.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = mgr.pop())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }

                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getNumberOfEntries(), is(0L));
    assertThat(mgr.getNumberOfActiveSegments(), is(1));
    assertThat(mgr.getSegments(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), is(empty()));

    // make sure we tested paging in/out
    assertThat(mgr.getNumberOfSwapOut(), is(greaterThan(0L)));
    assertThat(mgr.getNumberOfSwapIn(), is(mgr.getNumberOfSwapOut()));
}

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

/**
 * Tests the ability of getOrAssignStreamSegmentId to handle the TooManyActiveSegmentsException.
 */
@Test
public void testGetOrAssignStreamSegmentIdWithMetadataLimit() throws Exception {
    final String segmentName = "Segment";
    final String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName,
            UUID.randomUUID());

    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);
    storageSegments.add(transactionName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments,
            name -> new StreamSegmentInformation(name, 0, false, false, new ImmutableDate()));

    // 1. Verify the behavior when even after the retry we still cannot map.
    AtomicInteger exceptionCounter = new AtomicInteger();
    AtomicBoolean cleanupInvoked = new AtomicBoolean();

    // We use 'containerId' as a proxy for the exception id (to make sure we collect the right one).
    context.operationLog.addHandler = op -> FutureHelpers
            .failedFuture(new TooManyActiveSegmentsException(exceptionCounter.incrementAndGet(), 0));
    Supplier<CompletableFuture<Void>> noOpCleanup = () -> {
        if (!cleanupInvoked.compareAndSet(false, true)) {
            return FutureHelpers.failedFuture(new AssertionError("Cleanup invoked multiple times/"));
        }
        return CompletableFuture.completedFuture(null);
    };
    val mapper1 = new StreamSegmentMapper(context.metadata, context.operationLog, context.stateStore,
            noOpCleanup, context.storage, executorService());
    AssertExtensions.assertThrows(
            "Unexpected outcome when trying to map a segment name to a full metadata that cannot be cleaned.",
            () -> mapper1.getOrAssignStreamSegmentId(segmentName, TIMEOUT),
            ex -> ex instanceof TooManyActiveSegmentsException
                    && ((TooManyActiveSegmentsException) ex).getContainerId() == exceptionCounter.get());
    Assert.assertEquals("Unexpected number of attempts to map.", 2, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());

    // Now with a transaction.
    exceptionCounter.set(0);
    cleanupInvoked.set(false);
    AssertExtensions.assertThrows(
            "Unexpected outcome when trying to map a segment name to a full metadata that cannot be cleaned.",
            () -> mapper1.getOrAssignStreamSegmentId(transactionName, TIMEOUT),
            ex -> ex instanceof TooManyActiveSegmentsException
                    && ((TooManyActiveSegmentsException) ex).getContainerId() == exceptionCounter.get());
    Assert.assertEquals("Unexpected number of attempts to map.", 2, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());

    // 2. Verify the behavior when the first call fails, but the second one succeeds.
    exceptionCounter.set(0);
    cleanupInvoked.set(false);
    Supplier<CompletableFuture<Void>> workingCleanup = () -> {
        if (!cleanupInvoked.compareAndSet(false, true)) {
            return FutureHelpers.failedFuture(new AssertionError("Cleanup invoked multiple times."));
        }

        setupOperationLog(context); // Setup the OperationLog to function correctly.
        return CompletableFuture.completedFuture(null);
    };

    val mapper2 = new StreamSegmentMapper(context.metadata, context.operationLog, context.stateStore,
            workingCleanup, context.storage, executorService());
    long id = mapper2.getOrAssignStreamSegmentId(segmentName, TIMEOUT).join();
    Assert.assertEquals("Unexpected number of attempts to map.", 1, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());
    Assert.assertNotEquals("No valid SegmentId assigned.", ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
}

From source file:com.bt.aloha.util.ConcurrentUpdateManagerTest.java

@Test
public void testConcurrentUpdateConflictAwawreGetsCalled() throws Exception {
    // setup
    final CountDownLatch firstWriterRead = new CountDownLatch(1);
    final CountDownLatch secondWriterWrote = new CountDownLatch(1);
    final AtomicInteger failuresCounter = new AtomicInteger();

    ConcurrentUpdateBlock concurrentUpdateBlock = new ConflictAwareConcurrentUpdateBlock() {
        public void execute() {
            DialogInfo di = dialogCollection.get(dialogId);
            log.debug("First writer read");
            firstWriterRead.countDown();
            log.debug("Waiting for second writer to write");
            try {
                secondWriterWrote.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
            dialogCollection.replace(di);
            log.debug("First writer replaced");
        }

        public String getResourceId() {
            return dialogId;
        }

        public void onConcurrentUpdateConflict() {
            failuresCounter.incrementAndGet();
        }
    };

    Runnable competingWriter = new Runnable() {
        public void run() {
            log.debug("Waiting for first writer to read");
            try {
                firstWriterRead.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
            DialogInfo di = dialogCollection.get(dialogId);
            dialogCollection.replace(di);
            log.debug("Second writer replaced");
            secondWriterWrote.countDown();
        }
    };

    // act
    new Thread(competingWriter).start();
    concurrentUpdateManager.executeConcurrentUpdate(concurrentUpdateBlock);

    // assert
    assertEquals(1, failuresCounter.get());
}

From source file:com.neatresults.mgnltweaks.app.status.ConfigStatusPresenter.java

@Override
public void refreshData() {
    List<String> fails = new ArrayList<String>();
    final AtomicInteger totalCount = new AtomicInteger();
    final AtomicInteger absCount = new AtomicInteger();
    final AtomicInteger overrideCount = new AtomicInteger();
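    // The counters are incremented inside the filter callbacks below and read back with get() when the totals are reported.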
    try {
        Session session = MgnlContext.getJCRSession(RepositoryConstants.CONFIG);
        NodeIterator results = QueryUtil.search(RepositoryConstants.CONFIG,
                "select * from [nt:base] where extends is not null");
        filterData(results, n -> {
            try {
                String path = n.getProperty("extends").getString();
                if (StringUtils.startsWith(path, "/")) {
                    absCount.incrementAndGet();
                    return session.itemExists(path);
                } else if ("override".equals(path)) {
                    overrideCount.incrementAndGet();
                    return true;
                } else {
                    return session.itemExists(n.getPath() + "/" + path);
                }
            } catch (RepositoryException e) {
                log.debug("Ooops, error while checking existence of extends target for {} with {}", n,
                        e.getMessage(), e);
                return false;
            }
        }, t -> totalCount.incrementAndGet(), f -> {
            try {
                fails.add(f.getPath());
            } catch (RepositoryException e) {
                log.debug("Ooops, error while reporting misconfigured extends target for {} with {}", f,
                        e.getMessage(), e);
            }
        });
    } catch (RepositoryException e) {
        log.debug("Ooops, error while searching for extends targets with {}", e.getMessage(), e);
    }
    sourceData(ConfigStatusView.EXTENDS_FAIL_COUNT, "" + fails.size());
    sourceData(ConfigStatusView.EXTENDS_FAIL_LIST, fails);
    sourceData(ConfigStatusView.EXTENDS_COUNT, "" + totalCount.get());
    sourceData(ConfigStatusView.ABS_EXTENDS_COUNT, "" + absCount.get());
    sourceData(ConfigStatusView.REL_EXTENDS_COUNT,
            "" + (totalCount.get() - absCount.get() - overrideCount.get()));
    sourceData(ConfigStatusView.OVR_EXTENDS_COUNT, "" + overrideCount.get());
}

From source file:info.archinnov.achilles.it.TestDSLSimpleEntity.java

@Test
public void should_dsl_select_with_options() throws Exception {
    //Given
    final Map<String, Object> values = new HashMap<>();
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    values.put("id", id);
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
    dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
    final Date date1 = dateFormat.parse("2015-10-01 00:00:00 GMT");
    final Date date2 = dateFormat.parse("2015-10-02 00:00:00 GMT");
    final Date date3 = dateFormat.parse("2015-10-03 00:00:00 GMT");
    final Date date6 = dateFormat.parse("2015-10-06 00:00:00 GMT");

    values.put("date1", "'2015-10-01 00:00:00+0000'");
    values.put("date2", "'2015-10-02 00:00:00+0000'");
    values.put("date3", "'2015-10-03 00:00:00+0000'");
    values.put("date4", "'2015-10-04 00:00:00+0000'");
    values.put("date5", "'2015-10-05 00:00:00+0000'");
    values.put("date6", "'2015-10-06 00:00:00+0000'");
    values.put("date7", "'2015-10-07 00:00:00+0000'");
    values.put("date8", "'2015-10-08 00:00:00+0000'");
    values.put("date9", "'2015-10-09 00:00:00+0000'");
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_many_rows.cql", values);

    final AtomicInteger rsCount = new AtomicInteger(0);
    final AtomicInteger rowCounter = new AtomicInteger(0);

    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevelForDriverConnection();

    //When
    final List<SimpleEntity> found = manager.dsl().select().value().fromBaseTable().where().id_Eq(id)
            .date_IN(date1, date2, date3, date6).orderByDateDescending().limit(3).withConsistencyLevel(THREE)
            .withRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE).withResultSetAsyncListener(rs -> {
                rsCount.getAndSet(rs.getAvailableWithoutFetching());
                return rs;
            }).withRowAsyncListener(row -> {
                rowCounter.getAndIncrement();
                return row;
            }).getList();

    //Then
    assertThat(found).hasSize(3);
    assertThat(found.get(0).getValue()).isEqualTo("id - date6");
    assertThat(found.get(1).getValue()).isEqualTo("id - date3");
    assertThat(found.get(2).getValue()).isEqualTo("id - date2");
    assertThat(rsCount.get()).isEqualTo(3);
    assertThat(rowCounter.get()).isEqualTo(3);
    logAsserter.assertConsistencyLevels(THREE, ONE);
}

From source file:org.apache.hadoop.hbase.client.TestClientOperationInterrupt.java

@Test
public void testInterrupt50Percent() throws IOException, InterruptedException {
    final AtomicInteger noEx = new AtomicInteger(0);
    final AtomicInteger badEx = new AtomicInteger(0);
    final AtomicInteger noInt = new AtomicInteger(0);
    final AtomicInteger done = new AtomicInteger(0);
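    // The worker threads update these counters as they succeed or fail; the totals are read with get() in the assertions at the end.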
    List<Thread> threads = new ArrayList<Thread>();

    final int nbThread = 100;

    for (int i = 0; i < nbThread; i++) {
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    HTable ht = new HTable(conf, tableName);
                    Result r = ht.get(new Get(row1));
                    noEx.incrementAndGet();
                } catch (IOException e) {
                    LOG.info("exception", e);
                    if (!(e instanceof InterruptedIOException) || (e instanceof SocketTimeoutException)) {
                        badEx.incrementAndGet();
                    } else {
                        if (Thread.currentThread().isInterrupted()) {
                            noInt.incrementAndGet();
                            LOG.info("The thread should NOT be with the 'interrupt' status.");
                        }
                    }
                } finally {
                    done.incrementAndGet();
                }
            }
        };
        t.setName("TestClientOperationInterrupt #" + i);
        threads.add(t);
        t.start();
    }

    for (int i = 0; i < nbThread / 2; i++) {
        threads.get(i).interrupt();
    }

    boolean stillAlive = true;
    while (stillAlive) {
        stillAlive = false;
        for (Thread t : threads) {
            if (t.isAlive()) {
                stillAlive = true;
            }
        }
        Threads.sleep(10);
    }

    Assert.assertFalse(Thread.currentThread().isInterrupted());

    Assert.assertTrue(" noEx: " + noEx.get() + ", badEx=" + badEx.get() + ", noInt=" + noInt.get(),
            noEx.get() == nbThread / 2 && badEx.get() == 0);

    // The problem here is that we need the server to free its handlers to handle all operations
    while (done.get() != nbThread) {
        Thread.sleep(1);
    }

    HTable ht = new HTable(conf, tableName);
    Result r = ht.get(new Get(row1));
    Assert.assertFalse(r.isEmpty());
}

From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java

@Test
public void shouldFireKeepAlive() throws Exception {
    final AtomicInteger keepAliveEventCounter = new AtomicInteger();
    final AtomicReference<ChannelHandlerContext> ctxRef = new AtomicReference();

    QueryHandler testHandler = new QueryHandler(endpoint, responseRingBuffer, queue, false) {
        @Override
        public void channelRegistered(ChannelHandlerContext ctx) throws Exception {
            super.channelRegistered(ctx);
            ctxRef.compareAndSet(null, ctx);
        }

        @Override
        protected void onKeepAliveFired(ChannelHandlerContext ctx, CouchbaseRequest keepAliveRequest) {
            assertEquals(1, keepAliveEventCounter.incrementAndGet());
        }

        @Override
        protected void onKeepAliveResponse(ChannelHandlerContext ctx, CouchbaseResponse keepAliveResponse) {
            assertEquals(2, keepAliveEventCounter.incrementAndGet());
        }
    };
    EmbeddedChannel channel = new EmbeddedChannel(testHandler);

    //test idle event triggers a query keepAlive request and hook is called
    testHandler.userEventTriggered(ctxRef.get(), IdleStateEvent.FIRST_ALL_IDLE_STATE_EVENT);

    assertEquals(1, keepAliveEventCounter.get());
    assertTrue(queue.peek() instanceof QueryHandler.KeepAliveRequest);
    QueryHandler.KeepAliveRequest keepAliveRequest = (QueryHandler.KeepAliveRequest) queue.peek();

    //test responding to the request with http response is interpreted into a KeepAliveResponse and hook is called
    HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND);
    LastHttpContent responseEnd = new DefaultLastHttpContent();
    channel.writeInbound(response, responseEnd);
    QueryHandler.KeepAliveResponse keepAliveResponse = keepAliveRequest.observable()
            .cast(QueryHandler.KeepAliveResponse.class).timeout(1, TimeUnit.SECONDS).toBlocking().single();

    ReferenceCountUtil.releaseLater(response);
    ReferenceCountUtil.releaseLater(responseEnd);

    assertEquals(2, keepAliveEventCounter.get());
    assertEquals(ResponseStatus.NOT_EXISTS, keepAliveResponse.status());
}

From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java

@Test
public void shouldDecodeNRowResponse() throws Exception {
    String response = Resources.read("success_5.json", this.getClass());
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    HttpContent responseChunk = new DefaultLastHttpContent(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8));

    GenericQueryRequest requestMock = mock(GenericQueryRequest.class);
    queue.add(requestMock);
    channel.writeInbound(responseHeader, responseChunk);
    latch.await(1, TimeUnit.SECONDS);
    assertEquals(1, firedEvents.size());
    GenericQueryResponse inbound = (GenericQueryResponse) firedEvents.get(0);

    final AtomicInteger found = new AtomicInteger(0);
    assertResponse(inbound, true, ResponseStatus.SUCCESS, FAKE_REQUESTID, FAKE_CLIENTID, "success",
            FAKE_SIGNATURE, new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf row) {
                    found.incrementAndGet();
                    String content = row.toString(CharsetUtil.UTF_8);
                    row.release();
                    assertNotNull(content);
                    assertTrue(!content.isEmpty());
                    try {
                        Map decoded = mapper.readValue(content, Map.class);
                        assertTrue(decoded.size() > 0);
                        assertTrue(decoded.containsKey("name"));
                    } catch (Exception e) {
                        assertTrue(false);
                    }
                }
            }, new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    fail("no error expected");
                }
            }, expectedMetricsCounts(0, 5));
    assertEquals(5, found.get());
}