Example usage for java.util.concurrent.atomic AtomicInteger get

List of usage examples for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicInteger.get() method.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
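
In other words, get() is a volatile read: it reports the current value without modifying the counter. As a minimal, self-contained sketch (not drawn from any of the projects below), get() is typically paired with the mutating methods like this:

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetExample {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(0);

        counter.incrementAndGet(); // counter is now 1
        counter.addAndGet(4);      // counter is now 5

        // get() performs a volatile read and does not change the value
        int snapshot = counter.get();
        System.out.println("current value = " + snapshot); // prints: current value = 5
    }
}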

Usage

From source file:org.fcrepo.client.ConnectionManagementTest.java

/**
 * Demonstrates that HTTP connections are released when the FcrepoClient throws an exception.  Each method of the
 * FcrepoClient (get, put, post, etc.) is tested.
 */
@Test
public void connectionReleasedOnException() {
    // Removing MOVE and COPY operations as the mock server does not handle them
    final int expectedCount = HttpMethods.values().length - 2;
    final AtomicInteger actualCount = new AtomicInteger(0);
    final MockHttpExpectations.Uris uri = uris.uri500;

    Stream.of(HttpMethods.values())
            // MOVE and COPY do not appear to be supported in the mock server
            .filter(method -> HttpMethods.MOVE != method && HttpMethods.COPY != method).forEach(method -> {
                connect(client, uri, method, null);
                actualCount.getAndIncrement();
            });

    assertEquals("Expected to make " + expectedCount + " connections; made " + actualCount.get(), expectedCount,
            actualCount.get());

    verifyConnectionRequestedAndClosed(actualCount.get(), connectionManager);
}

From source file:com.cyngn.vertx.bosun.BosunReporterTests.java

@Test
public void testSendMany(TestContext context) throws Exception {
    JsonObject metric = new JsonObject();
    metric.put("action", BosunReporter.INDEX_COMMAND);
    metric.put("metric", "test.value");
    metric.put("value", "34.4");
    metric.put("tags", new JsonObject().put("foo", "bar"));

    int totalMessages = 10000;
    AtomicInteger count = new AtomicInteger(0);
    Async async = context.async();

    AtomicInteger okCount = new AtomicInteger(0);

    Handler<AsyncResult<Message<JsonObject>>> handler = result -> {
        if (result.failed()) {
            context.fail();
        }

        String response = result.result().body().getString(BosunReporter.RESULT_FIELD);
        if (StringUtils.equals(BosunResponse.OK_MSG, response)) {
            okCount.incrementAndGet();
        } else if (StringUtils.equals(BosunResponse.EXISTS_MSG, response)) {
            // only the first index of this metric returns OK; duplicates come back as EXISTS and are not counted
        } else {
            context.fail();
        }

        if (count.incrementAndGet() == totalMessages) {
            if (okCount.get() != 1) {
                context.fail();
                return;
            }
            async.complete();
        }
    };

    for (int i = 0; i < totalMessages; i++) {
        eb.send(topic, metric, new DeliveryOptions(), handler);
    }
}

From source file:com.spectralogic.ds3client.metadata.MetadataReceivedListenerImpl_Test.java

@Test
public void testGettingMetadataFailureHandler() throws IOException, InterruptedException {
    Assume.assumeFalse(Platform.isWindows());

    try {
        final String tempPathPrefix = null;
        final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

        final String fileName = "Gracie.txt";

        final Path filePath = Files.createFile(Paths.get(tempDirectory.toString(), fileName));

        try {
            // set permissions
            if (!Platform.isWindows()) {
                final PosixFileAttributes attributes = Files.readAttributes(filePath,
                        PosixFileAttributes.class);
                final Set<PosixFilePermission> permissions = attributes.permissions();
                permissions.clear();
                permissions.add(PosixFilePermission.OWNER_READ);
                permissions.add(PosixFilePermission.OWNER_WRITE);
                Files.setPosixFilePermissions(filePath, permissions);
            }

            // get permissions
            final ImmutableMap.Builder<String, Path> fileMapper = ImmutableMap.builder();
            fileMapper.put(filePath.toString(), filePath);
            final Map<String, String> metadataFromFile = new MetadataAccessImpl(fileMapper.build())
                    .getMetadataValue(filePath.toString());

            FileUtils.deleteDirectory(tempDirectory.toFile());

            // put old permissions back
            final Metadata metadata = new MetadataImpl(new MockedHeadersReturningKeys(metadataFromFile));

            final AtomicInteger numTimesFailureHandlerCalled = new AtomicInteger(0);

            new MetadataReceivedListenerImpl(tempDirectory.toString(), new FailureEventListener() {
                @Override
                public void onFailure(final FailureEvent failureEvent) {
                    numTimesFailureHandlerCalled.incrementAndGet();
                    assertEquals(FailureEvent.FailureActivity.RestoringMetadata, failureEvent.doingWhat());
                }
            }, "localhost").metadataReceived(fileName, metadata);

            assertEquals(1, numTimesFailureHandlerCalled.get());
        } finally {
            FileUtils.deleteDirectory(tempDirectory.toFile());
        }
    } catch (final Throwable t) {
        fail("Throwing exceptions from metadata est verbotten");
    }
}

From source file:com.adobe.acs.commons.httpcache.store.jcr.impl.visitor.mock.RootNodeMockFactory.java

private Node[] generateBucketNodeChain(Node rootNode, boolean isEmpty) throws RepositoryException {
    final Node[] bucketNodeChain = new Node[settings.bucketDepth];
    Node currentParentNode = rootNode;
    for (int i = 0; i < settings.bucketDepth; i++) {
        final Node node = mockStandardNode("bucketnode" + (isEmpty ? "-empty" : "") + "-level-" + (i + 1));
        bucketNodeChain[i] = node;

        when(node.getParent()).thenReturn(currentParentNode);
        when(node.getProperties()).thenReturn(new MockPropertyIterator(IteratorUtils.EMPTY_ITERATOR));
        when(node.hasProperty(JCRHttpCacheStoreConstants.PN_ISCACHEENTRYNODE)).thenReturn(false);
        when(node.hasProperty(JCRHttpCacheStoreConstants.PN_ISBUCKETNODE)).thenReturn(true);

        currentParentNode = node;
    }

    for (int i = 0; i < settings.bucketDepth; i++) {
        if (i < settings.bucketDepth) {
            final Node node = bucketNodeChain[i];
            final Node childNode = bucketNodeChain[i];
            final AtomicInteger deleteCounter = new AtomicInteger();

            doAnswer(new Answer<Object>() {
                @Override
                public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
                    deleteCounter.getAndIncrement();
                    return null;
                }
            }).when(node).remove();

            when(node.getParent().getNodes()).thenAnswer(new Answer<NodeIterator>() {
                @Override
                public NodeIterator answer(InvocationOnMock invocationOnMock) throws Throwable {
                    if (deleteCounter.get() > 0) {
                        return new MockNodeIterator();
                    } else {
                        return new MockNodeIterator(new Node[] { childNode });
                    }
                }
            });

        }
    }

    return bucketNodeChain;
}

From source file:org.apache.bookkeeper.bookie.LedgerCacheTest.java

/**
 * Race where a flush would fail because a garbage collection occurred at
 * the wrong time.
 * {@link https://issues.apache.org/jira/browse/BOOKKEEPER-604}
 */
@Test(timeout = 60000)
public void testFlushDeleteRace() throws Exception {
    newLedgerCache();
    final AtomicInteger rc = new AtomicInteger(0);
    final LinkedBlockingQueue<Long> ledgerQ = new LinkedBlockingQueue<Long>(1);
    final byte[] masterKey = "masterKey".getBytes();
    Thread newLedgerThread = new Thread() {
        public void run() {
            try {
                for (int i = 0; i < 1000 && rc.get() == 0; i++) {
                    ledgerCache.setMasterKey(i, masterKey);
                    ledgerQ.put((long) i);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in new ledger thread", e);
            }
        }
    };
    newLedgerThread.start();

    Thread flushThread = new Thread() {
        public void run() {
            try {
                while (true) {
                    Long id = ledgerQ.peek();
                    if (id == null) {
                        continue;
                    }
                    LOG.info("Put entry for {}", id);
                    try {
                        ledgerCache.putEntryOffset((long) id, 1, 0);
                    } catch (Bookie.NoLedgerException nle) {
                        //ignore
                    }
                    ledgerCache.flushLedger(true);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in flush thread", e);
            }
        }
    };
    flushThread.start();

    Thread deleteThread = new Thread() {
        public void run() {
            try {
                while (true) {
                    long id = ledgerQ.take();
                    LOG.info("Deleting {}", id);
                    ledgerCache.deleteLedger(id);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in delete thread", e);
            }
        }
    };
    deleteThread.start();

    newLedgerThread.join();
    assertEquals("Should have been no errors", rc.get(), 0);

    deleteThread.interrupt();
    flushThread.interrupt();
}

From source file:io.cloudslang.orchestrator.services.OrchestratorDispatcherServiceImpl.java

private void dispatch(List<? extends Serializable> messages) {
    Validate.notNull(messages, "Messages list is null");

    if (logger.isDebugEnabled())
        logger.debug("Dispatching " + messages.size() + " messages");
    long t = System.currentTimeMillis();
    final AtomicInteger messagesCounter = new AtomicInteger(0);

    dispatch(messages, ExecutionMessage.class, new Handler<ExecutionMessage>() {
        @Override
        public void handle(List<ExecutionMessage> messages) {
            messagesCounter.addAndGet(messages.size());
            queueDispatcher.dispatch(messages);
        }
    });

    dispatch(messages, SplitMessage.class, new Handler<SplitMessage>() {
        @Override
        public void handle(List<SplitMessage> messages) {
            messagesCounter.addAndGet(messages.size());
            splitJoinService.split(messages);
        }
    });

    t = System.currentTimeMillis() - t;
    if (logger.isDebugEnabled())
        logger.debug("Dispatching " + messagesCounter.get() + " messages is done in " + t + " ms");
    if (messages.size() > messagesCounter.get()) {
        logger.warn((messages.size() - messagesCounter.get())
                + " messages were not being dispatched, since unknown type");
    }
}

From source file:com.asakusafw.lang.compiler.cli.BatchCompilerCliTest.java

/**
 * w/ scoped properties.
 * @throws Exception if failed
 */
@Test
public void execute_scoped_properties() throws Exception {
    File output = deployer.newFolder();
    String[] args = strings(new Object[] { "--explore",
            files(ResourceUtil.findLibraryByClass(DummyBatch.class)), "--output", output, "--classAnalyzer",
            classes(DummyClassAnalyzer.class), "--batchCompiler", classes(DelegateBatchCompiler.class),
            "--include", classes(DummyBatch.class), "--externalPortProcessors",
            classes(DummyExternalPortProcessor.class), "--batchIdPrefix", "prefix.", "-P", "a=A", "-P", "b=B",
            "-P", "DummyBatch:b=!", "-P", "DummyBatch:c=C", "-P", "other:a=INVALID", });
    AtomicInteger count = new AtomicInteger();
    int status = execute(args, (context, batch) -> {
        count.incrementAndGet();
        CompilerOptions options = context.getOptions();
        assertThat(options.get("a", "?"), is("A"));
        assertThat(options.get("b", "?"), is("!"));
        assertThat(options.get("c", "?"), is("C"));
    });
    assertThat(status, is(0));
    assertThat(count.get(), is(1));
}

From source file:org.apache.activemq.web.RestTest.java

@Test(timeout = 15 * 1000)
public void testPost() throws Exception {
    int port = getPort();

    HttpClient httpClient = new HttpClient();
    httpClient.start();

    final CountDownLatch latch = new CountDownLatch(1);
    final StringBuffer buf = new StringBuffer();
    final AtomicInteger status = new AtomicInteger();
    httpClient.newRequest("http://localhost:" + port + "/message/testPost?type=queue").method(HttpMethod.POST)
            .send(new BufferingResponseListener() {
                @Override
                public void onComplete(Result result) {
                    status.getAndSet(result.getResponse().getStatus());
                    buf.append(getContentAsString());
                    latch.countDown();
                }
            });

    latch.await();
    assertTrue("success status", HttpStatus.isSuccess(status.get()));

    final StringBuffer buf2 = new StringBuffer();
    final AtomicInteger status2 = new AtomicInteger();
    final CountDownLatch latch2 = asyncRequest(httpClient,
            "http://localhost:" + port + "/message/testPost?readTimeout=1000&type=Queue", buf2, status2);

    latch2.await();
    assertTrue("success status", HttpStatus.isSuccess(status2.get()));
}

From source file:com.couchbase.client.core.endpoint.view.ViewHandlerTest.java

@Test
public void shouldDecodeEmptyViewQueryResponse() throws Exception {
    String response = Resources.read("query_empty.json", this.getClass());
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    HttpContent responseChunk = new DefaultLastHttpContent(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8));

    ViewQueryRequest requestMock = mock(ViewQueryRequest.class);
    queue.add(requestMock);
    channel.writeInbound(responseHeader, responseChunk);
    latch.await(1, TimeUnit.SECONDS);
    assertEquals(1, firedEvents.size());
    ViewQueryResponse inbound = (ViewQueryResponse) firedEvents.get(0);

    assertTrue(inbound.status().isSuccess());
    assertTrue(inbound.rows().toList().toBlocking().single().isEmpty());

    final AtomicInteger called = new AtomicInteger();
    inbound.info().toBlocking().forEach(new Action1<ByteBuf>() {
        @Override
        public void call(ByteBuf byteBuf) {
            called.incrementAndGet();
            assertEquals("{\"total_rows\":7303}", byteBuf.toString(CharsetUtil.UTF_8));
        }
    });
    assertEquals(1, called.get());
}

From source file:com.joyent.manta.client.jobs.MantaClientJobIT.java

@Test
public void canListOutputsForJobAsStreams() throws IOException, InterruptedException {
    String path1 = String.format("%s/%s", testPathPrefix, UUID.randomUUID());
    mantaClient.put(path1, TEST_DATA);

    String path2 = String.format("%s/%s", testPathPrefix, UUID.randomUUID());
    mantaClient.put(path2, TEST_DATA);

    final MantaJob job = buildJob();
    final UUID jobId = mantaClient.createJob(job);

    List<String> inputs = new ArrayList<>();
    inputs.add(path1);
    inputs.add(path2);

    mantaClient.addJobInputs(jobId, inputs.iterator());
    Assert.assertTrue(mantaClient.endJobInput(jobId));

    awaitJobCompletion(jobId);

    final AtomicInteger count = new AtomicInteger(0);

    mantaClient.getJobOutputsAsStreams(jobId).forEach(o -> {
        count.incrementAndGet();
        try {
            String content = IOUtils.toString(o, Charset.defaultCharset());
            Assert.assertEquals(content, TEST_DATA);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    });

    Assert.assertEquals(count.get(), 2, "Missing both outputs");
}