Example usage for java.util.concurrent.atomic AtomicInteger get

Introduction

This page collects usage examples for java.util.concurrent.atomic.AtomicInteger#get(), drawn from open-source projects.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
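
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the sources below; the class name is illustrative) showing the volatile-read semantics of get(): after the writer thread is joined, the reader observes every increment without any locking.

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Writer thread: publish ten increments.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 10; i++) {
                counter.incrementAndGet();
            }
        });
        writer.start();
        writer.join();

        // get() is a volatile read, so all increments made before join()
        // returned are visible here.
        System.out.println("Final value: " + counter.get()); // prints 10
    }
}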

Usage

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java
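In this HBase WAL-replay test, an AtomicInteger counts edits restored through an overridden restoreEdit callback, and get() reads the final tally to assert that every family's edits were replayed.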

/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * Region again.  Verify seqids.
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException,
        NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region3);
    // Write countPerFamily edits into the three families.  Do a flush on one
    // of the families during the load of edits so its seqid differs from the
    // others, verifying that differing seqids are handled correctly.
    HLog wal = createWAL(this.conf);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // Flush after the first family so at least one family has a seqid different from the rest.
            region.flushcache();
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect, that our seqids are calculated correctly so
    // all edits in logs are seen as 'stale'/old.
    region.close(true);
    wal.close();
    runWALSplit(this.conf);
    HLog wal2 = createWAL(this.conf);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());

    // Next test.  Add more edits, then 'crash' this region by stealing its wal
    // out from under it and assert that replay of the log adds the edits back
    // correctly when region is opened again.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    // Lower the maximum recovery error count so the dfsclient doesn't linger
    // retrying something long gone.
    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal2).getOutputStream(), 1);
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            HLog wal3 = createWAL(newConf);
            final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
            HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
                @Override
                protected boolean restoreEdit(Store s, KeyValue kv) {
                    boolean b = super.restoreEdit(s, kv);
                    countOfRestoredEdits.incrementAndGet();
                    return b;
                }
            };
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            assertEquals(htd.getFamilies().size() * countPerFamily, countOfRestoredEdits.get());

            // We can't close the first wal; it was appropriated when we split.
            region3.close();
            wal3.closeAndDelete();
            return null;
        }
    });
}

From source file:org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClientTest.java
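This Solr test increments success and error counters from ConcurrentUpdateSolrClient callbacks across several sender threads; get() reads the counters afterwards to assert that every request succeeded and all documents arrived.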

@Test
public void testConcurrentUpdate() throws Exception {
    TestServlet.clear();

    String serverUrl = jetty.getBaseUrl().toString() + "/cuss/foo";

    int cussThreadCount = 2;
    int cussQueueSize = 100;

    // for tracking callbacks from CUSS
    final AtomicInteger successCounter = new AtomicInteger(0);
    final AtomicInteger errorCounter = new AtomicInteger(0);
    final StringBuilder errors = new StringBuilder();

    @SuppressWarnings("serial")
    ConcurrentUpdateSolrClient concurrentClient = new OutcomeCountingConcurrentUpdateSolrClient(serverUrl,
            cussQueueSize, cussThreadCount, successCounter, errorCounter, errors);

    concurrentClient.setPollQueueTime(0);

    // ensure it doesn't block where there's nothing to do yet
    concurrentClient.blockUntilFinished();

    int poolSize = 5;
    ExecutorService threadPool = ExecutorUtil.newMDCAwareFixedThreadPool(poolSize,
            new SolrjNamedThreadFactory("testCUSS"));

    int numDocs = 100;
    int numRunnables = 5;
    for (int r = 0; r < numRunnables; r++)
        threadPool.execute(new SendDocsRunnable(String.valueOf(r), numDocs, concurrentClient));

    // ensure all docs are sent: stop accepting new tasks, then wait for the senders to finish
    threadPool.shutdown();
    threadPool.awaitTermination(5, TimeUnit.SECONDS);

    // wait until all requests are processed by CUSS 
    concurrentClient.blockUntilFinished();
    concurrentClient.shutdownNow();

    assertEquals("post", TestServlet.lastMethod);

    // expect all requests to be successful
    int expectedSuccesses = TestServlet.numReqsRcvd.get();
    assertTrue(expectedSuccesses > 0); // at least one request must have been sent

    assertTrue("Expected no errors but got " + errorCounter.get() + ", due to: " + errors.toString(),
            errorCounter.get() == 0);
    assertTrue("Expected " + expectedSuccesses + " successes, but got " + successCounter.get(),
            successCounter.get() == expectedSuccesses);

    int expectedDocs = numDocs * numRunnables;
    assertTrue("Expected CUSS to send " + expectedDocs + " but got " + TestServlet.numDocsRcvd.get(),
            TestServlet.numDocsRcvd.get() == expectedDocs);
}

From source file:com.vmware.admiral.adapter.docker.service.DockerAdapterService.java
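Here a retry counter gates recursive image-pull attempts: getAndIncrement() consumes the retry budget, while get() supplies the current count for the log message and the recursive call.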

private void processPullImageFromRegistryWithRetry(RequestContext context, CommandInput createImageCommandInput,
        CompletionHandler imageCompletionHandler, int retriesCount, int maxRetryCount) {
    AtomicInteger retryCount = new AtomicInteger(retriesCount);
    context.executor.createImage(createImageCommandInput, (op, ex) -> {
        if (ex != null && RETRIABLE_HTTP_STATUSES.contains(op.getStatusCode())
                && retryCount.getAndIncrement() < maxRetryCount) {
            String fullImageName = DockerImage.fromImageName(context.containerDescription.image).toString();
            logWarning("Pulling image %s failed with %s. Retries left %d", fullImageName, Utils.toString(ex),
                    maxRetryCount - retryCount.get());
            processPullImageFromRegistryWithRetry(context, createImageCommandInput, imageCompletionHandler,
                    retryCount.get(), maxRetryCount);
        } else {
            imageCompletionHandler.handle(op, ex);
        }
    });
}

From source file:de.acosix.alfresco.mtsupport.repo.auth.ldap.EnhancedLDAPUserRegistry.java
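When progress estimation is enabled, the counter is incremented once per LDAP search result during a preliminary query, and get() passes the resulting estimate into the PersonCollection.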

@Override
public Collection<NodeDescription> getPersons(final Date modifiedSince) {
    final String query;
    if (modifiedSince == null) {
        query = this.personQuery;
    } else {
        final MessageFormat mf = new MessageFormat(this.personDifferentialQuery, Locale.ENGLISH);
        query = mf.format(new Object[] { this.timestampFormat.format(modifiedSince) });
    }

    final Supplier<InitialDirContext> contextSupplier = this.buildContextSupplier();
    final Function<InitialDirContext, Boolean> nextPageChecker = this.buildNextPageChecker();
    final Function<InitialDirContext, NamingEnumeration<SearchResult>> userSearcher = this
            .buildUserSearcher(query);

    final AtomicInteger totalEstimatedSize = new AtomicInteger(-1);
    if (this.enableProgressEstimation) {
        this.processQuery((result) -> {
            totalEstimatedSize.getAndIncrement();
        }, this.userSearchBase, query, new String[0]);
    }

    final NodeMapper userMapper = this.buildUserMapper();
    return new PersonCollection(contextSupplier, nextPageChecker, userSearcher, userMapper, this.queryBatchSize,
            totalEstimatedSize.get());
}

From source file:com.twitter.distributedlog.lock.TestZKSessionLock.java
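A shared counter incremented by lock actions lets this test assert, via get(), that actions execute only when submitted under the lock's current epoch.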

@Test(timeout = 60000)
public void testExecuteLockAction() throws Exception {
    String lockPath = "/test-execute-lock-action";
    String clientId = "test-execute-lock-action-" + System.currentTimeMillis();

    ZKSessionLock lock = new ZKSessionLock(zkc, lockPath, clientId, lockStateExecutor);

    final AtomicInteger counter = new AtomicInteger(0);

    // a lock action submitted under the current epoch should be executed
    final CountDownLatch latch1 = new CountDownLatch(1);
    lock.executeLockAction(lock.getEpoch().get(), new LockAction() {
        @Override
        public void execute() {
            counter.incrementAndGet();
            latch1.countDown();
        }

        @Override
        public String getActionName() {
            return "increment1";
        }
    });
    latch1.await();
    assertEquals("counter should be increased in same epoch", 1, counter.get());

    // a lock action submitted under a different epoch should not be executed
    final CountDownLatch latch2 = new CountDownLatch(1);
    lock.executeLockAction(lock.getEpoch().get() + 1, new LockAction() {
        @Override
        public void execute() {
            counter.incrementAndGet();
        }

        @Override
        public String getActionName() {
            return "increment2";
        }
    });
    lock.executeLockAction(lock.getEpoch().get(), new LockAction() {
        @Override
        public void execute() {
            latch2.countDown();
        }

        @Override
        public String getActionName() {
            return "countdown";
        }
    });
    latch2.await();
    assertEquals("counter should not be increased in different epochs", 1, counter.get());

    // a lock action submitted under a different epoch should not be executed, and its promise should be satisfied exceptionally
    Promise<BoxedUnit> promise = new Promise<BoxedUnit>();
    lock.executeLockAction(lock.getEpoch().get() + 1, new LockAction() {
        @Override
        public void execute() {
            counter.incrementAndGet();
        }

        @Override
        public String getActionName() {
            return "increment3";
        }
    }, promise);
    try {
        Await.result(promise);
        fail("Should satisfy promise with epoch changed exception.");
    } catch (EpochChangedException ece) {
        // expected
    }
    assertEquals("counter should not be increased in different epochs", 1, counter.get());

    lockStateExecutor.shutdown();
}

From source file:com.adobe.acs.commons.workflow.process.impl.SyntheticWrapperWorkflowProcess.java
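A counter tracks processed payloads: incrementAndGet() drives periodic commits at the configured save interval, and get() reports the final total in the log message.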

@Override
public void execute(WorkItem workItem, WorkflowSession workflowSession, MetaDataMap metaDataMap)
        throws WorkflowException {
    ResourceResolver resourceResolver = null;
    final SyntheticWorkflowRunner syntheticWorkflowRunner = syntheticWorkflowRunnerAccessor
            .getSyntheticWorkflowRunner();

    final String payload = (String) workItem.getWorkflowData().getPayload();
    final ProcessArgs processArgs = new ProcessArgs(metaDataMap);

    try {
        resourceResolver = workflowHelper.getResourceResolver(workflowSession);
        final SyntheticWorkflowModel syntheticWorkflowModel = syntheticWorkflowRunner
                .getSyntheticWorkflowModel(resourceResolver, processArgs.getWorkflowModelId(), true);

        final AtomicInteger count = new AtomicInteger(0);

        // Anonymous inner class to facilitate counting of processed payloads
        final ResourceRunnable syntheticRunnable = new ResourceRunnable() {
            @Override
            public void run(final Resource resource) throws java.lang.Exception {
                if (processArgs.isThrottle()) {
                    throttledTaskRunner.waitForLowCpuAndLowMemory();
                }

                syntheticWorkflowRunner.execute(resource.getResourceResolver(), resource.getPath(),
                        syntheticWorkflowModel, false, false);

                // Commit as needed
                if (processArgs.getSaveInterval() > 0
                        && count.incrementAndGet() % processArgs.getSaveInterval() == 0
                        && resource.getResourceResolver().hasChanges()) {
                    resource.getResourceResolver().commit();
                }
            }
        };

        final ContentVisitor visitor = new ContentVisitor(syntheticRunnable);
        final Resource resource = resourceResolver.getResource(payload);

        if (processArgs.isTraverseTree()) {
            visitor.accept(resource);
        } else {
            syntheticRunnable.run(resource);
        }

        if (processArgs.getSaveInterval() > 0 && resourceResolver.hasChanges()) {
        // Commit any stragglers
            resourceResolver.commit();
        }

        log.info("Synthetic Workflow Wrapper processed [ {} ] total payloads", count.get());
    } catch (Exception e) {
        throw new WorkflowException(e);
    }
}

From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java
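Inside the error callback, a counter identifies whether the first or second error object is being inspected, selecting the matching set of assertions; get() confirms at the end that exactly two errors were seen.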

@Test
public void shouldGroupErrorsAndWarnings() throws InterruptedException {
    String response = Resources.read("errors_and_warnings.json", this.getClass());
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    HttpContent responseChunk = new DefaultLastHttpContent(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8));

    GenericQueryRequest requestMock = mock(GenericQueryRequest.class);
    queue.add(requestMock);
    channel.writeInbound(responseHeader, responseChunk);
    latch.await(1, TimeUnit.SECONDS);
    assertEquals(1, firedEvents.size());
    GenericQueryResponse inbound = (GenericQueryResponse) firedEvents.get(0);

    Map<String, Object> expectedMetrics = expectedMetricsCounts(1, 0);
    expectedMetrics.put("warningCount", 1);

    final AtomicInteger count = new AtomicInteger(0);
    assertResponse(inbound, false, ResponseStatus.FAILURE, FAKE_REQUESTID, FAKE_CLIENTID, "fatal", null,
            new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf byteBuf) {
                    fail("no result expected");
                }
            }, new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    count.incrementAndGet();
                    String response = buf.toString(CharsetUtil.UTF_8);
                    buf.release();
                    try {
                        Map error = mapper.readValue(response, Map.class);
                        assertEquals(5, error.size());
                        if (count.get() == 1) {
                            assertEquals(new Integer(4100), error.get("code"));
                            assertEquals(Boolean.FALSE, error.get("temp"));
                            assertEquals("Parse Error", error.get("msg"));
                        } else if (count.get() == 2) {
                            assertEquals(3, error.get("sev"));
                            assertEquals(201, error.get("code"));
                            assertEquals(Boolean.TRUE, error.get("temp"));
                            assertEquals("Nothing to do", error.get("msg"));
                            assertEquals("nothingToDo", error.get("name"));
                        }
                    } catch (IOException e) {
                        fail();
                    }
                }
            }, expectedMetrics);
    assertEquals(2, count.get());
}

From source file:org.apache.bookkeeper.metadata.etcd.EtcdRegistrationTest.java
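Ten threads race to register the same bookie; each outcome bumps a success or failure counter, and get() asserts that exactly one registration won.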

private void testConcurrentRegistration(boolean readonly) throws Exception {
    final String bookieId;
    if (readonly) {
        bookieId = runtime.getMethodName() + "-readonly:3181";
    } else {
        bookieId = runtime.getMethodName() + ":3181";
    }
    final int numBookies = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numBookies);
    final CyclicBarrier startBarrier = new CyclicBarrier(numBookies);
    final CyclicBarrier completeBarrier = new CyclicBarrier(numBookies);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    final AtomicInteger numSuccesses = new AtomicInteger(0);
    final AtomicInteger numFailures = new AtomicInteger(0);
    for (int i = 0; i < numBookies; i++) {
        executor.submit(() -> {
            try (EtcdRegistrationManager regMgr = new EtcdRegistrationManager(newEtcdClient(), scope, 1)) {
                try {
                    startBarrier.await();
                    regMgr.registerBookie(bookieId, readonly);
                    numSuccesses.incrementAndGet();
                } catch (InterruptedException e) {
                    log.warn("Interrupted at waiting for the other threads to start", e);
                } catch (BrokenBarrierException e) {
                    log.warn("Start barrier is broken", e);
                } catch (BookieException e) {
                    numFailures.incrementAndGet();
                }
                try {
                    completeBarrier.await();
                } catch (InterruptedException e) {
                    log.warn("Interrupted at waiting for the other threads to complete", e);
                } catch (BrokenBarrierException e) {
                    log.warn("Complete barrier is broken", e);
                }
                FutureUtils.complete(doneFuture, null);
            }
        });
    }
    doneFuture.join();
    assertEquals(1, numSuccesses.get());
    assertEquals(numBookies - 1, numFailures.get());
}

From source file:com.adobe.acs.commons.mcp.impl.processes.renovator.Renovator.java
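A visited-node counter is threaded through the tree visitor's callbacks (the increments presumably happen inside buildMoveTree, which is not shown here), and get() reports how many source nodes were scanned.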

private void identifyStructureFromRoot(AtomicInteger visitedSourceNodes, String source, String dest,
        ResourceResolver rr, Resource res, MovingNode root) throws TraversalException {
    root.setDestinationPath(dest);
    if (root instanceof MovingAsset) {
        String destFolder = StringUtils.substringBeforeLast(dest, "/");
        if (!additionalTargetFolders.contains(destFolder) && rr.getResource(destFolder) == null) {
            additionalTargetFolders.add(destFolder);
        }
    }
    moves.add(root);
    note(source, Report.misc, "Root path");
    note(source, Report.target, dest);

    TreeFilteringResourceVisitor visitor = new TreeFilteringResourceVisitor(JcrConstants.NT_FOLDER,
            JcrResourceConstants.NT_SLING_FOLDER, JcrResourceConstants.NT_SLING_ORDERED_FOLDER,
            NameConstants.NT_PAGE);

    visitor.setResourceVisitorChecked((r, level) -> buildMoveTree(r, level, root, visitedSourceNodes));
    visitor.setLeafVisitorChecked((r, level) -> buildMoveTree(r, level, root, visitedSourceNodes));

    visitor.accept(res);
    note("All scanned nodes", Report.misc, "Scanned " + visitedSourceNodes.get() + " source nodes.");
}

From source file:org.apache.bookkeeper.replication.TestLedgerUnderreplicationManager.java
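A progress counter incremented as ledgers are marked replicated lets the main thread assert, via get(), that the replication thread keeps making progress while it waits for it to finish.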

/**
 * Test that as the hierarchy gets cleaned up, it doesn't interfere
 * with the marking of other ledgers as underreplicated
 */
@Test(timeout = 90000)
public void testHierarchyCleanupInterference() throws Exception {
    final LedgerUnderreplicationManager replicaMgr1 = lmf1.newLedgerUnderreplicationManager();
    final LedgerUnderreplicationManager replicaMgr2 = lmf2.newLedgerUnderreplicationManager();

    final int iterations = 100;
    final AtomicBoolean threadFailed = new AtomicBoolean(false);
    Thread markUnder = new Thread() {
        public void run() {
            long l = 1;
            try {
                for (int i = 0; i < iterations; i++) {
                    replicaMgr1.markLedgerUnderreplicated(l, "localhost:3181");
                    l += 10000;
                }
            } catch (Exception e) {
                LOG.error("markUnder Thread failed with exception", e);
                threadFailed.set(true);
                return;
            }
        }
    };
    final AtomicInteger processed = new AtomicInteger(0);
    Thread markRepl = new Thread() {
        public void run() {
            try {
                for (int i = 0; i < iterations; i++) {
                    long l = replicaMgr2.getLedgerToRereplicate();
                    replicaMgr2.markLedgerReplicated(l);
                    processed.incrementAndGet();
                }
            } catch (Exception e) {
                LOG.error("markRepl Thread failed with exception", e);
                threadFailed.set(true);
                return;
            }
        }
    };
    markRepl.setDaemon(true);
    markUnder.setDaemon(true);

    markRepl.start();
    markUnder.start();
    markUnder.join();
    assertFalse("Thread failed to complete", threadFailed.get());

    int lastProcessed = 0;
    while (true) {
        markRepl.join(10000);
        if (!markRepl.isAlive()) {
            break;
        }
        assertFalse("markRepl thread not progressing", lastProcessed == processed.get());
        // Record the last observed count so each pass checks for fresh progress.
        lastProcessed = processed.get();
    }
    assertFalse("Thread failed to complete", threadFailed.get());

    List<String> children = zkc1.getChildren(urLedgerPath, false);
    for (String s : children) {
        LOG.info("s: {}", s);
    }
    assertEquals("All hierarchies should be cleaned up", 0, children.size());
}