List of usage examples for java.util.concurrent.atomic.AtomicInteger.get()
public final int get()
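get() returns the current value of the counter as a volatile read; it never blocks and never modifies state. Before the project excerpts below, here is a minimal self-contained sketch (the class and variable names are illustrative, not taken from any of the sources) of the typical pattern: worker threads accumulate into the AtomicInteger, and get() reads the final result once they are done.

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);
        // Two threads race to increment the shared counter 1000 times each.
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet();
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // get() is a volatile read: after join(), all increments from both
        // threads are visible, so this prints 2000.
        System.out.println(counter.get());
    }
}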
From source file: org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java
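In this HBase WAL-replay test, an overridden restoreEdit() increments a counter for every replayed edit; get() then asserts that all edits written before the simulated crash were restored.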
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * the region again. Verify seqids.
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException,
        IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region3 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region3);
    // Write countPerFamily edits into the three families. Do a flush on one
    // of the families during the load of edits so its seqid is not same as
    // others to test we do right thing when different seqids.
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // If first, so we have at least one family w/ different seqid to rest.
            region.flush(true);
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect, that our seqids are calculated correctly so
    // all edits in logs are seen as 'stale'/old.
    region.close(true);
    wal.shutdown();
    runWALSplit(this.conf);
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());
    // Next test. Add more edits, then 'crash' this region by stealing its wal
    // out from under it and assert that replay of the log adds the edits back
    // correctly when region is opened again.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
        @Override
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new wal for new region open.
            WAL wal3 = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
            HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
                @Override
                protected boolean restoreEdit(Store s, Cell cell) {
                    boolean b = super.restoreEdit(s, cell);
                    countOfRestoredEdits.incrementAndGet();
                    return b;
                }
            };
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            assertEquals(htd.getFamilies().size() * countPerFamily, countOfRestoredEdits.get());
            // I can't close wal1. Its been appropriated when we split.
            region3.close();
            wal3.close();
            return null;
        }
    });
}
From source file: org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServerTest.java
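Here success and error counters are incremented from ConcurrentUpdateSolrServer callbacks; once all requests have drained, get() supplies the values the assertions compare against the servlet's request counts.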
@Test
public void testConcurrentUpdate() throws Exception {
    TestServlet.clear();
    String serverUrl = jetty.getBaseUrl().toString() + "/cuss/foo";
    int cussThreadCount = 2;
    int cussQueueSize = 100;
    // for tracking callbacks from CUSS
    final AtomicInteger successCounter = new AtomicInteger(0);
    final AtomicInteger errorCounter = new AtomicInteger(0);
    final StringBuilder errors = new StringBuilder();
    @SuppressWarnings("serial")
    ConcurrentUpdateSolrServer cuss = new ConcurrentUpdateSolrServer(serverUrl, cussQueueSize,
            cussThreadCount) {
        @Override
        public void handleError(Throwable ex) {
            errorCounter.incrementAndGet();
            errors.append(" " + ex);
        }

        @Override
        public void onSuccess(HttpResponse resp) {
            successCounter.incrementAndGet();
        }
    };
    cuss.setParser(new BinaryResponseParser());
    cuss.setRequestWriter(new BinaryRequestWriter());
    cuss.setPollQueueTime(0);
    // ensure it doesn't block where there's nothing to do yet
    cuss.blockUntilFinished();
    int poolSize = 5;
    ExecutorService threadPool = Executors.newFixedThreadPool(poolSize,
            new SolrjNamedThreadFactory("testCUSS"));
    int numDocs = 100;
    int numRunnables = 5;
    for (int r = 0; r < numRunnables; r++)
        threadPool.execute(new SendDocsRunnable(String.valueOf(r), numDocs, cuss));
    // ensure all docs are sent
    threadPool.awaitTermination(5, TimeUnit.SECONDS);
    threadPool.shutdown();
    // wait until all requests are processed by CUSS
    cuss.blockUntilFinished();
    cuss.shutdownNow();
    assertEquals("post", TestServlet.lastMethod);
    // expect all requests to be successful
    int expectedSuccesses = TestServlet.numReqsRcvd.get();
    assertTrue(expectedSuccesses > 0); // at least one request must have been sent
    assertTrue("Expected no errors but got " + errorCounter.get() + ", due to: " + errors.toString(),
            errorCounter.get() == 0);
    assertTrue("Expected " + expectedSuccesses + " successes, but got " + successCounter.get(),
            successCounter.get() == expectedSuccesses);
    int expectedDocs = numDocs * numRunnables;
    assertTrue("Expected CUSS to send " + expectedDocs + " but got " + TestServlet.numDocsRcvd.get(),
            TestServlet.numDocsRcvd.get() == expectedDocs);
}
From source file: com.alibaba.druid.benckmark.pool.Case3.java
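This connection-pool benchmark counts completed queries across worker threads; after the end latch releases, get() asserts the total equals LOOP_COUNT * threadCount.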
private void p0(final DataSource dataSource, String name, int threadCount) throws Exception {
    final AtomicInteger count = new AtomicInteger();
    final AtomicInteger errorCount = new AtomicInteger();
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch endLatch = new CountDownLatch(threadCount);
    for (int i = 0; i < threadCount; ++i) {
        Thread thread = new Thread() {
            public void run() {
                try {
                    startLatch.await();
                    for (int i = 0; i < LOOP_COUNT; ++i) {
                        Connection conn = dataSource.getConnection();
                        Statement stmt = conn.createStatement();
                        ResultSet rs = stmt.executeQuery(sql);
                        while (rs.next()) {
                            rs.getInt(1);
                        }
                        rs.close();
                        stmt.close();
                        conn.close();
                        count.incrementAndGet();
                    }
                } catch (Throwable ex) {
                    errorCount.incrementAndGet();
                    ex.printStackTrace();
                } finally {
                    endLatch.countDown();
                }
            }
        };
        thread.start();
    }
    long startMillis = System.currentTimeMillis();
    long startYGC = TestUtil.getYoungGC();
    long startFullGC = TestUtil.getFullGC();
    startLatch.countDown();
    endLatch.await();
    long millis = System.currentTimeMillis() - startMillis;
    long ygc = TestUtil.getYoungGC() - startYGC;
    long fullGC = TestUtil.getFullGC() - startFullGC;
    Assert.assertEquals(LOOP_COUNT * threadCount, count.get());
    Thread.sleep(1);
    System.out.println("thread " + threadCount + " " + name + " millis : "
            + NumberFormat.getInstance().format(millis) + ", YGC " + ygc + " FGC " + fullGC);
}
From source file: com.alibaba.wasp.meta.FMetaServicesImplWithoutRetry.java
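A meta-table scan increments a counter for each entity group of the table; get() then confirms at least one entity group was found before reporting the table available.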
public boolean isTableAvailable(final byte[] tableName) throws IOException {
    final AtomicBoolean available = new AtomicBoolean(true);
    final AtomicInteger entityGroupCount = new AtomicInteger(0);
    FMetaScanner.MetaScannerVisitor visitor = new FMetaScanner.MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result row) throws IOException {
            EntityGroupInfo info = FMetaScanner.getEntityGroupInfo(row);
            if (info != null) {
                if (Bytes.equals(tableName, info.getTableName())) {
                    ServerName sn = ServerName.getServerName(row);
                    if (sn == null) {
                        available.set(false);
                        return false;
                    }
                    entityGroupCount.incrementAndGet();
                }
            }
            return true;
        }
    };
    FMetaScanner.metaScan(getConf(), visitor);
    return available.get() && (entityGroupCount.get() > 0);
}
From source file: org.couchbase.mock.client.ClientViewTest.java
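A nested asynchronous view-query listener bumps the counter when the expected response arrives; get() verifies the callback fired exactly once.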
@Test
public void testViewQueryWithListener() throws Exception {
    final Query query = new Query();
    query.setReduce(false);
    HttpFuture<View> future = client.asyncGetView(DESIGN_DOC_W_REDUCE, VIEW_NAME_W_REDUCE);
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicInteger callCount = new AtomicInteger(0);
    future.addListener(new HttpCompletionListener() {
        @Override
        public void onComplete(HttpFuture<?> f) throws Exception {
            View view = (View) f.get();
            HttpFuture<ViewResponse> queryFuture = client.asyncQuery(view, query);
            queryFuture.addListener(new HttpCompletionListener() {
                @Override
                public void onComplete(HttpFuture<?> f) throws Exception {
                    ViewResponse resp = (ViewResponse) f.get();
                    if (resp.size() == ITEMS.size()) {
                        callCount.incrementAndGet();
                        latch.countDown();
                    }
                }
            });
        }
    });
    assertTrue(latch.await(3, TimeUnit.SECONDS));
    assertEquals(1, callCount.get());
}
From source file: com.adobe.acs.commons.mcp.impl.processes.renovator.Renovator.java
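References are tallied across deferred resolver tasks as each node is visited; in the onFinish handler, get() reads the final total for the report note.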
protected void identifyReferences(ActionManager manager) {
    AtomicInteger discoveredReferences = new AtomicInteger();
    manager.deferredWithResolver(rr -> {
        moves.forEach(node -> {
            manager.deferredWithResolver(rr2 -> {
                node.visit(childNode -> {
                    if (childNode.isSupposedToBeReferenced()) {
                        manager.deferredWithResolver(rr3 -> {
                            Actions.setCurrentItem("Looking for references to " + childNode.getSourcePath());
                            findReferences(rr3, childNode);
                            discoveredReferences.addAndGet(childNode.getAllReferences().size());
                            if (detailedReport) {
                                note(childNode.getSourcePath(), Report.all_references,
                                        childNode.getAllReferences().size());
                                note(childNode.getSourcePath(), Report.published_references,
                                        childNode.getPublishedReferences().size());
                            }
                        });
                    }
                });
            });
        });
    });
    manager.onFinish(() -> {
        note("All discovered references", Report.misc,
                "Discovered " + discoveredReferences.get() + " references.");
    });
}
From source file: com.blacklocus.jres.request.index.JresUpdateDocumentScriptTest.java
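Sixteen threads accumulate random increments into a shared total while issuing conflicting document updates; after the pool terminates, get() supplies the expected document value for the assertion.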
@Test
public void testRetryOnConflict() throws InterruptedException {
    final String index = "JresUpdateDocumentScriptTest.testRetryOnConflict".toLowerCase();
    final String type = "test";
    final String id = "warzone";
    final AtomicInteger total = new AtomicInteger();
    final AtomicReference<String> error = new AtomicReference<String>();
    final Random random = new Random(System.currentTimeMillis());
    final int numThreads = 16, numIterations = 100;
    ExecutorService x = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        x.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    for (int j = 0; j < numIterations; j++) {
                        int increment = random.nextInt(5);
                        total.addAndGet(increment);
                        JresUpdateDocumentScript req = new JresUpdateDocumentScript(index, type, id,
                                "ctx._source.value += increment", ImmutableMap.of("increment", increment),
                                ImmutableMap.of("value", increment), null);
                        req.setRetryOnConflict(numIterations * 10);
                        jres.quest(req);
                    }
                } catch (Exception e) {
                    error.set(e.getMessage());
                }
            }
        });
    }
    x.shutdown();
    x.awaitTermination(1, TimeUnit.MINUTES);
    Assert.assertNull("With so many retries, all of these should have gotten through without conflict error",
            error.get());
    jres.quest(new JresRefresh(index));
    JresGetDocumentReply getReply = jres.quest(new JresGetDocument(index, type, id));
    Map<String, Integer> doc = getReply.getSourceAsType(new TypeReference<Map<String, Integer>>() {
    });
    Assert.assertEquals("All increments should have gotten committed", (Object) total.get(),
            doc.get("value"));
    Assert.assertEquals("Should have been numThreads * numIterations versions committed",
            (Object) (numThreads * numIterations), getReply.getVersion());
}
From source file: org.nd4j.linalg.api.test.NDArrayTests.java
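Inside the SliceOp callback, get() selects the expected slice for the current iteration, and the counter is incremented after each comparison.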
@Test
public void testVectorDimensionMulti() {
    INDArray arr = Nd4j.create(Nd4j.linspace(1, 24, 24).data(), new int[] { 4, 3, 2 });
    final AtomicInteger count = new AtomicInteger(0);
    arr.iterateOverDimension(arr.shape().length - 1, new SliceOp() {
        /**
         * Operates on an ndarray slice
         *
         * @param nd the result to operate on
         */
        @Override
        public void operate(INDArray nd) {
            INDArray test = nd;
            if (count.get() == 0) {
                INDArray answer = Nd4j.create(new float[] { 1, 2 }, new int[] { 2 });
                assertEquals(answer, test);
            } else if (count.get() == 1) {
                INDArray answer = Nd4j.create(new float[] { 3, 4 }, new int[] { 2 });
                assertEquals(answer, test);
            } else if (count.get() == 2) {
                INDArray answer = Nd4j.create(new float[] { 5, 6 }, new int[] { 2 });
                assertEquals(answer, test);
            } else if (count.get() == 3) {
                INDArray answer = Nd4j.create(new float[] { 7, 8 }, new int[] { 2 });
                assertEquals(answer, test);
                answer.data().destroy();
            } else if (count.get() == 4) {
                INDArray answer = Nd4j.create(new float[] { 9, 10 }, new int[] { 2 });
                assertEquals(answer, test);
                answer.data().destroy();
            } else if (count.get() == 5) {
                INDArray answer = Nd4j.create(new float[] { 11, 12 }, new int[] { 2 });
                assertEquals(answer, test);
                answer.data().destroy();
            }
            count.incrementAndGet();
        }
    }, false);
}
From source file: org.apache.hadoop.hbase.tool.TestLoadIncrementalHFilesSplitRecovery.java
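An overridden bulkLoadPhase() counts attempts and forces a region split on the first one; get() asserts exactly three attempts occurred (the initial failure plus loads of the two daughter regions).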
/**
 * This test exercises the path where there is a split after initial validation but before the
 * atomic bulk load call. We cannot use presplitting to test this path, so we actually inject a
 * split just before the atomic region load.
 */
@Test(timeout = 120000)
public void testSplitWhileBulkLoadPhase() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        populateTable(connection, table, 1);
        assertExpectedTable(table, ROWCOUNT, 1);
        // Now let's cause trouble. This will occur after checks and cause bulk
        // files to fail when attempt to atomically import. This is recoverable.
        final AtomicInteger attemptedCalls = new AtomicInteger();
        LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {
            @Override
            protected void bulkLoadPhase(final Table htable, final Connection conn, ExecutorService pool,
                    Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups,
                    boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
                int i = attemptedCalls.incrementAndGet();
                if (i == 1) {
                    // On first attempt force a split.
                    forceSplit(table);
                }
                super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
            }
        };
        // create HFiles for different column families
        try (Table t = connection.getTable(table);
                RegionLocator locator = connection.getRegionLocator(table);
                Admin admin = connection.getAdmin()) {
            Path bulk = buildBulkFiles(table, 2);
            lih2.doBulkLoad(bulk, admin, t, locator);
        }
        // check that data was loaded
        // The three expected attempts are 1) failure because need to split, 2)
        // load of split top 3) load of split bottom
        assertEquals(attemptedCalls.get(), 3);
        assertExpectedTable(table, ROWCOUNT, 2);
    }
}
From source file: org.apache.bookkeeper.client.BookieInfoReader.java
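Each callback compares totalCompleted.incrementAndGet() with totalSent.get() to release the latch once a reply has arrived from every bookie.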
Map<BookieSocketAddress, BookieInfo> getBookieInfo() throws BKException, InterruptedException {
    BookieClient bkc = bk.getBookieClient();
    final AtomicInteger totalSent = new AtomicInteger();
    final AtomicInteger totalCompleted = new AtomicInteger();
    final ConcurrentMap<BookieSocketAddress, BookieInfo> map =
            new ConcurrentHashMap<BookieSocketAddress, BookieInfo>();
    final CountDownLatch latch = new CountDownLatch(1);
    long requested = BookkeeperProtocol.GetBookieInfoRequest.Flags.TOTAL_DISK_CAPACITY_VALUE
            | BookkeeperProtocol.GetBookieInfoRequest.Flags.FREE_DISK_SPACE_VALUE;
    Collection<BookieSocketAddress> bookies;
    bookies = bk.bookieWatcher.getBookies();
    bookies.addAll(bk.bookieWatcher.getReadOnlyBookies());
    totalSent.set(bookies.size());
    for (BookieSocketAddress b : bookies) {
        bkc.getBookieInfo(b, requested, new GetBookieInfoCallback() {
            @Override
            public void getBookieInfoComplete(int rc, BookieInfo bInfo, Object ctx) {
                BookieSocketAddress b = (BookieSocketAddress) ctx;
                if (rc != BKException.Code.OK) {
                    if (LOG.isErrorEnabled()) {
                        LOG.error("Reading bookie info from bookie {} failed due to {}", b,
                                BKException.codeLogger(rc));
                    }
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Free disk space on bookie {} is {}.", b, bInfo.getFreeDiskSpace());
                    }
                    map.put(b, bInfo);
                }
                if (totalCompleted.incrementAndGet() == totalSent.get()) {
                    latch.countDown();
                }
            }
        }, b);
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        LOG.error("Received InterruptedException ", e);
        throw e;
    }
    return map;
}