Example usage for java.util.concurrent.atomic AtomicInteger get

List of usage examples for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicInteger.get().

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
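
Before the project-level examples that follow, here is a minimal, self-contained sketch (not taken from any of the listed projects) of the typical pattern behind get(): worker threads update the counter with incrementAndGet(), and the final value is read back with get(), which behaves as a volatile read.

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Four worker threads each increment the shared counter 1000 times.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // get() performs a volatile read; after join() it observes every increment.
        System.out.println("final value = " + counter.get()); // prints: final value = 4000
    }
}

The same read-after-concurrent-update pattern appears throughout the examples below, where get() drives assertions, progress logging, and loop conditions.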

Usage

From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureQueue.java

/**
 * Verify that "write" operations for a single table are serialized,
 * but different tables can be executed in parallel.
 */
@Test(timeout = 90000)
public void testConcurrentWriteOps() throws Exception {
    final TestTableProcSet procSet = new TestTableProcSet(queue);

    final int NUM_ITEMS = 10;
    final int NUM_TABLES = 4;
    final AtomicInteger opsCount = new AtomicInteger(0);
    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
        for (int j = 1; j < NUM_ITEMS; ++j) {
            procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
                    TableProcedureInterface.TableOperationType.EDIT));
            opsCount.incrementAndGet();
        }
    }
    assertEquals(opsCount.get(), queue.size());

    final Thread[] threads = new Thread[NUM_TABLES * 2];
    final HashSet<TableName> concurrentTables = new HashSet<TableName>();
    final ArrayList<String> failures = new ArrayList<String>();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                while (opsCount.get() > 0) {
                    try {
                        TableProcedureInterface proc = procSet.acquire();
                        if (proc == null) {
                            queue.signalAll();
                            if (opsCount.get() > 0) {
                                continue;
                            }
                            break;
                        }
                        synchronized (concurrentTables) {
                            assertTrue("unexpected concurrency on " + proc.getTableName(),
                                    concurrentTables.add(proc.getTableName()));
                        }
                        assertTrue(opsCount.decrementAndGet() >= 0);
                        try {
                            long procId = ((Procedure) proc).getProcId();
                            TableName tableId = proc.getTableName();
                            int concurrent = concurrentCount.incrementAndGet();
                            assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES,
                                    concurrent >= 1 && concurrent <= NUM_TABLES);
                            LOG.debug("[S] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            Thread.sleep(2000);
                            concurrent = concurrentCount.decrementAndGet();
                            LOG.debug("[E] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
                        } finally {
                            synchronized (concurrentTables) {
                                assertTrue(concurrentTables.remove(proc.getTableName()));
                            }
                            procSet.release(proc);
                        }
                    } catch (Throwable e) {
                        LOG.error("Failed " + e.getMessage(), e);
                        synchronized (failures) {
                            failures.add(e.getMessage());
                        }
                    } finally {
                        queue.signalAll();
                    }
                }
            }
        };
        threads[i].start();
    }
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }
    assertTrue(failures.toString(), failures.isEmpty());
    assertEquals(0, opsCount.get());
    assertEquals(0, queue.size());

    for (int i = 1; i <= NUM_TABLES; ++i) {
        TableName table = TableName.valueOf(String.format("testtb-%04d", i));
        assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table));
    }
}

From source file:com.adobe.acs.commons.workflow.process.impl.SyncSmartTagsToXmpMetadataNodeProcess.java

protected void syncSmartTagsToMetadata(final Asset asset, ProcessArgs processArgs) throws PersistenceException {
    final Resource assetResource = asset.adaptTo(Resource.class);
    final ResourceResolver resourceResolver = assetResource.getResourceResolver();

    final Resource metadataResource = assetResource
            .getChild(JcrConstants.JCR_CONTENT + "/" + DamConstants.METADATA_FOLDER);
    final Resource smartTagsResource = assetResource
            .getChild(JcrConstants.JCR_CONTENT + "/" + DamConstants.METADATA_FOLDER + "/" + NN_PREDICTED_TAGS);

    if (metadataResource.getChild(processArgs.getSequenceName()) != null) {
        // Remove existing, as they will be re-created
        resourceResolver.delete(metadataResource.getChild(processArgs.getSequenceName()));
    }

    final Resource parentResource = resourceResolver.create(metadataResource, processArgs.getSequenceName(),
            new ImmutableMap.Builder<String, Object>()
                    .put(JcrConstants.JCR_PRIMARYTYPE, JcrConstants.NT_UNSTRUCTURED)
                    .put("xmpArrayType", "rdf:Seq").put("xmpNodeType", "xmpArray").put("xmpArraySize", 0L)
                    .build());

    final AtomicInteger count = new AtomicInteger(0);
    if (smartTagsResource != null) {
        StreamSupport.stream(smartTagsResource.getChildren().spliterator(), false).map(Resource::getValueMap)
                .filter(properties -> properties.get(PN_SMART_TAG_CONFIDENCE, 0D) >= processArgs
                        .getMinimumConfidence())
                .filter(properties -> StringUtils.isNotBlank(properties.get(PN_SMART_TAG_NAME, String.class)))
                .forEach(properties -> {
                    createSequenceItemResource(asset, processArgs, resourceResolver, parentResource, count,
                            properties);
                });
    }

    parentResource.adaptTo(ModifiableValueMap.class).put("xmpArraySize", count.get());

    log.info("Synced [ {} ] Smart Tags to Asset XMP Metadata structure: [ {} ] ", count.get(),
            asset.getPath() + "/jcr:content/metadata/" + processArgs.getSequenceName());
}

From source file:com.netflix.curator.framework.recipes.barriers.TestDistributedDoubleBarrier.java

@Test
public void testBasic() throws Exception {
    final Timing timing = new Timing();
    final List<Closeable> closeables = Lists.newArrayList();
    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    try {
        closeables.add(client);
        client.start();

        final CountDownLatch postEnterLatch = new CountDownLatch(QTY);
        final CountDownLatch postLeaveLatch = new CountDownLatch(QTY);
        final AtomicInteger count = new AtomicInteger(0);
        final AtomicInteger max = new AtomicInteger(0);
        List<Future<Void>> futures = Lists.newArrayList();
        ExecutorService service = Executors.newCachedThreadPool();
        for (int i = 0; i < QTY; ++i) {
            Future<Void> future = service.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    DistributedDoubleBarrier barrier = new DistributedDoubleBarrier(client, "/barrier", QTY);

                    Assert.assertTrue(barrier.enter(timing.seconds(), TimeUnit.SECONDS));

                    synchronized (TestDistributedDoubleBarrier.this) {
                        int thisCount = count.incrementAndGet();
                        if (thisCount > max.get()) {
                            max.set(thisCount);
                        }
                    }

                    postEnterLatch.countDown();
                    Assert.assertTrue(timing.awaitLatch(postEnterLatch));

                    Assert.assertEquals(count.get(), QTY);

                    Assert.assertTrue(barrier.leave(10, TimeUnit.SECONDS));
                    count.decrementAndGet();

                    postLeaveLatch.countDown();
                    Assert.assertTrue(timing.awaitLatch(postLeaveLatch));

                    return null;
                }
            });
            futures.add(future);
        }

        for (Future<Void> f : futures) {
            f.get();
        }
        Assert.assertEquals(count.get(), 0);
        Assert.assertEquals(max.get(), QTY);
    } finally {
        for (Closeable c : closeables) {
            IOUtils.closeQuietly(c);
        }
    }
}

From source file:org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility.java

private static int countMetaRegions(final HMaster master, final TableName tableName) throws IOException {
    final AtomicInteger actualRegCount = new AtomicInteger(0);
    final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
        @Override
        public boolean visit(Result rowResult) throws IOException {
            RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
            if (list == null) {
                LOG.warn("No serialized HRegionInfo in " + rowResult);
                return true;
            }
            HRegionLocation l = list.getRegionLocation();
            if (l == null) {
                return true;
            }
            if (!l.getRegionInfo().getTable().equals(tableName)) {
                return false;
            }
            if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit())
                return true;
            HRegionLocation[] locations = list.getRegionLocations();
            for (HRegionLocation location : locations) {
                if (location == null)
                    continue;
                ServerName serverName = location.getServerName();
                // Make sure that regions are assigned to server
                if (serverName != null && serverName.getHostAndPort() != null) {
                    actualRegCount.incrementAndGet();
                }
            }
            return true;
        }
    };
    MetaTableAccessor.scanMetaForTableRegions(master.getConnection(), visitor, tableName);
    return actualRegCount.get();
}

From source file:org.apache.blur.shell.QueryCommand.java

private String displayRecordInRowMultiFamilyView(int result, final TableDisplay tableDisplay,
        final AtomicInteger line, final Map<String, List<String>> columnOrder, final String currentFamily,
        final Record record) {
    int c = 3;
    List<String> orderedColumns = getOrderColumnValues(record, columnOrder);
    String family = record.getFamily();
    if (!family.equals(currentFamily)) {
        List<String> list = columnOrder.get(family);
        for (int i = 0; i < list.size(); i++) {
            tableDisplay.set(i + c, line.get(),
                    highlight(getTruncatedVersion(toStringBinary(family + "." + list.get(i)))));
        }
        tableDisplay.set(0, line.get(), white(toStringBinary(Integer.toString(result))));
        line.incrementAndGet();
    }
    tableDisplay.set(2, line.get(), white(getTruncatedVersion(toStringBinary(record.getRecordId()))));
    for (String oc : orderedColumns) {
        if (oc != null) {
            tableDisplay.set(c, line.get(), white(getTruncatedVersion(toStringBinary(oc))));
        }
        c++;
    }
    tableDisplay.set(0, line.get(), white(toStringBinary(Integer.toString(result))));
    line.incrementAndGet();
    return family;
}

From source file:com.indeed.lsmtree.recordlog.TestRecordLogDirectory.java

public void testRandom() throws Exception {
    final RecordLogDirectory<String> fileCache = createRecordLogDirectory();
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < 10000; i++) {
                        int rand = r.nextInt(positions.size());
                        assertTrue(fileCache.get(positions.get(rand)).equals(strings.get(rand)));
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    fileCache.close();
}

From source file:au.org.ala.bhl.service.IndexingService.java

/**
 * Indexes an item that exists in the document cache
 *
 * @param item
 */
public void indexItem(final ItemDescriptor item) {

    String itemPathStr = _docCache.getItemDirectoryPath(item.getInternetArchiveId());

    final SolrServer server = createSolrServer();

    log("Indexing pages %s for item %s", itemPathStr, item.getItemId());

    try {
        final AtomicInteger pageCount = new AtomicInteger(0);
        File itemPath = new File(itemPathStr);
        if (itemPath.exists() && itemPath.isDirectory()) {
            File f = _docCache.getPageArchiveFile(item);
            if (f.exists()) {
                _docCache.forEachItemPage(item, new CachedItemPageHandler() {

                    public void startItem(String itemId) {
                    }

                    public void onPage(String iaId, String pageId, String text) {
                        indexPage(item, pageId, text, server);
                        pageCount.incrementAndGet();

                        if (pageCount.get() % 100 == 0) {
                            try {
                                server.commit();
                            } catch (Exception ex) {
                                throw new RuntimeException(ex);
                            }
                        }
                    }

                    public void endItem(String itemId) {
                    }
                });

                if (pageCount.get() > 0) {
                    server.commit();
                    getItemsService().setItemStatus(item.getItemId(), ItemStatus.INDEXED, pageCount.get());
                    log("%s pages indexed for item: %s", pageCount, item.getItemId());
                } else {
                    log("Ignoring empty item (no pages): %s", item.getItemId());
                }
            } else {
                log("Ignoring partial or empty item (no archive file found): %s", item.getInternetArchiveId());
            }
        }

    } catch (Exception ex) {
        ex.printStackTrace();
    }
}

From source file:com.twitter.distributedlog.auditor.DLAuditor.java

private Map<String, Long> calculateStreamSpaceUsage(final URI uri,
        final com.twitter.distributedlog.DistributedLogManagerFactory factory) throws IOException {
    Collection<String> streams = factory.enumerateAllLogsInNamespace();
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);

    final Map<String, Long> streamSpaceUsageMap = new ConcurrentSkipListMap<String, Long>();
    final AtomicInteger numStreamsCollected = new AtomicInteger(0);

    executeAction(streamQueue, 10, new Action<String>() {
        @Override
        public void execute(String stream) throws IOException {
            streamSpaceUsageMap.put(stream, calculateStreamSpaceUsage(factory, stream));
            if (numStreamsCollected.incrementAndGet() % 1000 == 0) {
                logger.info("Calculated {} streams from uri {}.", numStreamsCollected.get(), uri);
            }
        }
    });

    return streamSpaceUsageMap;
}

From source file:cc.gospy.example.google.GoogleScholarSpider.java

public Collection<String> getResultLinks(final String keyword, final int pageFrom, final int pageTo) {
    if (pageFrom < 1)
        throw new IllegalArgumentException(pageFrom + "<" + 1);
    if (pageFrom >= pageTo)
        throw new IllegalArgumentException(pageFrom + ">=" + pageTo);

    final AtomicInteger currentPage = new AtomicInteger(pageFrom);
    final AtomicBoolean returned = new AtomicBoolean(false);
    final Collection<String> links = new LinkedHashSet<>();
    Gospy googleScholarSpider = Gospy.custom()
            .setScheduler(
                    Schedulers.VerifiableScheduler.custom().setExitCallback(() -> returned.set(true)).build())
            .addFetcher(Fetchers.HttpFetcher.custom()
                    .before(request -> request.setConfig(
                            RequestConfig.custom().setProxy(new HttpHost("127.0.0.1", 8118)).build()))
                    .build())
            .addProcessor(Processors.XPathProcessor.custom().extract(
                    "//*[@id='gs_ccl_results']/div/div/h3/a/@href", (task, resultList, returnedData) -> {
                        links.addAll(resultList);
                        currentPage.incrementAndGet();
                        if (pageFrom <= currentPage.get() && currentPage.get() < pageTo) {
                            return Arrays.asList(
                                    new Task(String.format("https://scholar.google.com/scholar?start=%d&q=%s",
                                            (currentPage.get() - 1) * 10, keyword)));
                        } else {
                            return Arrays.asList();
                        }
                    }).build())
            .build()
            .addTask(String.format("https://scholar.google.com/scholar?start=%d&q=%s", pageFrom, keyword));
    googleScholarSpider.start();
    while (!returned.get())
        ; // busy-wait until the spider has returned
    googleScholarSpider.stop();
    return links;
}

From source file:org.apache.distributedlog.auditor.DLAuditor.java

private Map<String, Long> calculateStreamSpaceUsage(final URI uri, final Namespace namespace)
        throws IOException {
    Iterator<String> streams = namespace.getLogs();
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    while (streams.hasNext()) {
        streamQueue.add(streams.next());
    }

    final Map<String, Long> streamSpaceUsageMap = new ConcurrentSkipListMap<String, Long>();
    final AtomicInteger numStreamsCollected = new AtomicInteger(0);

    executeAction(streamQueue, 10, new Action<String>() {
        @Override
        public void execute(String stream) throws IOException {
            streamSpaceUsageMap.put(stream, calculateStreamSpaceUsage(namespace, stream));
            if (numStreamsCollected.incrementAndGet() % 1000 == 0) {
                logger.info("Calculated {} streams from uri {}.", numStreamsCollected.get(), uri);
            }
        }
    });

    return streamSpaceUsageMap;
}