Example usage for java.util.concurrent.atomic AtomicInteger decrementAndGet

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicInteger decrementAndGet.

Prototype

public final int decrementAndGet() 

Document

Atomically decrements the current value, with memory effects as specified by VarHandle#getAndAdd.
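
A quick, self-contained illustration of the semantics (a hypothetical example, not taken from the sources below): decrementAndGet() subtracts one and returns the updated value in a single atomic step, so the return value can safely drive decisions shared across threads.

import java.util.concurrent.atomic.AtomicInteger;

public class DecrementExample {
    public static void main(String[] args) {
        final AtomicInteger remaining = new AtomicInteger(3);
        Runnable task = () -> {
            // decrementAndGet() returns the value *after* the decrement,
            // so exactly one thread observes 0 and runs the completion step.
            if (remaining.decrementAndGet() == 0) {
                System.out.println("all tasks finished");
            }
        };
        for (int i = 0; i < 3; i++) {
            new Thread(task).start();
        }
    }
}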

Usage

From source file:com.jivesoftware.os.upena.service.UpenaStore.java

public void log(long whenAgoElapseLargestMillis, long whenAgoElapseSmallestMillis, int minCount, //
        final String who, final String what, final String why, final String where, final String how,
        final LogStream logStream) throws Exception {
    long time = System.currentTimeMillis();
    final long maxTimestampInclusize = time - whenAgoElapseSmallestMillis;
    final long minTimestampExclusize = time - whenAgoElapseLargestMillis;

    final AtomicInteger count = new AtomicInteger(minCount);
    changeLogClient().scan(Collections.singletonList(ScanRange.ROW_SCAN),
            (byte[] prefix, byte[] key, byte[] value, long timestamp, long version) -> {
                RecordedChange change = mapper.readValue(value, RecordedChange.class);
                if (change.when <= maxTimestampInclusize
                        && (change.when > minTimestampExclusize || count.get() > 0)) {
                    if (who != null && who.length() > 0 && !change.who.contains(who)) {
                        return true;
                    }
                    if (what != null && what.length() > 0 && !change.what.contains(what)) {
                        return true;
                    }
                    if (why != null && why.length() > 0 && !change.why.contains(why)) {
                        return true;
                    }
                    if (where != null && where.length() > 0 && !change.where.contains(where)) {
                        return true;
                    }
                    if (how != null && how.length() > 0 && !change.how.contains(how)) {
                        return true;
                    }
                    count.decrementAndGet();
                    return logStream.stream(change);
                }
                return change.when > maxTimestampInclusize || count.get() > 0;
            }, true);
}
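
The AtomicInteger above is doing double duty: besides thread-safety, a lambda can only capture effectively final locals, so the atomic wrapper gives the scan callback a mutable "keep at least minCount entries" budget that it can decrement. A stripped-down, hypothetical sketch of that idiom:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;

public class LambdaBudget {
    public static void main(String[] args) {
        // A lambda cannot reassign a captured local int, so an
        // AtomicInteger serves as the mutable "at least 3 results" budget.
        AtomicInteger budget = new AtomicInteger(3);
        IntStream.range(0, 10).forEach(i -> {
            if (i % 2 == 0 || budget.get() > 0) {
                budget.decrementAndGet();
                System.out.println("kept " + i);
            }
        });
    }
}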

From source file:io.realm.Realm.java

/**
 * Closes the Realm instance and all its resources.
 * <p>
 * It's important to always close Realm instances when you're done with them, in order
 * not to leak memory or file descriptors, or let the Realm file grow out of measure.
 *
 * @throws java.lang.IllegalStateException if trying to close Realm on a different thread than the
 * one it was created on.
 */
@Override
public void close() {
    if (this.threadId != Thread.currentThread().getId()) {
        throw new IllegalStateException(INCORRECT_THREAD_CLOSE_MESSAGE);
    }

    Map<RealmConfiguration, Integer> localRefCount = referenceCount.get();
    String canonicalPath = configuration.getPath();
    Integer references = localRefCount.get(configuration);
    if (references == null) {
        references = 0;
    }
    if (sharedGroup != null && references == 1) {
        realmsCache.get().remove(configuration);
        sharedGroup.close();
        sharedGroup = null;

        // Synchronization is necessary here: before the counter is removed, another
        // thread could fetch it and increment it in createAndValidate.
        synchronized (Realm.class) {
            globalPathConfigurationCache.get(canonicalPath).remove(configuration);
            AtomicInteger counter = globalOpenInstanceCounter.get(canonicalPath);
            if (counter.decrementAndGet() == 0) {
                globalOpenInstanceCounter.remove(canonicalPath);
            }
        }
    }

    int refCount = references - 1;
    if (refCount < 0) {
        RealmLog.w("Calling close() on a Realm that is already closed: " + canonicalPath);
    }
    localRefCount.put(configuration, Math.max(0, refCount));

    if (handler != null && refCount <= 0) {
        removeHandler(handler);
    }
}
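
The decrementAndGet() == 0 check above is a reference-counting idiom: exactly one closer observes the count reaching zero and removes the global entry. A minimal sketch of the same idea (not Realm's actual implementation); note that, like the excerpt, it must hold a lock so a concurrent open cannot re-increment the counter between the decrement and the removal:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class RefCountRegistry {
    private final ConcurrentHashMap<String, AtomicInteger> counts = new ConcurrentHashMap<>();
    private final Object lock = new Object();

    void open(String path) {
        synchronized (lock) {
            counts.computeIfAbsent(path, p -> new AtomicInteger(0)).incrementAndGet();
        }
    }

    void close(String path) {
        synchronized (lock) {
            AtomicInteger counter = counts.get(path);
            // Exactly one caller sees the count hit zero and removes
            // the entry for this path.
            if (counter != null && counter.decrementAndGet() == 0) {
                counts.remove(path);
            }
        }
    }
}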

From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureSchedulerConcurrency.java

/**
 * Verify that "write" operations for a single table are serialized,
 * but different tables can be executed in parallel.
 */
@Test(timeout = 60000)
public void testConcurrentWriteOps() throws Exception {
    final TestTableProcSet procSet = new TestTableProcSet(queue);

    final int NUM_ITEMS = 10;
    final int NUM_TABLES = 4;
    final AtomicInteger opsCount = new AtomicInteger(0);
    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
        for (int j = 1; j < NUM_ITEMS; ++j) {
            procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
                    TableProcedureInterface.TableOperationType.EDIT));
            opsCount.incrementAndGet();
        }
    }
    assertEquals(opsCount.get(), queue.size());

    final Thread[] threads = new Thread[NUM_TABLES * 2];
    final HashSet<TableName> concurrentTables = new HashSet<TableName>();
    final ArrayList<String> failures = new ArrayList<String>();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                while (opsCount.get() > 0) {
                    try {
                        Procedure proc = procSet.acquire();
                        if (proc == null) {
                            queue.signalAll();
                            if (opsCount.get() > 0) {
                                continue;
                            }
                            break;
                        }

                        TableName tableId = procSet.getTableName(proc);
                        synchronized (concurrentTables) {
                            assertTrue("unexpected concurrency on " + tableId, concurrentTables.add(tableId));
                        }
                        assertTrue(opsCount.decrementAndGet() >= 0);
                        try {
                            long procId = proc.getProcId();
                            int concurrent = concurrentCount.incrementAndGet();
                            assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES,
                                    concurrent >= 1 && concurrent <= NUM_TABLES);
                            LOG.debug("[S] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            Thread.sleep(2000);
                            concurrent = concurrentCount.decrementAndGet();
                            LOG.debug("[E] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
                        } finally {
                            synchronized (concurrentTables) {
                                assertTrue(concurrentTables.remove(tableId));
                            }
                            procSet.release(proc);
                        }
                    } catch (Throwable e) {
                        LOG.error("Failed " + e.getMessage(), e);
                        synchronized (failures) {
                            failures.add(e.getMessage());
                        }
                    } finally {
                        queue.signalAll();
                    }
                }
            }
        };
        threads[i].start();
    }
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }
    assertTrue(failures.toString(), failures.isEmpty());
    assertEquals(0, opsCount.get());
    assertEquals(0, queue.size());

    for (int i = 1; i <= NUM_TABLES; ++i) {
        final TableName table = TableName.valueOf(String.format("testtb-%04d", i));
        final TestTableProcedure dummyProc = new TestTableProcedure(100, table,
                TableProcedureInterface.TableOperationType.DELETE);
        assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table, dummyProc));
    }
}
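
Two counters are at work in this test: opsCount.decrementAndGet() >= 0 asserts that the workers never consume more items than were enqueued (the atomic decrement can only go negative on over-consumption), while concurrentCount bounds how many workers hold a table at once. A minimal, hypothetical sketch of the first check:

import java.util.concurrent.atomic.AtomicInteger;

public class WorkCounter {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger remaining = new AtomicInteger(8);
        Runnable worker = () -> {
            while (true) {
                // The atomic decrement only goes negative once the work
                // is exhausted, never because of a lost update.
                int left = remaining.decrementAndGet();
                if (left < 0) {
                    break; // nothing left to consume
                }
                System.out.println("consumed one item, remaining=" + left);
            }
        };
        Thread a = new Thread(worker), b = new Thread(worker);
        a.start(); b.start();
        a.join(); b.join();
    }
}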

From source file:com.vmware.admiral.compute.container.HostContainerListDataCollection.java

private void updateNumberOfContainers(String containerHostLink) {
    // There are two operations: get all the containers and get the system containers
    AtomicInteger counter = new AtomicInteger(2);
    ComputeState state = new ComputeState();
    state.customProperties = new HashMap<String, String>();
    QueryTask containerQuery = QueryUtil.buildPropertyQuery(ContainerState.class,
            ContainerState.FIELD_NAME_PARENT_LINK, containerHostLink);
    QueryUtil.addCountOption(containerQuery);

    new ServiceDocumentQuery<ContainerState>(getHost(), ContainerState.class).query(containerQuery, (r) -> {
        if (r.hasException()) {
            logWarning("Failed to retrieve containers for host:", containerHostLink);
        } else {
            logFine("Found %s containers for container host: %s", String.valueOf(r.getCount()),
                    containerHostLink);
            state.customProperties.put(ContainerHostService.NUMBER_OF_CONTAINERS_PER_HOST_PROP_NAME,
                    String.valueOf(r.getCount()));
            if (counter.decrementAndGet() == 0) {
                patchHostState(containerHostLink, state);
            }
        }
    });
    QueryTask systemContainerQuery = QueryUtil.buildPropertyQuery(ContainerState.class,
            ContainerState.FIELD_NAME_PARENT_LINK, containerHostLink);
    QueryUtil.addListValueClause(systemContainerQuery, ContainerState.FIELD_NAME_SYSTEM,
            Arrays.asList(Boolean.TRUE.toString()));
    QueryUtil.addCountOption(systemContainerQuery);
    new ServiceDocumentQuery<ContainerState>(getHost(), ContainerState.class).query(systemContainerQuery,
            (result) -> {
                if (result.hasException()) {
                    logWarning("Failed to retrieve system containers for host:", containerHostLink);
                } else {
                    state.customProperties.put(ContainerHostService.NUMBER_OF_SYSTEM_CONTAINERS_PROP_NAME,
                            String.valueOf(result.getCount()));
                    if (counter.decrementAndGet() == 0) {
                        patchHostState(containerHostLink, state);
                    }
                }
            });
}
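
This is the fan-in (countdown) idiom for joining asynchronous callbacks: the counter starts at the number of outstanding queries, each completion decrements it, and only the last one to finish observes zero and fires the final patch. A generic, self-contained sketch of the pattern:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

public class FanIn {
    public static void main(String[] args) {
        final AtomicInteger pending = new AtomicInteger(2);
        CompletableFuture<?>[] queries = new CompletableFuture<?>[2];
        for (int i = 0; i < 2; i++) {
            queries[i] = CompletableFuture.runAsync(() -> {
                // Each async "query" decrements the counter on completion;
                // exactly one of them observes 0 and runs the final action.
                if (pending.decrementAndGet() == 0) {
                    System.out.println("all queries finished");
                }
            });
        }
        CompletableFuture.allOf(queries).join(); // keep the demo alive
    }
}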

From source file:com.twitter.distributedlog.service.balancer.ClusterBalancer.java

void moveStreams(List<Host> hosts, AtomicInteger hostIdxMoveFrom, int moveFromLowWaterMark,
        AtomicInteger hostIdxMoveTo, int moveToHighWaterMark, Optional<RateLimiter> rateLimiter) {
    if (hostIdxMoveFrom.get() < 0 || hostIdxMoveFrom.get() >= hosts.size() || hostIdxMoveTo.get() < 0
            || hostIdxMoveTo.get() >= hosts.size() || hostIdxMoveFrom.get() >= hostIdxMoveTo.get()) {
        return;
    }

    if (logger.isDebugEnabled()) {
        logger.debug(
                "Moving streams : hosts = {}, from = {}, to = {} : from_low_water_mark = {}, to_high_water_mark = {}",
                new Object[] { hosts, hostIdxMoveFrom.get(), hostIdxMoveTo.get(), moveFromLowWaterMark,
                        moveToHighWaterMark });
    }

    Host hostMoveFrom = hosts.get(hostIdxMoveFrom.get());
    int numStreamsOnFromHost = hostMoveFrom.streams.size();
    if (numStreamsOnFromHost <= moveFromLowWaterMark) {
        // do nothing
        return;
    }

    int numStreamsToMove = numStreamsOnFromHost - moveFromLowWaterMark;
    LinkedList<String> streamsToMove = new LinkedList<String>(hostMoveFrom.streams);
    Collections.shuffle(streamsToMove);

    if (logger.isDebugEnabled()) {
        logger.debug("Try to move {} streams from host {} : streams = {}",
                new Object[] { numStreamsToMove, hostMoveFrom.address, streamsToMove });
    }

    while (numStreamsToMove-- > 0 && !streamsToMove.isEmpty()) {
        if (rateLimiter.isPresent()) {
            rateLimiter.get().acquire();
        }

        // pick a host to move
        Host hostMoveTo = hosts.get(hostIdxMoveTo.get());
        while (hostMoveTo.streams.size() >= moveToHighWaterMark) {
            int hostIdx = hostIdxMoveTo.decrementAndGet();
            logger.info("move to host : {}, from {}", hostIdx, hostIdxMoveFrom.get());
            if (hostIdx <= hostIdxMoveFrom.get()) {
                return;
            } else {
                hostMoveTo = hosts.get(hostIdx);
                if (logger.isDebugEnabled()) {
                    logger.debug("Target host to move moved to host {} @ {}", hostIdx, hostMoveTo);
                }
            }
        }

        // pick a stream
        String stream = streamsToMove.remove();

        // move the stream
        if (moveStream(stream, hostMoveFrom, hostMoveTo)) {
            hostMoveFrom.streams.remove(stream);
            hostMoveTo.streams.add(stream);
        }
    }

}
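
Here the AtomicInteger serves mutability more than thread-safety: Java passes int by value, so the caller hands in hostIdxMoveTo as a cursor whose position survives the call, and decrementAndGet() walks it backwards past hosts that are already at the high-water mark. A minimal, hypothetical sketch of an AtomicInteger used as an in/out index parameter:

import java.util.concurrent.atomic.AtomicInteger;

public class CursorParam {
    // Walks the cursor backwards until it finds a slot below the limit;
    // the caller observes the updated position after the call returns.
    static int findBelow(int[] load, AtomicInteger cursor, int limit) {
        while (load[cursor.get()] >= limit) {
            if (cursor.decrementAndGet() < 0) {
                return -1; // ran out of candidates
            }
        }
        return cursor.get();
    }

    public static void main(String[] args) {
        AtomicInteger cursor = new AtomicInteger(3);
        int idx = findBelow(new int[] { 1, 9, 2, 9 }, cursor, 5);
        System.out.println("found index " + idx + ", cursor now " + cursor.get());
    }
}

(The org.apache.distributedlog excerpt that follows appears to be the same class after DistributedLog's move to the Apache incubator.)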

From source file:org.apache.distributedlog.service.balancer.ClusterBalancer.java

void moveStreams(List<Host> hosts, AtomicInteger hostIdxMoveFrom, int moveFromLowWaterMark,
        AtomicInteger hostIdxMoveTo, int moveToHighWaterMark, Optional<RateLimiter> rateLimiter) {
    if (hostIdxMoveFrom.get() < 0 || hostIdxMoveFrom.get() >= hosts.size() || hostIdxMoveTo.get() < 0
            || hostIdxMoveTo.get() >= hosts.size() || hostIdxMoveFrom.get() >= hostIdxMoveTo.get()) {
        return;
    }

    if (logger.isDebugEnabled()) {
        logger.debug(
                "Moving streams : hosts = {}, from = {}, to = {} :"
                        + " from_low_water_mark = {}, to_high_water_mark = {}",
                new Object[] { hosts, hostIdxMoveFrom.get(), hostIdxMoveTo.get(), moveFromLowWaterMark,
                        moveToHighWaterMark });
    }

    Host hostMoveFrom = hosts.get(hostIdxMoveFrom.get());
    int numStreamsOnFromHost = hostMoveFrom.streams.size();
    if (numStreamsOnFromHost <= moveFromLowWaterMark) {
        // do nothing
        return;
    }

    int numStreamsToMove = numStreamsOnFromHost - moveFromLowWaterMark;
    LinkedList<String> streamsToMove = new LinkedList<String>(hostMoveFrom.streams);
    Collections.shuffle(streamsToMove);

    if (logger.isDebugEnabled()) {
        logger.debug("Try to move {} streams from host {} : streams = {}",
                new Object[] { numStreamsToMove, hostMoveFrom.address, streamsToMove });
    }

    while (numStreamsToMove-- > 0 && !streamsToMove.isEmpty()) {
        if (rateLimiter.isPresent()) {
            rateLimiter.get().acquire();
        }

        // pick a host to move
        Host hostMoveTo = hosts.get(hostIdxMoveTo.get());
        while (hostMoveTo.streams.size() >= moveToHighWaterMark) {
            int hostIdx = hostIdxMoveTo.decrementAndGet();
            logger.info("move to host : {}, from {}", hostIdx, hostIdxMoveFrom.get());
            if (hostIdx <= hostIdxMoveFrom.get()) {
                return;
            } else {
                hostMoveTo = hosts.get(hostIdx);
                if (logger.isDebugEnabled()) {
                    logger.debug("Target host to move moved to host {} @ {}", hostIdx, hostMoveTo);
                }
            }
        }

        // pick a stream
        String stream = streamsToMove.remove();

        // move the stream
        if (moveStream(stream, hostMoveFrom, hostMoveTo)) {
            hostMoveFrom.streams.remove(stream);
            hostMoveTo.streams.add(stream);
        }
    }

}

From source file:com.ibm.jaggr.service.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 * 
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization 
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        cacheBust = aggregator.getOptions().getCacheBust();
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && rawConfig.equals(cached.rawConfig) && !validateDeps && !clean) {
        depMap = cached.depMap;
        return;
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode(PathUtil.getModuleName(path));
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one 
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (nodes with no children or dependency lists)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
}
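
The while (treeBuilderCount.decrementAndGet() >= 0) loop is a compact way to take exactly as many results from a CompletionService as tasks were submitted: the counter was incremented once per submit, so the body runs once per outstanding tree builder. A self-contained sketch of that drain pattern:

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class DrainCompletionService {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CompletionService<Integer> cs = new ExecutorCompletionService<>(pool);

        AtomicInteger submitted = new AtomicInteger(0);
        for (int i = 0; i < 6; i++) {
            final int n = i;
            cs.submit(() -> n * n);
            submitted.incrementAndGet();
        }

        // Runs exactly `submitted` times: the counter reaches -1 only
        // after every submitted task has been taken from the queue.
        while (submitted.decrementAndGet() >= 0) {
            System.out.println("result: " + cs.take().get());
        }
        pool.shutdown();
    }
}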

From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureQueue.java

/**
 * Verify that "write" operations for a single table are serialized,
 * but different tables can be executed in parallel.
 */
@Test(timeout = 90000)
public void testConcurrentWriteOps() throws Exception {
    final TestTableProcSet procSet = new TestTableProcSet(queue);

    final int NUM_ITEMS = 10;
    final int NUM_TABLES = 4;
    final AtomicInteger opsCount = new AtomicInteger(0);
    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
        for (int j = 1; j < NUM_ITEMS; ++j) {
            procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
                    TableProcedureInterface.TableOperationType.EDIT));
            opsCount.incrementAndGet();
        }
    }
    assertEquals(opsCount.get(), queue.size());

    final Thread[] threads = new Thread[NUM_TABLES * 2];
    final HashSet<TableName> concurrentTables = new HashSet<TableName>();
    final ArrayList<String> failures = new ArrayList<String>();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                while (opsCount.get() > 0) {
                    try {
                        TableProcedureInterface proc = procSet.acquire();
                        if (proc == null) {
                            queue.signalAll();
                            if (opsCount.get() > 0) {
                                continue;
                            }
                            break;
                        }
                        synchronized (concurrentTables) {
                            assertTrue("unexpected concurrency on " + proc.getTableName(),
                                    concurrentTables.add(proc.getTableName()));
                        }
                        assertTrue(opsCount.decrementAndGet() >= 0);
                        try {
                            long procId = ((Procedure) proc).getProcId();
                            TableName tableId = proc.getTableName();
                            int concurrent = concurrentCount.incrementAndGet();
                            assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES,
                                    concurrent >= 1 && concurrent <= NUM_TABLES);
                            LOG.debug("[S] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            Thread.sleep(2000);
                            concurrent = concurrentCount.decrementAndGet();
                            LOG.debug("[E] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
                        } finally {
                            synchronized (concurrentTables) {
                                assertTrue(concurrentTables.remove(proc.getTableName()));
                            }
                            procSet.release(proc);
                        }
                    } catch (Throwable e) {
                        LOG.error("Failed " + e.getMessage(), e);
                        synchronized (failures) {
                            failures.add(e.getMessage());
                        }
                    } finally {
                        queue.signalAll();
                    }
                }
            }
        };
        threads[i].start();
    }
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }
    assertTrue(failures.toString(), failures.isEmpty());
    assertEquals(0, opsCount.get());
    assertEquals(0, queue.size());

    for (int i = 1; i <= NUM_TABLES; ++i) {
        TableName table = TableName.valueOf(String.format("testtb-%04d", i));
        assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table));
    }
}

From source file:com.redhat.red.build.koji.KojiClient.java

protected Map<String, KojiClientException> uploadForImport(KojiImport buildInfo,
        Supplier<Iterable<ImportFile>> outputSupplier, String dirname, KojiSessionInfo session)
        throws KojiClientException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try {
        objectMapper.writeValue(baos, buildInfo);
    } catch (IOException e) {
        throw new KojiClientException("Failed to serialize import info to JSON. Reason: %s", e, e.getMessage());
    }

    AtomicInteger count = new AtomicInteger(0);
    uploadService.submit(
            newUploader(new ImportFile(METADATA_JSON_FILE, new ByteArrayInputStream(baos.toByteArray())),
                    dirname, session));

    count.incrementAndGet();

    outputSupplier.get().forEach((importFile) -> {
        uploadService.submit(newUploader(importFile, dirname, session));
        count.incrementAndGet();
    });

    Logger logger = LoggerFactory.getLogger(getClass());
    Map<String, KojiClientException> uploadErrors = new HashMap<>();
    Set<UploadResponse> responses = new HashSet<>();
    int total = count.get();
    do {
        logger.debug("Waiting for %d uploads.", count.get());

        try {
            Future<KojiUploaderResult> future = uploadService.take();
            KojiUploaderResult result = future.get();
            KojiClientException error = result.getError();
            if (error != null) {
                uploadErrors.put(result.getImportFile().getFilePath(), error);
            } else {
                responses.add(result.getResponse());
            }
        } catch (InterruptedException e) {
            logger.debug("Interrupted while uploading. Aborting upload.");
            break;
        } catch (ExecutionException e) {
            throw new KojiClientException("Failed to execute %d uploads for: %s. Reason: %s", e, total,
                    buildInfo, e.getMessage());
        }
    } while (count.decrementAndGet() > 0);

    return uploadErrors;
}
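
The drain here is the do/while variant of the pattern in the DepTree example above: take() runs first and count.decrementAndGet() > 0 is checked after, so an initial count of N yields exactly N takes (the metadata upload submitted before the loop guarantees N >= 1, which the do/while requires). A minimal sketch:

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class DoWhileDrain {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newCachedThreadPool();
        CompletionService<String> uploads = new ExecutorCompletionService<>(pool);

        AtomicInteger count = new AtomicInteger(0);
        for (int i = 0; i < 3; i++) {
            final int n = i;
            uploads.submit(() -> "upload-" + n);
            count.incrementAndGet();
        }

        // do/while: take first, decrement after, so an initial count of N
        // yields exactly N takes (N must be >= 1, or take() would block).
        do {
            System.out.println(uploads.take().get() + " done");
        } while (count.decrementAndGet() > 0);
        pool.shutdown();
    }
}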