Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicInteger.incrementAndGet.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
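
Before the real-world usages below, here is a minimal, self-contained sketch (not taken from any of the projects listed; the class name IncrementAndGetDemo is illustrative) showing the semantics: incrementAndGet() atomically adds one and returns the updated value, so concurrent increments are never lost.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);

        // incrementAndGet() returns the *updated* value, unlike
        // getAndIncrement(), which returns the previous value.
        System.out.println(counter.incrementAndGet()); // prints 1

        // The update is atomic: four threads incrementing 1000 times each
        // always add exactly 4000, with no lost updates.
        Thread[] threads = new Thread[4];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet();
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }

        System.out.println(counter.get()); // prints 4001 (1 + 4 * 1000)
    }
}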

Usage

From source file:com.joyent.manta.client.multipart.JobsMultipartManager.java

/**
 * Completes a multipart transfer by assembling the parts on Manta.
 * This is an asynchronous operation and you will need to call
 * {@link #waitForCompletion(MantaMultipartUpload, Duration, int, Function)}
 * to block until the operation completes.
 *
 * @param upload multipart upload object
 * @param partsStream stream of multipart part objects
 * @throws IOException thrown if there is a problem connecting to Manta
 */
@Override
public void complete(final JobsMultipartUpload upload,
        final Stream<? extends MantaMultipartUploadTuple> partsStream) throws IOException {
    Validate.notNull(upload, "Multipart upload object must not be null");
    LOGGER.debug("Completing multipart upload [{}]", upload.getId());

    final String uploadDir = multipartUploadDir(upload.getId());
    final MultipartMetadata metadata = downloadMultipartMetadata(upload.getId());

    final Map<String, MantaMultipartUploadPart> listing = new HashMap<>();
    try (Stream<MantaMultipartUploadPart> listStream = listParts(upload).limit(getMaxParts())) {
        listStream.forEach(p -> listing.put(p.getEtag(), p));
    }

    final String path = metadata.getPath();

    final StringBuilder jobExecText = new StringBuilder("set -o pipefail; mget -q ");

    List<MantaMultipartUploadTuple> missingTuples = new ArrayList<>();

    final AtomicInteger count = new AtomicInteger(0);

    try (Stream<? extends MantaMultipartUploadTuple> distinct = partsStream.sorted().distinct()) {
        distinct.forEach(part -> {
            final int i = count.incrementAndGet();

            if (i > getMaxParts()) {
                String msg = String.format(
                        "Too many multipart parts specified [%d]. The maximum number of parts is %d",
                        count.get(), getMaxParts());
                throw new IllegalArgumentException(msg);
            }

            // Catch and log any gaps in part numbers
            if (i != part.getPartNumber()) {
                missingTuples.add(new MantaMultipartUploadTuple(i, "N/A"));
            } else {
                final MantaMultipartUploadPart o = listing.get(part.getEtag());

                if (o != null) {
                    jobExecText.append(o.getObjectPath()).append(" ");
                } else {
                    missingTuples.add(part);
                }
            }
        });
    }

    if (!missingTuples.isEmpty()) {
        final MantaMultipartException e = new MantaMultipartException(
                "Multipart part(s) specified couldn't be found");

        int missingCount = 0;
        for (MantaMultipartUploadTuple missingPart : missingTuples) {
            String key = String.format("missing_part_%d", ++missingCount);
            e.setContextValue(key, missingPart.toString());
        }

        throw e;
    }

    final String headerFormat = "\"%s: %s\" ";

    jobExecText.append("| mput ").append("-H ")
            .append(String.format(headerFormat, UPLOAD_ID_METADATA_KEY, upload.getId())).append("-H ")
            .append(String.format(headerFormat, JOB_ID_METADATA_KEY, "$MANTA_JOB_ID")).append("-q ");

    if (metadata.getContentType() != null) {
        jobExecText.append("-H 'Content-Type: ").append(metadata.getContentType()).append("' ");
    }

    MantaMetadata objectMetadata = metadata.getObjectMetadata();

    if (objectMetadata != null) {
        Set<Map.Entry<String, String>> entries = objectMetadata.entrySet();

        for (Map.Entry<String, String> entry : entries) {
            jobExecText.append("-H '").append(entry.getKey()).append(": ").append(entry.getValue())
                    .append("' ");
        }
    }
    jobExecText.append(path);

    final MantaJobPhase concatPhase = new MantaJobPhase().setType("reduce").setExec(jobExecText.toString());

    final MantaJobPhase cleanupPhase = new MantaJobPhase().setType("reduce").setExec("mrm -r " + uploadDir);

    MantaJobBuilder.Run run = mantaClient.jobBuilder().newJob(String.format(JOB_NAME_FORMAT, upload.getId()))
            .addPhase(concatPhase).addPhase(cleanupPhase).run();

    // We write the job id to Metadata object so that we can query it easily
    writeJobIdToMetadata(upload.getId(), run.getId());

    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Created job for concatenating parts: {}", run.getId());
    }
}

From source file:org.waarp.openr66.protocol.http.rest.test.HttpTestResponseHandler.java
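
A response handler that uses a per-channel AtomicInteger (stored on the channel's RestFuture) to track how many OPTIONS requests are still in flight: incrementAndGet when a request is sent, decrementAndGet when a response arrives, closing the channel once the counter drains to zero.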

@Override
protected boolean afterDbOptions(Channel channel, RestArgument ra) throws HttpInvalidAuthenticationException {
    HttpTestRestR66Client.count.incrementAndGet();
    boolean newMessage = false;
    AtomicInteger counter = null;
    RestFuture future = channel.attr(HttpRestClientSimpleResponseHandler.RESTARGUMENT).get();
    if (future.getOtherObject() == null) {
        counter = new AtomicInteger();
        future.setOtherObject(counter);
        JsonNode node = ra.getDetailedAllowOption();
        if (!node.isMissingNode()) {
            for (JsonNode jsonNode : node) {
                Iterator<String> iterator = jsonNode.fieldNames();
                while (iterator.hasNext()) {
                    String name = iterator.next();
                    if (!jsonNode.path(name).path(RestArgument.REST_FIELD.JSON_PATH.field).isMissingNode()) {
                        break;
                    }
                    if (name.equals(RootOptionsRestMethodHandler.ROOT)) {
                        continue;
                    }
                    counter.incrementAndGet();
                    HttpTestRestR66Client.options(channel, name);
                    newMessage = true;
                }
            }
        }
    }
    if (!newMessage) {
        counter = (AtomicInteger) future.getOtherObject();
        newMessage = counter.decrementAndGet() > 0;
        if (!newMessage) {
            future.setOtherObject(null);
        }
    }
    if (!newMessage) {
        WaarpSslUtility.closingSslChannel(channel);
    }
    return newMessage;
}

From source file:io.pravega.controller.eventProcessor.impl.SerializedRequestHandlerTest.java
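
This test uses incrementAndGet to count how many times each event is postponed by the request handler, so the assertions at the end can verify the expected postponement behavior.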

@Test(timeout = 10000)
public void testPostponeEvent() throws InterruptedException, ExecutionException {
    AtomicInteger postponeS1e1Count = new AtomicInteger();
    AtomicInteger postponeS1e2Count = new AtomicInteger();
    AtomicBoolean allowCompletion = new AtomicBoolean(false);

    SerializedRequestHandler<TestEvent> requestHandler = new SerializedRequestHandler<TestEvent>(
            executorService()) {
        @Override
        public CompletableFuture<Void> processEvent(TestEvent event) {
            if (!event.future.isDone()) {
                return Futures.failedFuture(new TestPostponeException());
            }
            return event.getFuture();
        }

        @Override
        public boolean toPostpone(TestEvent event, long pickupTime, Throwable exception) {

            boolean retval = true;

            if (allowCompletion.get()) {
                if (event.number == 1) {
                    postponeS1e1Count.incrementAndGet();
                    retval = exception instanceof TestPostponeException && postponeS1e1Count.get() < 2;
                }

                if (event.number == 2) {
                    postponeS1e2Count.incrementAndGet();
                    retval = exception instanceof TestPostponeException
                            && (System.currentTimeMillis() - pickupTime < Duration.ofMillis(100).toMillis());
                }
            }

            return retval;
        }
    };

    List<Pair<TestEvent, CompletableFuture<Void>>> stream1Queue = requestHandler
            .getEventQueueForKey(getKeyForStream("scope", "stream1"));
    assertNull(stream1Queue);
    // post 3 work for stream1
    TestEvent s1e1 = new TestEvent("scope", "stream1", 1);
    CompletableFuture<Void> s1p1 = requestHandler.process(s1e1);
    TestEvent s1e2 = new TestEvent("scope", "stream1", 2);
    CompletableFuture<Void> s1p2 = requestHandler.process(s1e2);
    TestEvent s1e3 = new TestEvent("scope", "stream1", 3);
    CompletableFuture<Void> s1p3 = requestHandler.process(s1e3);

    // post events for some more arbitrary streams in background
    AtomicBoolean stop = new AtomicBoolean(false);

    runBackgroundStreamProcessing("stream2", requestHandler, stop);
    runBackgroundStreamProcessing("stream3", requestHandler, stop);
    runBackgroundStreamProcessing("stream4", requestHandler, stop);

    s1e3.complete();
    // verify that s1p3 completes.
    assertTrue(Futures.await(s1p3));
    // verify that s1e1 and s1e2 are still not complete.
    assertTrue(!s1e1.getFuture().isDone());
    assertTrue(!s1p1.isDone());
    assertTrue(!s1e2.getFuture().isDone());
    assertTrue(!s1p2.isDone());

    // Allow completion
    allowCompletion.set(true);

    assertFalse(Futures.await(s1p1));
    assertFalse(Futures.await(s1p2));
    AssertExtensions.assertThrows("", s1p1::join, e -> Exceptions.unwrap(e) instanceof TestPostponeException);
    AssertExtensions.assertThrows("", s1p2::join, e -> Exceptions.unwrap(e) instanceof TestPostponeException);
    assertTrue(postponeS1e1Count.get() == 2);
    assertTrue(postponeS1e2Count.get() > 0);
    stop.set(true);
}

From source file:org.apache.hadoop.hbase.master.CatalogJanitor.java

/**
 * Scans hbase:meta and returns the number of scanned rows, a map of merged
 * regions, and an ordered map of split parents. If the given table name is
 * null, returns the merged regions and split parents of all tables; otherwise
 * only those of the specified table.
 * @param tableName null represents all tables
 * @return triple of scanned rows, map of merged regions, and map of split
 *         parent regioninfos
 * @throws IOException
 */
Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents(
        final TableName tableName) throws IOException {
    final boolean isTableSpecified = (tableName != null);
    // TODO: Only works with single hbase:meta region currently.  Fix.
    final AtomicInteger count = new AtomicInteger(0);
    // Keep Map of found split parents.  There are candidates for cleanup.
    // Use a comparator that has split parents come before its daughters.
    final Map<HRegionInfo, Result> splitParents = new TreeMap<HRegionInfo, Result>(
            new SplitParentFirstComparator());
    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
    // This visitor collects split parents and counts rows in the hbase:meta table

    MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result r) throws IOException {
            if (r == null || r.isEmpty())
                return true;
            count.incrementAndGet();
            HRegionInfo info = HRegionInfo.getHRegionInfo(r);
            if (info == null)
                return true; // Keep scanning
            if (isTableSpecified && info.getTable().compareTo(tableName) > 0) {
                // Another table, stop scanning
                return false;
            }
            if (info.isSplitParent())
                splitParents.put(info, r);
            if (r.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) {
                mergedRegions.put(info, r);
            }
            // Returning true means "keep scanning"
            return true;
        }
    };

    // Run full scan of hbase:meta catalog table passing in our custom visitor with
    // the start row
    MetaScanner.metaScan(server.getConfiguration(), null, visitor, tableName);

    return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(count.get(), mergedRegions,
            splitParents);
}

From source file:org.glassfish.jersey.examples.sseitemstore.jaxrs.JaxrsItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link JaxrsItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<>(MAX_LISTENERS);
    final SseEventSource[] sources = new SseEventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final SseEventSource es = SseEventSource.target(itemsTarget.path("events")).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        indexQueues.add(indexes);

        es.register(inboundEvent -> {
            try {
                if (null == inboundEvent.getName()) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId() + " data="
                            + data);
                    indexes.add(items.indexOf(data));
                } else if ("size".equals(inboundEvent.getName())) {
                    sizeEventsCount.incrementAndGet();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                indexes.add(-999);
            } finally {
                latch.countDown();
            }
        });
    }

    try {
        open(sources);
        items.forEach((item) -> postItem(itemsTarget, item));

        assertTrue("Waiting to receive all events has timed out.",
                latch.await((1000 + MAX_LISTENERS * RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    items.forEach(
            (item) -> assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item)));

    final AtomicInteger queueId = new AtomicInteger(0);
    indexQueues.forEach((indexes) -> {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId.get(),
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId.get(), items.size(),
                indexes.size());
        queueId.incrementAndGet();
    });

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}

From source file:de.blizzy.rust.lootconfig.LootConfigDump.java
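
Because local variables captured by lambdas must be effectively final, this example uses AtomicInteger (alongside Guava's AtomicDouble) as a mutable counter inside nested forEach lambdas, tallying how many categories have been printed per loot container.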

private void run() throws IOException {
    LootConfig config = loadConfig(configFile);

    Table<LootContainer, Category, Multiset<Float>> dropChances = HashBasedTable.create();

    Collection<LootContainer> lootContainers = config.LootContainers.values();
    config.Categories.values().stream().filter(Category::hasItemsOrBlueprints).forEach(category -> {
        lootContainers.forEach(lootContainer -> {
            Multiset<Float> categoryInContainerDropChances = getItemCategoryDropChances(category,
                    lootContainer);
            if (!categoryInContainerDropChances.isEmpty()) {
                dropChances.put(lootContainer, category, categoryInContainerDropChances);
            }
        });
    });

    dropChances.rowKeySet().stream()
            .filter(lootContainer -> SHOW_DMLOOT || !lootContainer.name.contains("dmloot"))
            .sorted((lootContainer1, lootContainer2) -> Collator.getInstance().compare(lootContainer1.name,
                    lootContainer2.name))
            .forEach(lootContainer -> {
                System.out.printf("%s (blueprint fragments: %s)", lootContainer,
                        lootContainer.DistributeFragments ? "yes" : "no").println();
                Map<Category, Multiset<Float>> lootContainerDropChances = dropChances.row(lootContainer);
                AtomicDouble lootContainerDropChancesSum = new AtomicDouble();
                AtomicInteger categoriesCount = new AtomicInteger();
                lootContainerDropChances.entrySet().stream().sorted(this::compareByChances).limit(7)
                        .forEach(categoryDropChancesEntry -> {
                            Category category = categoryDropChancesEntry.getKey();
                            Multiset<Float> categoryDropChances = categoryDropChancesEntry.getValue();
                            float categoryDropChancesSum = sum(categoryDropChances);
                            lootContainerDropChancesSum.addAndGet(categoryDropChancesSum);
                            System.out.printf("  %s %s%s%s", formatPercent(categoryDropChancesSum), category,
                                    (category.Items.size() > 0) ? " (" + formatItems(category) + ")" : "",
                                    (category.Blueprints.size() > 0) ? " [" + formatBlueprints(category) + "]"
                                            : "")
                                    .println();
                            categoriesCount.incrementAndGet();
                        });
                if (categoriesCount.get() < lootContainerDropChances.size()) {
                    System.out.printf("  %s other (%d)",
                            formatPercent(1f - (float) lootContainerDropChancesSum.get()),
                            lootContainerDropChances.size() - categoriesCount.get()).println();
                }
            });
}

From source file:com.alibaba.druid.benckmark.pool.Case3.java
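
A connection-pool benchmark: each worker thread calls count.incrementAndGet() per completed query loop and errorCount.incrementAndGet() on failure, so the totals can be asserted once all threads have counted down the end latch.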

private void p0(final DataSource dataSource, String name, int threadCount) throws Exception {
    final AtomicInteger count = new AtomicInteger();
    final AtomicInteger errorCount = new AtomicInteger();

    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch endLatch = new CountDownLatch(threadCount);
    for (int i = 0; i < threadCount; ++i) {
        Thread thread = new Thread() {

            public void run() {
                try {
                    startLatch.await();

                    for (int i = 0; i < LOOP_COUNT; ++i) {
                        Connection conn = dataSource.getConnection();
                        Statement stmt = conn.createStatement();
                        ResultSet rs = stmt.executeQuery(sql);
                        while (rs.next()) {
                            rs.getInt(1);
                        }
                        rs.close();
                        stmt.close();

                        conn.close();
                        count.incrementAndGet();
                    }
                } catch (Throwable ex) {
                    errorCount.incrementAndGet();
                    ex.printStackTrace();
                } finally {
                    endLatch.countDown();
                }
            }
        };
        thread.start();
    }
    long startMillis = System.currentTimeMillis();
    long startYGC = TestUtil.getYoungGC();
    long startFullGC = TestUtil.getFullGC();
    startLatch.countDown();
    endLatch.await();

    long millis = System.currentTimeMillis() - startMillis;
    long ygc = TestUtil.getYoungGC() - startYGC;
    long fullGC = TestUtil.getFullGC() - startFullGC;

    Assert.assertEquals(LOOP_COUNT * threadCount, count.get());
    Thread.sleep(1);

    System.out.println("thread " + threadCount + " " + name + " millis : "
            + NumberFormat.getInstance().format(millis) + ", YGC " + ygc + " FGC " + fullGC);
}

From source file:org.eclipse.hono.service.registration.impl.FileBasedRegistrationService.java
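
Here incrementAndGet tallies device identities as they are parsed inside an asynchronous file-read handler, so the total can be logged once the whole file has been processed.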

private void loadRegistrationData() {
    if (filename != null) {
        final FileSystem fs = vertx.fileSystem();
        log.debug("trying to load device registration information from file {}", filename);
        if (fs.existsBlocking(filename)) {
            final AtomicInteger deviceCount = new AtomicInteger();
            fs.readFile(filename, readAttempt -> {
                if (readAttempt.succeeded()) {
                    JsonArray allObjects = new JsonArray(new String(readAttempt.result().getBytes()));
                    for (Object obj : allObjects) {
                        JsonObject tenant = (JsonObject) obj;
                        String tenantId = tenant.getString(FIELD_TENANT);
                        Map<String, JsonObject> deviceMap = new HashMap<>();
                        for (Object deviceObj : tenant.getJsonArray(ARRAY_DEVICES)) {
                            JsonObject device = (JsonObject) deviceObj;
                            deviceMap.put(device.getString(FIELD_HONO_ID), device.getJsonObject(FIELD_DATA));
                            deviceCount.incrementAndGet();
                        }
                        identities.put(tenantId, deviceMap);
                    }
                    log.info("successfully loaded {} device identities from file [{}]", deviceCount.get(),
                            filename);
                } else {
                    log.warn("could not load device identities from file [{}]", filename, readAttempt.cause());
                }
            });
        } else {
            log.debug("device identity file {} does not exist (yet)", filename);
        }
    }
}

From source file:com.netflix.curator.framework.recipes.barriers.TestDistributedDoubleBarrier.java
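
Each client calls count.incrementAndGet() after entering the distributed barrier; a synchronized block records the maximum value seen, which the test asserts equals QTY, confirming that all clients were inside the barrier at the same time.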

@Test
public void testMultiClient() throws Exception {
    final Timing timing = new Timing();
    final CountDownLatch postEnterLatch = new CountDownLatch(QTY);
    final CountDownLatch postLeaveLatch = new CountDownLatch(QTY);
    final AtomicInteger count = new AtomicInteger(0);
    final AtomicInteger max = new AtomicInteger(0);
    List<Future<Void>> futures = Lists.newArrayList();
    ExecutorService service = Executors.newCachedThreadPool();
    for (int i = 0; i < QTY; ++i) {
        Future<Void> future = service.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                        timing.session(), timing.connection(), new RetryOneTime(1));
                try {
                    client.start();
                    DistributedDoubleBarrier barrier = new DistributedDoubleBarrier(client, "/barrier", QTY);

                    Assert.assertTrue(barrier.enter(timing.seconds(), TimeUnit.SECONDS));

                    synchronized (TestDistributedDoubleBarrier.this) {
                        int thisCount = count.incrementAndGet();
                        if (thisCount > max.get()) {
                            max.set(thisCount);
                        }
                    }

                    postEnterLatch.countDown();
                    Assert.assertTrue(timing.awaitLatch(postEnterLatch));

                    Assert.assertEquals(count.get(), QTY);

                    Assert.assertTrue(barrier.leave(timing.seconds(), TimeUnit.SECONDS));
                    count.decrementAndGet();

                    postLeaveLatch.countDown();
                    Assert.assertTrue(timing.awaitLatch(postLeaveLatch));
                } finally {
                    IOUtils.closeQuietly(client);
                }

                return null;
            }
        });
        futures.add(future);
    }

    for (Future<Void> f : futures) {
        f.get();
    }
    Assert.assertEquals(count.get(), 0);
    Assert.assertEquals(max.get(), QTY);
}

From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.java

/**
 * Verify that "write" operations for a single table are serialized,
 * but different tables can be executed in parallel.
 */
@Test(timeout = 90000)
public void testConcurrentWriteOps() throws Exception {
    final TestTableProcSet procSet = new TestTableProcSet(queue);

    final int NUM_ITEMS = 10;
    final int NUM_TABLES = 4;
    final AtomicInteger opsCount = new AtomicInteger(0);
    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName tableName = TableName.valueOf(String.format("testtb-%04d", i));
        for (int j = 1; j < NUM_ITEMS; ++j) {
            procSet.addBack(new TestTableProcedure(i * 100 + j, tableName,
                    TableProcedureInterface.TableOperationType.EDIT));
            opsCount.incrementAndGet();
        }
    }
    assertEquals(opsCount.get(), queue.size());

    final Thread[] threads = new Thread[NUM_TABLES * 2];
    final HashSet<TableName> concurrentTables = new HashSet<TableName>();
    final ArrayList<String> failures = new ArrayList<String>();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                while (opsCount.get() > 0) {
                    try {
                        Procedure proc = procSet.acquire();
                        if (proc == null) {
                            queue.signalAll();
                            if (opsCount.get() > 0) {
                                continue;
                            }
                            break;
                        }

                        TableName tableId = procSet.getTableName(proc);
                        synchronized (concurrentTables) {
                            assertTrue("unexpected concurrency on " + tableId, concurrentTables.add(tableId));
                        }
                        assertTrue(opsCount.decrementAndGet() >= 0);
                        try {
                            long procId = proc.getProcId();
                            int concurrent = concurrentCount.incrementAndGet();
                            assertTrue("inc-concurrent=" + concurrent + " 1 <= concurrent <= " + NUM_TABLES,
                                    concurrent >= 1 && concurrent <= NUM_TABLES);
                            LOG.debug("[S] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            Thread.sleep(2000);
                            concurrent = concurrentCount.decrementAndGet();
                            LOG.debug("[E] tableId=" + tableId + " procId=" + procId + " concurrent="
                                    + concurrent);
                            assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES);
                        } finally {
                            synchronized (concurrentTables) {
                                assertTrue(concurrentTables.remove(tableId));
                            }
                            procSet.release(proc);
                        }
                    } catch (Throwable e) {
                        LOG.error("Failed " + e.getMessage(), e);
                        synchronized (failures) {
                            failures.add(e.getMessage());
                        }
                    } finally {
                        queue.signalAll();
                    }
                }
            }
        };
        threads[i].start();
    }
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }
    assertTrue(failures.toString(), failures.isEmpty());
    assertEquals(0, opsCount.get());
    assertEquals(0, queue.size());

    for (int i = 0; i < NUM_TABLES; ++i) {
        TableName table = TableName.valueOf(String.format("testtb-%04d", i));
        assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table));
    }
}