Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicInteger.incrementAndGet() from open-source projects.

Prototype

public final int incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
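
Before the project examples below, here is a minimal, self-contained sketch of the method's contract: each call is an atomic read-modify-write, so concurrent increments are never lost. The class name and iteration counts are illustrative only and do not come from any of the projects cited on this page.

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);
        Runnable task = () -> {
            for (int i = 0; i < 1_000; i++) {
                // Atomic read-modify-write; returns the updated value,
                // unlike getAndIncrement(), which returns the previous one.
                counter.incrementAndGet();
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // Always prints 2000; a plain int++ could lose updates here.
        System.out.println(counter.get());
    }
}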

Usage

From source file: org.apache.hadoop.hbase.quotas.TestSpaceQuotasWithSnapshots.java

void waitForStableRegionSizeReport(Connection conn, TableName tn) throws Exception {
    // For some stability in the value before proceeding
    // Helps make sure that we got the actual last value, not some in-between value
    AtomicLong lastValue = new AtomicLong(-1);
    AtomicInteger counter = new AtomicInteger(0);
    TEST_UTIL.waitFor(15_000, 500, new Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
            LOG.debug("Last observed size=" + lastValue.get());
            long actual = getRegionSizeReportForTable(conn, tn);
            if (actual == lastValue.get()) {
                int numMatches = counter.incrementAndGet();
                if (numMatches >= 5) {
                    return true;
                }
                // Not yet..
                return false;
            }
            counter.set(0);
            lastValue.set(actual);
            return false;
        }
    });
}

From source file: org.apache.storm.messaging.netty.NettyTest.java

private void doTestBatch(Map<String, Object> stormConf) throws Exception {
    int numMessages = 100_000;
    LOG.info("Should send and receive many messages (testing with " + numMessages + " messages)");
    ArrayList<TaskMessage> responses = new ArrayList<>();
    AtomicInteger received = new AtomicInteger();
    IContext context = TransportFactory.makeContext(stormConf);
    try {
        try (IConnection server = context.bind(null, 0);
                IConnection client = context.connect(null, "localhost", server.getPort(), remoteBpStatus)) {
            server.registerRecv(mkConnectionCallback((message) -> {
                responses.add(message);
                received.incrementAndGet();
            }));
            waitUntilReady(client, server);

            IntStream.range(1, numMessages)
                    .forEach(i -> send(client, taskId, String.valueOf(i).getBytes(StandardCharsets.UTF_8)));

            Testing.whileTimeout(Testing.TEST_TIMEOUT_MS, () -> responses.size() < numMessages - 1, () -> {
                LOG.info("{} of {} received", responses.size(), numMessages - 1);
                sleep().run();
            });
            IntStream.range(1, numMessages).forEach(i -> {
                assertThat(new String(responses.get(i - 1).message(), StandardCharsets.UTF_8),
                        is(String.valueOf(i)));
            });
        }
    } finally {
        context.term();
    }
}

From source file: org.apache.solr.client.solrj.impl.CloudSolrClientCacheTest.java

public void testCaching() throws Exception {
    String collName = "gettingstarted";
    Set<String> livenodes = new HashSet<>();
    Map<String, ClusterState.CollectionRef> refs = new HashMap<>();
    Map<String, DocCollection> colls = new HashMap<>();

    class Ref extends ClusterState.CollectionRef {
        private String c;

        public Ref(String c) {
            super(null);
            this.c = c;
        }

        @Override
        public boolean isLazilyLoaded() {
            return true;
        }

        @Override
        public DocCollection get() {
            gets.incrementAndGet();
            return colls.get(c);
        }
    }
    Map<String, Function> responses = new HashMap<>();
    NamedList okResponse = new NamedList();
    okResponse.add("responseHeader", new NamedList<>(Collections.singletonMap("status", 0)));

    LBHttpSolrClient mockLbclient = getMockLbHttpSolrClient(responses);
    AtomicInteger lbhttpRequestCount = new AtomicInteger();
    try (CloudSolrClient cloudClient = new CloudSolrClientBuilder(getStateProvider(livenodes, refs))
            .withLBHttpSolrClient(mockLbclient).build()) {
        livenodes.addAll(ImmutableSet.of("192.168.1.108:7574_solr", "192.168.1.108:8983_solr"));
        ClusterState cs = ClusterState.load(1, coll1State.getBytes(UTF_8), Collections.emptySet(),
                "/collections/gettingstarted/state.json");
        refs.put(collName, new Ref(collName));
        colls.put(collName, cs.getCollectionOrNull(collName));
        responses.put("request", o -> {
            int i = lbhttpRequestCount.incrementAndGet();
            if (i == 1)
                return new ConnectException("TEST");
            if (i == 2)
                return new SocketException("TEST");
            if (i == 3)
                return new NoHttpResponseException("TEST");
            return okResponse;
        });
        UpdateRequest update = new UpdateRequest().add("id", "123", "desc", "Something 0");

        cloudClient.request(update, collName);
        assertEquals(2, refs.get(collName).getCount());
    }

}

From source file: org.elasticsearch.client.sniff.SnifferTests.java

public void testSniffOnFailureNotInitialized() {
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    final AtomicInteger scheduleCalls = new AtomicInteger(0);
    Scheduler scheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            scheduleCalls.incrementAndGet();
            return null;
        }

        @Override
        public void shutdown() {
        }
    };

    Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
    for (int i = 0; i < 10; i++) {
        sniffer.sniffOnFailure();
    }
    assertEquals(1, scheduleCalls.get());
    int totalRuns = hostsSniffer.runs.get();
    assertEquals(0, totalRuns);
    int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
    verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
    verifyNoMoreInteractions(restClient);
}

From source file: org.apache.nifi.processors.standard.TestListenSyslog.java

@Test
public void testErrorQueue() throws IOException {
    final List<ListenSyslog.RawSyslogEvent> msgs = new ArrayList<>();
    msgs.add(new ListenSyslog.RawSyslogEvent(VALID_MESSAGE.getBytes(), "sender-01"));
    msgs.add(new ListenSyslog.RawSyslogEvent(VALID_MESSAGE.getBytes(), "sender-01"));

    // Add message that will throw a FlowFileAccessException the first time that we attempt to read
    // the contents but will succeed the second time.
    final AtomicInteger getMessageAttempts = new AtomicInteger(0);
    msgs.add(new ListenSyslog.RawSyslogEvent(VALID_MESSAGE.getBytes(), "sender-01") {
        @Override
        public byte[] getData() {
            final int attempts = getMessageAttempts.incrementAndGet();
            if (attempts == 1) {
                throw new FlowFileAccessException("Unit test failure");
            } else {
                return VALID_MESSAGE.getBytes();
            }
        }
    });

    final CannedMessageProcessor proc = new CannedMessageProcessor(msgs);
    final TestRunner runner = TestRunners.newTestRunner(proc);
    runner.setProperty(ListenSyslog.MAX_BATCH_SIZE, "5");
    runner.setProperty(ListenSyslog.PROTOCOL, ListenSyslog.UDP_VALUE.getValue());
    runner.setProperty(ListenSyslog.PORT, "0");
    runner.setProperty(ListenSyslog.PARSE_MESSAGES, "false");

    runner.run();
    assertEquals(1, proc.getErrorQueueSize());
    runner.assertAllFlowFilesTransferred(ListenSyslog.REL_SUCCESS, 1);
    runner.getFlowFilesForRelationship(ListenSyslog.REL_SUCCESS).get(0)
            .assertContentEquals(VALID_MESSAGE + "\n" + VALID_MESSAGE);

    // running again should pull from the error queue
    runner.clearTransferState();
    runner.run();
    runner.assertAllFlowFilesTransferred(ListenSyslog.REL_SUCCESS, 1);
    runner.getFlowFilesForRelationship(ListenSyslog.REL_SUCCESS).get(0).assertContentEquals(VALID_MESSAGE);
}

From source file: org.apache.lens.driver.hive.TestRemoteHiveDriver.java

/**
 * Test multi thread client.
 *
 * @throws Exception the exception
 */
@Test
public void testMultiThreadClient() throws Exception {
    log.info("@@ Starting multi thread test");
    SessionState.get().setCurrentDatabase(dataBase);
    final SessionState state = SessionState.get();
    // Launch two threads
    createTestTable("test_multithreads");
    Configuration thConf = new Configuration(driverConf);
    thConf.setLong(HiveDriver.HS2_CONNECTION_EXPIRY_DELAY, 10000);
    final HiveDriver thrDriver = new HiveDriver();
    thrDriver.configure(thConf, "hive", "hive1");
    QueryContext ctx = createContext("USE " + dataBase, queryConf, thrDriver);
    thrDriver.execute(ctx);

    // Launch a select query
    final int QUERIES = 5;
    int launchedQueries = 0;
    final int THREADS = 5;
    final long POLL_DELAY = 500;
    List<Thread> thrs = new ArrayList<Thread>();
    List<QueryContext> queries = new ArrayList<>();
    final AtomicInteger errCount = new AtomicInteger();
    for (int q = 0; q < QUERIES; q++) {
        final QueryContext qctx;
        try {
            qctx = createContext("SELECT * FROM test_multithreads", queryConf, thrDriver);
            thrDriver.executeAsync(qctx);
            queries.add(qctx);
        } catch (LensException e) {
            errCount.incrementAndGet();
            log.info(q + " executeAsync error: " + e.getCause());
            continue;
        }
        log.info("@@ Launched query: " + q + " " + qctx.getQueryHandle());
        launchedQueries++;
        // Launch many threads to poll for status
        final QueryHandle handle = qctx.getQueryHandle();
        for (int i = 0; i < THREADS; i++) {
            int thid = q * THREADS + i;
            Thread th = new Thread(new Runnable() {
                @Override
                public void run() {
                    SessionState.setCurrentSessionState(state);
                    for (int i = 0; i < 1000; i++) {
                        try {
                            thrDriver.updateStatus(qctx);
                            if (qctx.getDriverStatus().isFinished()) {
                                log.info("@@ " + handle.getHandleId() + " >> "
                                        + qctx.getDriverStatus().getState());
                                break;
                            }
                            Thread.sleep(POLL_DELAY);
                        } catch (LensException e) {
                            log.error("Got Exception " + e.getCause(), e);
                            errCount.incrementAndGet();
                            break;
                        } catch (InterruptedException e) {
                            log.error("Encountred Interrupted exception", e);
                            break;
                        }
                    }
                }
            });
            thrs.add(th);
            th.setName("Poller#" + (thid));
            th.start();
        }
    }

    for (Thread th : thrs) {
        try {
            th.join(10000);
        } catch (InterruptedException e) {
            log.warn("Not ended yet: " + th.getName());
        }
    }
    for (QueryContext queryContext : queries) {
        thrDriver.closeQuery(queryContext.getQueryHandle());
    }
    Assert.assertEquals(0, thrDriver.getHiveHandleSize());
    log.info("@@ Completed all pollers. Total thrift errors: " + errCount.get());
    assertEquals(launchedQueries, QUERIES);
    assertEquals(thrs.size(), QUERIES * THREADS);
    assertEquals(errCount.get(), 0);
}

From source file: org.apache.bookkeeper.metadata.etcd.EtcdRegistrationTest.java

private void testConcurrentRegistration(boolean readonly) throws Exception {
    final String bookieId;
    if (readonly) {
        bookieId = runtime.getMethodName() + "-readonly:3181";
    } else {
        bookieId = runtime.getMethodName() + ":3181";
    }
    final int numBookies = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numBookies);
    final CyclicBarrier startBarrier = new CyclicBarrier(numBookies);
    final CyclicBarrier completeBarrier = new CyclicBarrier(numBookies);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    final AtomicInteger numSuccesses = new AtomicInteger(0);
    final AtomicInteger numFailures = new AtomicInteger(0);
    for (int i = 0; i < numBookies; i++) {
        executor.submit(() -> {
            try (EtcdRegistrationManager regMgr = new EtcdRegistrationManager(newEtcdClient(), scope, 1)) {
                try {
                    startBarrier.await();
                    regMgr.registerBookie(bookieId, readonly);
                    numSuccesses.incrementAndGet();
                } catch (InterruptedException e) {
                    log.warn("Interrupted at waiting for the other threads to start", e);
                } catch (BrokenBarrierException e) {
                    log.warn("Start barrier is broken", e);
                } catch (BookieException e) {
                    numFailures.incrementAndGet();
                }
                try {
                    completeBarrier.await();
                } catch (InterruptedException e) {
                    log.warn("Interrupted at waiting for the other threads to complete", e);
                } catch (BrokenBarrierException e) {
                    log.warn("Complete barrier is broken", e);
                }
                FutureUtils.complete(doneFuture, null);
            }
        });
    }
    doneFuture.join();
    assertEquals(1, numSuccesses.get());
    assertEquals(numBookies - 1, numFailures.get());
}

From source file: org.apache.hadoop.hbase.zookeeper.lock.TestZKInterProcessReadWriteLock.java

@Test(timeout = 30000)
public void testReadLockDoesNotExcludeReaders() throws Exception {
    final String testName = "testReadLockDoesNotExcludeReaders";
    final ZKInterProcessReadWriteLock readWriteLock = getReadWriteLock(testName);
    final CountDownLatch locksAcquiredLatch = new CountDownLatch(NUM_THREADS);
    final AtomicInteger locksHeld = new AtomicInteger(0);
    List<Future<Void>> results = Lists.newArrayList();
    for (int i = 0; i < NUM_THREADS; ++i) {
        final String threadDesc = testName + i;
        results.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                ZKInterProcessReadLock readLock = readWriteLock.readLock(Bytes.toBytes(threadDesc));
                readLock.acquire();
                try {
                    locksHeld.incrementAndGet();
                    locksAcquiredLatch.countDown();
                    Thread.sleep(1000);
                } finally {
                    readLock.release();
                    locksHeld.decrementAndGet();
                }
                return null;
            }
        }));
    }
    locksAcquiredLatch.await();
    assertEquals(locksHeld.get(), NUM_THREADS);
    MultithreadedTestUtil.assertOnFutures(results);
}

From source file: com.netflix.curator.framework.recipes.queue.TestDistributedQueue.java

@Test
public void testPutListener() throws Exception {
    final int itemQty = 10;

    DistributedQueue<TestQueueItem> queue = null;
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        BlockingQueueConsumer<TestQueueItem> consumer = new BlockingQueueConsumer<TestQueueItem>(
                Mockito.mock(ConnectionStateListener.class));

        queue = QueueBuilder.builder(client, consumer, serializer, QUEUE_PATH).buildQueue();
        queue.start();

        QueueTestProducer producer = new QueueTestProducer(queue, itemQty, 0);

        final AtomicInteger listenerCalls = new AtomicInteger(0);
        QueuePutListener<TestQueueItem> listener = new QueuePutListener<TestQueueItem>() {
            @Override
            public void putCompleted(TestQueueItem item) {
                listenerCalls.incrementAndGet();
            }

            @Override
            public void putMultiCompleted(MultiItem<TestQueueItem> items) {
            }
        };
        queue.getPutListenerContainer().addListener(listener);

        ExecutorService service = Executors.newCachedThreadPool();
        service.submit(producer);

        int iteration = 0;
        while (consumer.size() < itemQty) {
            Assert.assertTrue(++iteration < 10);
            Thread.sleep(1000);
        }

        int i = 0;
        for (TestQueueItem item : consumer.getItems()) {
            Assert.assertEquals(item.str, Integer.toString(i++));
        }

        Assert.assertEquals(listenerCalls.get(), itemQty);
    } finally {
        IOUtils.closeQuietly(queue);
        IOUtils.closeQuietly(client);
    }
}

From source file: org.apache.nifi.processors.standard.Wait.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {

    final ComponentLog logger = getLogger();

    // Signal id is computed from attribute 'RELEASE_SIGNAL_IDENTIFIER' with expression language support
    final PropertyValue signalIdProperty = context.getProperty(RELEASE_SIGNAL_IDENTIFIER);
    final Integer bufferCount = context.getProperty(WAIT_BUFFER_COUNT).asInteger();

    final Map<Relationship, List<FlowFile>> processedFlowFiles = new HashMap<>();
    final Function<Relationship, List<FlowFile>> getFlowFilesFor = r -> processedFlowFiles.computeIfAbsent(r,
            k -> new ArrayList<>());

    final AtomicReference<String> targetSignalId = new AtomicReference<>();
    final AtomicInteger bufferedCount = new AtomicInteger(0);
    final List<FlowFile> failedFilteringFlowFiles = new ArrayList<>();
    final Supplier<FlowFileFilter.FlowFileFilterResult> acceptResultSupplier = () -> bufferedCount
            .incrementAndGet() == bufferCount ? ACCEPT_AND_TERMINATE : ACCEPT_AND_CONTINUE;
    final List<FlowFile> flowFiles = session.get(f -> {

        final String fSignalId = signalIdProperty.evaluateAttributeExpressions(f).getValue();

        // if the computed value is null, or empty, we transfer the FlowFile to failure relationship
        if (StringUtils.isBlank(fSignalId)) {
            // We can't penalize f before getting it from session, so keep it in a temporal list.
            logger.error("FlowFile {} has no attribute for given Release Signal Identifier",
                    new Object[] { f });
            failedFilteringFlowFiles.add(f);
            return ACCEPT_AND_CONTINUE;
        }

        final String targetSignalIdStr = targetSignalId.get();
        if (targetSignalIdStr == null) {
            // This is the first one.
            targetSignalId.set(fSignalId);
            return acceptResultSupplier.get();
        }

        if (targetSignalIdStr.equals(fSignalId)) {
            return acceptResultSupplier.get();
        }

        return REJECT_AND_CONTINUE;

    });

    final String attributeCopyMode = context.getProperty(ATTRIBUTE_COPY_MODE).getValue();
    final boolean replaceOriginalAttributes = ATTRIBUTE_COPY_REPLACE.getValue().equals(attributeCopyMode);
    final AtomicReference<Signal> signalRef = new AtomicReference<>();

    final Consumer<FlowFile> transferToFailure = flowFile -> {
        flowFile = session.penalize(flowFile);
        getFlowFilesFor.apply(REL_FAILURE).add(flowFile);
    };

    final Consumer<Entry<Relationship, List<FlowFile>>> transferFlowFiles = routedFlowFiles -> {
        Relationship relationship = routedFlowFiles.getKey();

        if (REL_WAIT.equals(relationship)) {
            final String waitMode = context.getProperty(WAIT_MODE).getValue();

            if (WAIT_MODE_KEEP_IN_UPSTREAM.getValue().equals(waitMode)) {
                // Transfer to self.
                relationship = Relationship.SELF;
            }
        }

        final List<FlowFile> flowFilesWithSignalAttributes = routedFlowFiles.getValue().stream()
                .map(f -> copySignalAttributes(session, f, signalRef.get(), replaceOriginalAttributes))
                .collect(Collectors.toList());
        session.transfer(flowFilesWithSignalAttributes, relationship);
    };

    failedFilteringFlowFiles.forEach(f -> {
        flowFiles.remove(f);
        transferToFailure.accept(f);
    });

    if (flowFiles.isEmpty()) {
        // If there was nothing but failed FlowFiles while filtering, transfer those and end immediately.
        processedFlowFiles.entrySet().forEach(transferFlowFiles);
        return;
    }

    // the cache client used to interact with the distributed cache
    final AtomicDistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE)
            .asControllerService(AtomicDistributedMapCacheClient.class);
    final WaitNotifyProtocol protocol = new WaitNotifyProtocol(cache);

    final String signalId = targetSignalId.get();
    final Signal signal;

    // get notifying signal
    try {
        signal = protocol.getSignal(signalId);
        signalRef.set(signal);
    } catch (final IOException e) {
        throw new ProcessException(String.format("Failed to get signal for %s due to %s", signalId, e), e);
    }

    String targetCounterName = null;
    long targetCount = 1;
    int releasableFlowFileCount = 1;

    final List<FlowFile> candidates = new ArrayList<>();

    for (FlowFile flowFile : flowFiles) {
        // Set wait start timestamp if it's not set yet
        String waitStartTimestamp = flowFile.getAttribute(WAIT_START_TIMESTAMP);
        if (waitStartTimestamp == null) {
            waitStartTimestamp = String.valueOf(System.currentTimeMillis());
            flowFile = session.putAttribute(flowFile, WAIT_START_TIMESTAMP, waitStartTimestamp);
        }

        long lWaitStartTimestamp;
        try {
            lWaitStartTimestamp = Long.parseLong(waitStartTimestamp);
        } catch (NumberFormatException nfe) {
            logger.error("{} has an invalid value '{}' on FlowFile {}",
                    new Object[] { WAIT_START_TIMESTAMP, waitStartTimestamp, flowFile });
            transferToFailure.accept(flowFile);
            continue;
        }

        // check for expiration
        long expirationDuration = context.getProperty(EXPIRATION_DURATION).asTimePeriod(TimeUnit.MILLISECONDS);
        long now = System.currentTimeMillis();
        if (now > (lWaitStartTimestamp + expirationDuration)) {
            logger.info("FlowFile {} expired after {}ms",
                    new Object[] { flowFile, (now - lWaitStartTimestamp) });
            getFlowFilesFor.apply(REL_EXPIRED).add(flowFile);
            continue;
        }

        // If there's no signal yet, then we don't have to evaluate target counts. Return immediately.
        if (signal == null) {
            if (logger.isDebugEnabled()) {
                logger.debug("No release signal found for {} on FlowFile {} yet",
                        new Object[] { signalId, flowFile });
            }
            getFlowFilesFor.apply(REL_WAIT).add(flowFile);
            continue;
        }

        // Fix target counter name and count from current FlowFile, if those are not set yet.
        if (candidates.isEmpty()) {
            targetCounterName = context.getProperty(SIGNAL_COUNTER_NAME).evaluateAttributeExpressions(flowFile)
                    .getValue();
            try {
                targetCount = Long.valueOf(context.getProperty(TARGET_SIGNAL_COUNT)
                        .evaluateAttributeExpressions(flowFile).getValue());
            } catch (final NumberFormatException e) {
                transferToFailure.accept(flowFile);
                logger.error("Failed to parse targetCount when processing {} due to {}",
                        new Object[] { flowFile, e }, e);
                continue;
            }
            try {
                releasableFlowFileCount = Integer.valueOf(context.getProperty(RELEASABLE_FLOWFILE_COUNT)
                        .evaluateAttributeExpressions(flowFile).getValue());
            } catch (final NumberFormatException e) {
                transferToFailure.accept(flowFile);
                logger.error("Failed to parse releasableFlowFileCount when processing {} due to {}",
                        new Object[] { flowFile, e }, e);
                continue;
            }
        }

        // FlowFile is now validated and added to candidates.
        candidates.add(flowFile);
    }

    boolean waitCompleted = false;
    boolean waitProgressed = false;
    if (signal != null && !candidates.isEmpty()) {

        if (releasableFlowFileCount > 1) {
            signal.releaseCandidatese(targetCounterName, targetCount, releasableFlowFileCount, candidates,
                    released -> getFlowFilesFor.apply(REL_SUCCESS).addAll(released),
                    waiting -> getFlowFilesFor.apply(REL_WAIT).addAll(waiting));
            waitProgressed = !getFlowFilesFor.apply(REL_SUCCESS).isEmpty();

        } else {
            // releasableFlowFileCount = 0 or 1
            boolean reachedTargetCount = StringUtils.isBlank(targetCounterName)
                    ? signal.isTotalCountReached(targetCount)
                    : signal.isCountReached(targetCounterName, targetCount);

            if (reachedTargetCount) {
                if (releasableFlowFileCount == 0) {
                    getFlowFilesFor.apply(REL_SUCCESS).addAll(candidates);
                } else {
                    // releasableFlowFileCount = 1
                    getFlowFilesFor.apply(REL_SUCCESS).add(candidates.remove(0));
                    getFlowFilesFor.apply(REL_WAIT).addAll(candidates);
                    // If releasableFlowFileCount == 0, leave signal as it is,
                    // so that any number of FlowFile can be released as long as target count condition matches.
                    waitCompleted = true;
                }
            } else {
                getFlowFilesFor.apply(REL_WAIT).addAll(candidates);
            }
        }
    }

    // Transfer FlowFiles.
    processedFlowFiles.entrySet().forEach(transferFlowFiles);

    // Update signal if needed.
    try {
        if (waitCompleted) {
            protocol.complete(signalId);
        } else if (waitProgressed) {
            protocol.replace(signal);
        }

    } catch (final IOException e) {
        session.rollback();
        throw new ProcessException(
                String.format("Unable to communicate with cache while updating %s due to %s", signalId, e), e);
    }

}