Example usage for java.util.concurrent.atomic AtomicLong set

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicLong.set drawn from open-source projects.

Prototype

public final void set(long newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
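
A minimal, self-contained sketch of the call itself (class and variable names are illustrative): set performs a volatile-style write, so the new value becomes visible to any thread that subsequently reads the AtomicLong.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongSetExample {
    public static void main(String[] args) {
        AtomicLong value = new AtomicLong(0L);

        // set(long) stores the new value with volatile memory effects:
        // a later get() on any thread observes 42 (or a newer value).
        value.set(42L);

        System.out.println(value.get()); // prints 42
    }
}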

Usage

From source file:org.apache.nifi.processors.standard.TailFile.java

private void processTailFile(final ProcessContext context, final ProcessSession session,
        final String tailFile) {
    // If user changes the file that is being tailed, we need to consume the already-rolled-over data according
    // to the Initial Start Position property
    boolean rolloverOccurred;
    TailFileObject tfo = states.get(tailFile);

    if (tfo.isTailFileChanged()) {
        rolloverOccurred = false;
        final String recoverPosition = context.getProperty(START_POSITION).getValue();

        if (START_BEGINNING_OF_TIME.getValue().equals(recoverPosition)) {
            recoverRolledFiles(context, session, tailFile, tfo.getExpectedRecoveryChecksum(),
                    tfo.getState().getTimestamp(), tfo.getState().getPosition());
        } else if (START_CURRENT_FILE.getValue().equals(recoverPosition)) {
            cleanup();
            tfo.setState(new TailFileState(tailFile, null, null, 0L, 0L, 0L, null, tfo.getState().getBuffer()));
        } else {
            final String filename = tailFile;
            final File file = new File(filename);

            try {
                final FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
                getLogger().debug("Created FileChannel {} for {}", new Object[] { fileChannel, file });

                final Checksum checksum = new CRC32();
                final long position = file.length();
                final long timestamp = file.lastModified();

                try (final InputStream fis = new FileInputStream(file);
                        final CheckedInputStream in = new CheckedInputStream(fis, checksum)) {
                    StreamUtils.copy(in, new NullOutputStream(), position);
                }

                fileChannel.position(position);
                cleanup();
                tfo.setState(new TailFileState(filename, file, fileChannel, position, timestamp, file.length(),
                        checksum, tfo.getState().getBuffer()));
            } catch (final IOException ioe) {
                getLogger().error(
                        "Attempted to position Reader at current position in file {} but failed to do so due to {}",
                        new Object[] { file, ioe.toString() }, ioe);
                context.yield();
                return;
            }
        }

        tfo.setTailFileChanged(false);
    } else {
        // Recover any data that may have rolled over since the last time that this processor ran.
        // If expectedRecoveryChecksum != null, that indicates that this is the first iteration since processor was started, so use whatever checksum value
        // was present when the state was last persisted. In this case, we must then null out the value so that the next iteration won't keep using the "recovered"
        // value. If the value is null, then we know that either the processor has already recovered that data, or there was no state persisted. In either case,
        // use whatever checksum value is currently in the state.
        Long expectedChecksumValue = tfo.getExpectedRecoveryChecksum();
        if (expectedChecksumValue == null) {
            expectedChecksumValue = tfo.getState().getChecksum() == null ? null
                    : tfo.getState().getChecksum().getValue();
        }

        rolloverOccurred = recoverRolledFiles(context, session, tailFile, expectedChecksumValue,
                tfo.getState().getTimestamp(), tfo.getState().getPosition());
        tfo.setExpectedRecoveryChecksum(null);
    }

    // initialize local variables from state object; this is done so that we can easily change the values throughout
    // the onTrigger method and then create a new state object after we finish processing the files.
    TailFileState state = tfo.getState();
    File file = state.getFile();
    FileChannel reader = state.getReader();
    Checksum checksum = state.getChecksum();
    if (checksum == null) {
        checksum = new CRC32();
    }
    long position = state.getPosition();
    long timestamp = state.getTimestamp();
    long length = state.getLength();

    // Create a reader if necessary.
    if (file == null || reader == null) {
        file = new File(tailFile);
        reader = createReader(file, position);
        if (reader == null) {
            context.yield();
            return;
        }
    }

    final long startNanos = System.nanoTime();

    // Check if file has rotated
    if (rolloverOccurred || (timestamp <= file.lastModified() && length > file.length())
            || (timestamp < file.lastModified() && length >= file.length())) {

        // Since file has rotated, we close the reader, create a new one, and then reset our state.
        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader, reader });
        } catch (final IOException ioe) {
            getLogger().warn("Failed to close reader for {} due to {}", new Object[] { file, ioe });
        }

        reader = createReader(file, 0L);
        position = 0L;
        checksum.reset();
    }

    if (file.length() == position || !file.exists()) {
        // no data to consume so rather than continually running, yield to allow other processors to use the thread.
        getLogger().debug("No data to consume; created no FlowFiles");
        tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
                state.getBuffer()));
        persistState(tfo, context);
        context.yield();
        return;
    }

    // If there is data to consume, read as much as we can.
    final TailFileState currentState = state;
    final Checksum chksum = checksum;
    // data has been written to file. Stream it to a new FlowFile.
    FlowFile flowFile = session.create();

    final FileChannel fileReader = reader;
    final AtomicLong positionHolder = new AtomicLong(position);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream rawOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(rawOut)) {
                positionHolder.set(readLines(fileReader, currentState.getBuffer(), out, chksum));
            }
        }
    });

    // If there ended up being no data, just remove the FlowFile
    if (flowFile.getSize() == 0) {
        session.remove(flowFile);
        getLogger().debug("No data to consume; removed created FlowFile");
    } else {
        // determine filename for FlowFile by using <base filename of log file>.<initial offset>-<final offset>.<extension>
        final String tailFilename = file.getName();
        final String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        final String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            flowFileName = baseName + "." + position + "-" + positionHolder.get() + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            flowFileName = baseName + "." + position + "-" + positionHolder.get();
        }

        final Map<String, String> attributes = new HashMap<>(3);
        attributes.put(CoreAttributes.FILENAME.key(), flowFileName);
        attributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
        attributes.put("tailfile.original.path", tailFile);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, file.toURI().toString(),
                "FlowFile contains bytes " + position + " through " + positionHolder.get() + " of source file",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        session.transfer(flowFile, REL_SUCCESS);
        position = positionHolder.get();

        // Set timestamp to the latest of when the file was modified and the current timestamp stored in the state.
        // We do this because when we read a file that has been rolled over, we set the state to 1 millisecond later than the last mod date
        // in order to avoid ingesting that file again. If we then read from this file during the same second (or millisecond, depending on the
        // operating system file last mod precision), then we could set the timestamp to a smaller value, which could result in reading in the
        // rotated file a second time.
        timestamp = Math.max(state.getTimestamp(), file.lastModified());
        length = file.length();
        getLogger().debug("Created {} and routed to success", new Object[] { flowFile });
    }

    // Create a new state object to represent our current position, timestamp, etc.
    tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
            state.getBuffer()));

    // We must commit session before persisting state in order to avoid data loss on restart
    session.commit();
    persistState(tfo, context);
}
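
The TailFile example above shows the most common reason set appears in these processors: the anonymous OutputStreamCallback can only capture (effectively) final local variables, so the code captures a final AtomicLong and calls set inside the callback to carry the new file position back out. A stripped-down sketch of that holder pattern follows; the StreamCallback interface and doCopy helper are stand-ins, not NiFi's API.

import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicLong;

public class PositionHolderSketch {

    // Stand-in for a callback interface such as NiFi's OutputStreamCallback.
    interface StreamCallback {
        void process(OutputStream out) throws IOException;
    }

    static long writeAndTrackPosition(final long startPosition, OutputStream target) throws IOException {
        // Locals captured by an anonymous class must be effectively final,
        // so a mutable AtomicLong serves as the holder for the updated position.
        final AtomicLong positionHolder = new AtomicLong(startPosition);

        StreamCallback callback = new StreamCallback() {
            @Override
            public void process(OutputStream out) throws IOException {
                long bytesWritten = doCopy(out);                  // hypothetical copy step
                positionHolder.set(startPosition + bytesWritten); // publish the result out of the callback
            }
        };

        callback.process(target);
        return positionHolder.get(); // value written by set inside the callback
    }

    private static long doCopy(OutputStream out) throws IOException {
        byte[] data = "example".getBytes();
        out.write(data);
        return data.length;
    }
}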

From source file:org.apache.nifi.processors.hive.SelectHiveQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile fileToProcess = (context.hasIncomingConnection() ? session.get() : null);
    FlowFile flowfile = null;

    // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
    // However, if we have no FlowFile and we have connections coming from other Processors, then
    // we know that we should run only if we have a FlowFile.
    if (context.hasIncomingConnection()) {
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final HiveDBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE)
            .asControllerService(HiveDBCPService.class);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    final boolean flowbased = !(context.getProperty(HIVEQL_SELECT_QUERY).isSet());

    // Source the SQL
    final String selectQuery;

    if (context.getProperty(HIVEQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(HIVEQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, new InputStreamCallback() {
            @Override
            public void process(InputStream in) throws IOException {
                queryContents.append(IOUtils.toString(in));
            }
        });
        selectQuery = queryContents.toString();
    }

    final String outputFormat = context.getProperty(HIVEQL_OUTPUT_FORMAT).getValue();
    final StopWatch stopWatch = new StopWatch(true);
    final boolean header = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String altHeader = context.getProperty(HIVEQL_CSV_ALT_HEADER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final String delimiter = context.getProperty(HIVEQL_CSV_DELIMITER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final boolean quote = context.getProperty(HIVEQL_CSV_QUOTE).asBoolean();
    // Note: this reads the CSV header property; the escape flag was presumably meant to come from the CSV escape property.
    final boolean escape = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();

    try (final Connection con = dbcpService.getConnection();
            final Statement st = (flowbased ? con.prepareStatement(selectQuery) : con.createStatement())) {

        final AtomicLong nrOfRows = new AtomicLong(0L);
        if (fileToProcess == null) {
            flowfile = session.create();
        } else {
            flowfile = fileToProcess;
        }

        flowfile = session.write(flowfile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing query {}", new Object[] { selectQuery });
                    if (flowbased) {
                        // Hive JDBC Doesn't Support this yet:
                        // ParameterMetaData pmd = ((PreparedStatement)st).getParameterMetaData();
                        // int paramCount = pmd.getParameterCount();

                        // Alternate way to determine number of params in SQL.
                        int paramCount = StringUtils.countMatches(selectQuery, "?");

                        if (paramCount > 0) {
                            setParameters(1, (PreparedStatement) st, paramCount, fileToProcess.getAttributes());
                        }
                    }

                    final ResultSet resultSet = (flowbased ? ((PreparedStatement) st).executeQuery()
                            : st.executeQuery(selectQuery));

                    if (AVRO.equals(outputFormat)) {
                        nrOfRows.set(HiveJdbcCommon.convertToAvroStream(resultSet, out));
                    } else if (CSV.equals(outputFormat)) {
                        CsvOutputOptions options = new CsvOutputOptions(header, altHeader, delimiter, quote,
                                escape);
                        nrOfRows.set(HiveJdbcCommon.convertToCsvStream(resultSet, out, options));
                    } else {
                        nrOfRows.set(0L);
                        throw new ProcessException("Unsupported output format: " + outputFormat);
                    }
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }
            }
        });

        // Set attribute for how many rows were selected
        flowfile = session.putAttribute(flowfile, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        // Set MIME type on output document and add extension to filename
        if (AVRO.equals(outputFormat)) {
            flowfile = session.putAttribute(flowfile, CoreAttributes.MIME_TYPE.key(), AVRO_MIME_TYPE);
            flowfile = session.putAttribute(flowfile, CoreAttributes.FILENAME.key(),
                    flowfile.getAttribute(CoreAttributes.FILENAME.key()) + ".avro");
        } else if (CSV.equals(outputFormat)) {
            flowfile = session.putAttribute(flowfile, CoreAttributes.MIME_TYPE.key(), CSV_MIME_TYPE);
            flowfile = session.putAttribute(flowfile, CoreAttributes.FILENAME.key(),
                    flowfile.getAttribute(CoreAttributes.FILENAME.key()) + ".csv");
        }

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { flowfile, nrOfRows.get() });

        if (context.hasIncomingConnection()) {
            // If the flow file came from an incoming connection, issue a Modify Content provenance event

            session.getProvenanceReporter().modifyContent(flowfile, "Retrieved " + nrOfRows.get() + " rows",
                    stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        } else {
            // If we created a flow file from rows received from Hive, issue a Receive provenance event
            session.getProvenanceReporter().receive(flowfile, dbcpService.getConnectionURL(),
                    stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        }
        session.transfer(flowfile, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        logger.error("Issue processing SQL {} due to {}.", new Object[] { selectQuery, e });
        if (flowfile == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute HiveQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute HiveQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, flowfile, e });
                flowfile = session.penalize(flowfile);
            } else {
                logger.error("Unable to execute HiveQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(flowfile, REL_FAILURE);
        }
    } finally {

    }
}

From source file:org.hyperledger.fabric.sdkintegration.End2endMTIT.java

void runChannel(HFClient client, Channel channel, final int workerId, final int runId, SampleOrg sampleOrg,
        final int delta, final int start) {
    int ret = -1;

    class ChaincodeEventCapture { //A test class to capture chaincode events
        final String handle;
        final BlockEvent blockEvent;
        final ChaincodeEvent chaincodeEvent;

        ChaincodeEventCapture(String handle, BlockEvent blockEvent, ChaincodeEvent chaincodeEvent) {
            this.handle = handle;
            this.blockEvent = blockEvent;
            this.chaincodeEvent = chaincodeEvent;
        }
    }
    Vector<ChaincodeEventCapture> chaincodeEvents = new Vector<>(); // Test list to capture chaincode events.

    try {

        final String channelName = channel.getName();
        boolean isFooChain = FOO_CHANNEL_NAME.equals(channelName);
        out("Running channel %s", channelName);

        Collection<Orderer> orderers = channel.getOrderers();
        final ChaincodeID chaincodeID;
        Collection<ProposalResponse> responses;
        Collection<ProposalResponse> successful = new LinkedList<>();
        Collection<ProposalResponse> failed = new LinkedList<>();

        // Register a chaincode event listener that will trigger for any chaincode id and only for EXPECTED_EVENT_NAME event.

        //            String chaincodeEventListenerHandle = channel.registerChaincodeEventListener(Pattern.compile(".*"),
        //                    Pattern.compile(Pattern.quote(EXPECTED_EVENT_NAME)),
        //                    (handle, blockEvent, chaincodeEvent) -> {
        //
        //                        chaincodeEvents.add(new ChaincodeEventCapture(handle, blockEvent, chaincodeEvent));
        //
        //                        String es = blockEvent.getPeer() != null ? blockEvent.getPeer().getName() : blockEvent.getEventHub().getName();
        //                        out("RECEIVED Chaincode event with handle: %s, chaincode Id: %s, chaincode event name: %s, "
        //                                        + "transaction id: %s, event payload: \"%s\", from eventhub: %s",
        //                                handle, chaincodeEvent.getChaincodeId(),
        //                                chaincodeEvent.getEventName(),
        //                                chaincodeEvent.getTxId(),
        //                                new String(chaincodeEvent.getPayload()), es);
        //
        //                    });

        //For non foo channel unregister event listener to test events are not called.
        //            if (!isFooChain) {
        //                channel.unregisterChaincodeEventListener(chaincodeEventListenerHandle);
        //                chaincodeEventListenerHandle = null;
        //
        //            }

        ChaincodeID.Builder chaincodeIDBuilder = ChaincodeID.newBuilder().setName(CHAIN_CODE_NAME)
                .setVersion(CHAIN_CODE_VERSION);
        if (null != CHAIN_CODE_PATH) {
            chaincodeIDBuilder.setPath(CHAIN_CODE_PATH);

        }
        chaincodeID = chaincodeIDBuilder.build();

        successful.clear();
        failed.clear();

        final User user = sampleOrg.getUser(TESTUSER_1_NAME);

        ///////////////
        /// Send transaction proposal to all peers
        TransactionProposalRequest transactionProposalRequest = client.newTransactionProposalRequest();
        transactionProposalRequest.setChaincodeID(chaincodeID);
        transactionProposalRequest.setChaincodeLanguage(CHAIN_CODE_LANG);
        transactionProposalRequest.setUserContext(user);
        //transactionProposalRequest.setFcn("invoke");
        transactionProposalRequest.setFcn("move");
        transactionProposalRequest.setProposalWaitTime(testConfig.getProposalWaitTime());
        transactionProposalRequest.setArgs("a" + workerId, "b" + workerId, delta + "");

        Map<String, byte[]> tm2 = new HashMap<>();
        tm2.put("HyperLedgerFabric", "TransactionProposalRequest:JavaSDK".getBytes(UTF_8)); //Just some extra junk in transient map
        tm2.put("method", "TransactionProposalRequest".getBytes(UTF_8)); // ditto
        tm2.put("result", ":)".getBytes(UTF_8)); // This should be returned see chaincode why.
        tm2.put(EXPECTED_EVENT_NAME, EXPECTED_EVENT_DATA); //This should trigger an event see chaincode why.

        transactionProposalRequest.setTransientMap(tm2);

        out("Sending transactionProposal to all peers with arguments: move(a%d,b%d,%d) with b at %d", workerId,
                workerId, delta, start);

        Collection<ProposalResponse> transactionPropResp = channel
                .sendTransactionProposal(transactionProposalRequest, channel.getPeers());
        for (ProposalResponse response : transactionPropResp) {
            if (response.getStatus() == ProposalResponse.Status.SUCCESS) {
                out("Successful channel%s worker id %d transaction proposal response Txid: %s from peer %s",
                        channelName, workerId, response.getTransactionID(), response.getPeer().getName());
                successful.add(response);
            } else {
                failed.add(response);
            }
        }

        // Check that all the proposals are consistent with each other. We should have only one set
        // where all the proposals above are consistent. Note that when sending to the Orderer this is done automatically.
        //  Shown here as an example that applications can invoke and select.
        // See org.hyperledger.fabric.sdk.proposal.consistency_validation config property.
        Collection<Set<ProposalResponse>> proposalConsistencySets = SDKUtils
                .getProposalConsistencySets(transactionPropResp);
        if (proposalConsistencySets.size() != 1) {
            fail(format("Expected only one set of consistent proposal responses but got %d",
                    proposalConsistencySets.size()));
        }

        out("Channel %s worker id %d, received %d transaction proposal responses. Successful+verified: %d . Failed: %d",
                channelName, workerId, transactionPropResp.size(), successful.size(), failed.size());
        if (failed.size() > 0) {
            ProposalResponse firstTransactionProposalResponse = failed.iterator().next();
            fail("Not enough endorsers for invoke(move a,b,100):" + failed.size() + " endorser error: "
                    + firstTransactionProposalResponse.getMessage() + ". Was verified: "
                    + firstTransactionProposalResponse.isVerified());
        }
        out("Channel %s, worker id %d successfully received transaction proposal responses.", channelName,
                workerId);

        ProposalResponse resp = successful.iterator().next();
        byte[] x = resp.getChaincodeActionResponsePayload(); // This is the data returned by the chaincode.
        String resultAsString = null;
        if (x != null) {
            resultAsString = new String(x, "UTF-8");
        }
        assertEquals(":)", resultAsString);

        assertEquals(200, resp.getChaincodeActionResponseStatus()); //Chaincode's status.

        TxReadWriteSetInfo readWriteSetInfo = resp.getChaincodeActionResponseReadWriteSetInfo();
        // See blockwalker below for how to traverse this
        assertNotNull(readWriteSetInfo);
        assertTrue(readWriteSetInfo.getNsRwsetCount() > 0);

        ChaincodeID cid = resp.getChaincodeID();
        assertNotNull(cid);
        final String path = cid.getPath();
        if (null == CHAIN_CODE_PATH) {
            assertTrue(path == null || "".equals(path));

        } else {

            assertEquals(CHAIN_CODE_PATH, path);

        }

        assertEquals(CHAIN_CODE_NAME, cid.getName());
        assertEquals(CHAIN_CODE_VERSION, cid.getVersion());

        ////////////////////////////
        // Send transaction to the orderer
        out("Sending chaincode transaction(move a%d,b%d,%d) to orderer. with b value %d", workerId, workerId,
                delta, start);

        channel.sendTransaction(successful, user).thenApply(transactionEvent -> {
            try {

                waitOnFabric(0);

                assertTrue(transactionEvent.isValid()); // must be valid to be here.
                out("Channel %s worker id %d Finished transaction with transaction id %s", channelName,
                        workerId, transactionEvent.getTransactionID());
                testTxID = transactionEvent.getTransactionID(); // used in the channel queries later

                ////////////////////////////
                // Send Query Proposal to all peers
                //
                String expect = start + delta + "";
                out("Channel %s Now query chaincode for the value of b%d.", channelName, workerId);
                QueryByChaincodeRequest queryByChaincodeRequest = client.newQueryProposalRequest();
                queryByChaincodeRequest.setArgs(new String[] { "b" + workerId });
                queryByChaincodeRequest.setFcn("query");
                queryByChaincodeRequest.setChaincodeID(chaincodeID);

                tm2.clear();
                tm2.put("HyperLedgerFabric", "QueryByChaincodeRequest:JavaSDK".getBytes(UTF_8));
                tm2.put("method", "QueryByChaincodeRequest".getBytes(UTF_8));
                queryByChaincodeRequest.setTransientMap(tm2);

                Collection<ProposalResponse> queryProposals = channel.queryByChaincode(queryByChaincodeRequest,
                        channel.getPeers());
                for (ProposalResponse proposalResponse : queryProposals) {
                    if (!proposalResponse.isVerified()
                            || proposalResponse.getStatus() != ProposalResponse.Status.SUCCESS) {
                        fail("Failed query proposal from peer " + proposalResponse.getPeer().getName()
                                + " status: " + proposalResponse.getStatus() + ". Messages: "
                                + proposalResponse.getMessage() + ". Was verified : "
                                + proposalResponse.isVerified());
                    } else {
                        String payload = proposalResponse.getProposalResponse().getResponse().getPayload()
                                .toStringUtf8();
                        out("Channel %s worker id %d, query payload of b%d from peer %s returned %s and was expecting: %d",
                                channelName, workerId, workerId, proposalResponse.getPeer().getName(), payload,
                                delta + start);
                        assertEquals(expect, payload);
                    }
                }

                return null;
            } catch (Exception e) {
                out("Caught exception while running query");
                e.printStackTrace();
                fail("Failed during chaincode query with error : " + e.getMessage());
            }

            return null;
        }).exceptionally(e -> {
            if (e instanceof TransactionEventException) {
                BlockEvent.TransactionEvent te = ((TransactionEventException) e).getTransactionEvent();
                if (te != null) {
                    throw new AssertionError(format("Transaction with txid %s failed. %s",
                            te.getTransactionID(), e.getMessage()), e);
                }
            }

            throw new AssertionError(
                    format("Test failed with %s exception %s", e.getClass().getName(), e.getMessage()), e);

        }).get(testConfig.getTransactionWaitTime(), TimeUnit.SECONDS);

        // Channel queries

        // We can only send channel queries to peers that are in the same org as the SDK user context
        // Get the peers from the current org being used and pick one randomly to send the queries to.
        //  Set<Peer> peerSet = sampleOrg.getPeers();
        //  Peer queryPeer = peerSet.iterator().next();
        //   out("Using peer %s for channel queries", queryPeer.getName());

        final AtomicLong atomicHeight = new AtomicLong(Long.MAX_VALUE);
        final BlockchainInfo[] bcInfoA = new BlockchainInfo[1];

        channel.getPeers().forEach(peer -> {

            try {
                BlockchainInfo channelInfo2 = channel.queryBlockchainInfo(peer, user);
                final long height = channelInfo2.getHeight();
                if (height < atomicHeight.longValue()) {
                    atomicHeight.set(height);
                    bcInfoA[0] = channelInfo2;

                }

            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            }
        });

        BlockchainInfo channelInfo = bcInfoA[0];
        out("Channel info for : " + channelName);
        out("Channel height: " + channelInfo.getHeight());
        String chainCurrentHash = Hex.encodeHexString(channelInfo.getCurrentBlockHash());
        String chainPreviousHash = Hex.encodeHexString(channelInfo.getPreviousBlockHash());
        out("Chain current block hash: " + chainCurrentHash);
        out("Chainl previous block hash: " + chainPreviousHash);
        final long getBlockNumber = atomicHeight.longValue() - 1L;

        // Query by block number. Should return latest block, i.e. block number 2
        BlockInfo returnedBlock = channel.queryBlockByNumber(getBlockNumber, user);
        String previousHash = Hex.encodeHexString(returnedBlock.getPreviousHash());
        out("queryBlockByNumber returned correct block with blockNumber " + returnedBlock.getBlockNumber()
                + " \n previous_hash " + previousHash);
        assertEquals(getBlockNumber, returnedBlock.getBlockNumber());
        assertEquals(chainPreviousHash, previousHash);

        returnedBlock.getEnvelopeCount();
        out("Worker: %d, run: %d, channel: %s block transaction count: %d", workerId, runId, channelName,
                returnedBlock.getEnvelopeCount());

        // Query by block hash. Using latest block's previous hash so should return block number 1
        byte[] hashQuery = returnedBlock.getPreviousHash();
        returnedBlock = channel.queryBlockByHash(hashQuery, user);
        out("queryBlockByHash returned block with blockNumber " + returnedBlock.getBlockNumber());
        assertEquals(format("query by hash expected block number %d but was %d ", getBlockNumber - 1L,
                returnedBlock.getBlockNumber()), getBlockNumber - 1L, returnedBlock.getBlockNumber());

        // Query block by TxID. Since it's the last TxID, should be block 2
        //TODO RICK         returnedBlock = channel.queryBlockByTransactionID(testTxID);
        //          out("queryBlockByTxID returned block with blockNumber " + returnedBlock.getBlockNumber());
        //         assertEquals(channelInfo.getHeight() - 1, returnedBlock.getBlockNumber());

        // query transaction by ID
        //         TransactionInfo txInfo = channel.queryTransactionByID(testTxID);
        //            out("QueryTransactionByID returned TransactionInfo: txID " + txInfo.getTransactionID()
        //                    + "\n     validation code " + txInfo.getValidationCode().getNumber());

        //            if (chaincodeEventListenerHandle != null) {
        //
        //                channel.unregisterChaincodeEventListener(chaincodeEventListenerHandle);
        //                //Should be two. One event in chaincode and two notification for each of the two event hubs
        //
        //                final int numberEventsExpected = channel.getEventHubs().size() +
        //                        channel.getPeers(EnumSet.of(PeerRole.EVENT_SOURCE)).size();
        //                //just make sure we get the notifications.
        //                for (int i = 15; i > 0; --i) {
        //                    if (chaincodeEvents.size() == numberEventsExpected) {
        //                        break;
        //                    } else {
        //                        Thread.sleep(90); // wait for the events.
        //                    }
        //
        //                }
        //                assertEquals(numberEventsExpected, chaincodeEvents.size());
        //
        //                for (ChaincodeEventCapture chaincodeEventCapture : chaincodeEvents) {
        //                    assertEquals(chaincodeEventListenerHandle, chaincodeEventCapture.handle);
        //                    assertEquals(testTxID, chaincodeEventCapture.chaincodeEvent.getTxId());
        //                    assertEquals(EXPECTED_EVENT_NAME, chaincodeEventCapture.chaincodeEvent.getEventName());
        //                    assertTrue(Arrays.equals(EXPECTED_EVENT_DATA, chaincodeEventCapture.chaincodeEvent.getPayload()));
        //                    assertEquals(CHAIN_CODE_NAME, chaincodeEventCapture.chaincodeEvent.getChaincodeId());
        //
        //                    BlockEvent blockEvent = chaincodeEventCapture.blockEvent;
        //                    assertEquals(channelName, blockEvent.getChannelId());
        //                    //   assertTrue(channel.getEventHubs().contains(blockEvent.getEventHub()));
        //
        //                }
        //
        //            } else {
        //                assertTrue(chaincodeEvents.isEmpty());
        //            }

        out("Running for Channel %s done", channelName);

    } catch (Exception e) {
        out("Caught an exception running channel %s", channel.getName());
        e.printStackTrace();
        fail("Test failed with error : " + e.getMessage());
    }
}
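
In the channel-query block above, atomicHeight starts at Long.MAX_VALUE and set is called from inside the forEach lambda whenever a peer reports a lower block height, so the loop ends up with the minimum height across peers (the lambda can only capture effectively final locals, hence the AtomicLong holder). A standalone sketch of that minimum-tracking pattern follows, with accumulateAndGet shown as the lock-free alternative if the lambda could run on several threads; the peer heights are made-up data.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class MinHeightSketch {
    public static void main(String[] args) {
        List<Long> peerHeights = Arrays.asList(7L, 5L, 9L); // illustrative data

        // Holder pattern from the example: check-then-set inside a sequential forEach.
        final AtomicLong minHeight = new AtomicLong(Long.MAX_VALUE);
        peerHeights.forEach(height -> {
            if (height < minHeight.get()) {
                minHeight.set(height);
            }
        });
        System.out.println("min height (sequential): " + minHeight.get());

        // If the same lambda ran concurrently, accumulateAndGet avoids the
        // race between the get() check and the set().
        final AtomicLong concurrentMin = new AtomicLong(Long.MAX_VALUE);
        peerHeights.parallelStream().forEach(h -> concurrentMin.accumulateAndGet(h, Math::min));
        System.out.println("min height (concurrent): " + concurrentMin.get());
    }
}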

From source file:org.apache.usergrid.persistence.index.impl.EntityIndexTest.java

@Test
public void testIndexThreads() throws IOException {

    long now = System.currentTimeMillis();
    final int threads = 20;
    final int size = 30;

    final String entityType = "thing";

    final CountDownLatch latch = new CountDownLatch(threads);
    final AtomicLong failTime = new AtomicLong(0);
    InputStream is = this.getClass().getResourceAsStream("/sample-large.json");
    ObjectMapper mapper = new ObjectMapper();
    final List<Object> sampleJson = mapper.readValue(is, new TypeReference<List<Object>>() {
    });
    for (int i = 0; i < threads; i++) {

        final IndexEdge indexEdge = new IndexEdgeImpl(appId, "things", SearchEdge.NodeType.SOURCE, i);

        Thread thread = new Thread(() -> {
            try {

                EntityIndexBatch batch = entityIndex.createBatch();
                insertJsonBlob(sampleJson, batch, entityType, indexEdge, size, 0);
                indexProducer.put(batch.build()).subscribe();
            } catch (Exception e) {
                synchronized (failTime) {
                    if (failTime.get() == 0) {
                        failTime.set(System.currentTimeMillis());
                    }
                }
                System.out.println(e.toString());
                fail("threw exception");
            } finally {
                latch.countDown();
            }
        });
        thread.start();
    }
    try {
        latch.await();
    } catch (InterruptedException ie) {
        throw new RuntimeException(ie);
    }
    assertTrue("system must have failed at " + (failTime.get() - now), failTime.get() == 0);
}
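
The test above guards failTime with a synchronized block and a get() == 0 check so that only the first failing worker records a timestamp. A compareAndSet-based sketch of the same "first writer wins" idea, which needs no external lock, is shown below; plain set remains the natural call when a single thread resets the state. The class and method names are illustrative.

import java.util.concurrent.atomic.AtomicLong;

public class FirstFailureTimestamp {
    private final AtomicLong failTime = new AtomicLong(0L);

    // Called by any worker thread when it hits an error.
    void recordFailure() {
        // compareAndSet succeeds only for the first caller, so the earliest
        // failure timestamp wins without a synchronized block.
        failTime.compareAndSet(0L, System.currentTimeMillis());
    }

    boolean hasFailed() {
        return failTime.get() != 0L;
    }

    // Plain set() is fine here because reset happens from a single thread,
    // e.g. between test runs.
    void reset() {
        failTime.set(0L);
    }
}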

From source file:com.github.brandtg.switchboard.LogPuller.java

@Override
public void run() {
    HttpClient httpClient = HttpClients.createDefault();
    AtomicLong currentIndex = new AtomicLong(lastIndex);
    HttpHost host = new HttpHost(sourceAddress.getAddress(), sourceAddress.getPort());
    boolean firstLoop = true;

    while (!isShutdown.get()) {
        // Build URI
        StringBuilder sb = new StringBuilder();
        try {
            if (firstLoop) {
                sb.append("/log/metadata/header?target=").append(sinkAddress.getHostName()).append(":")
                        .append(sinkAddress.getPort());
                firstLoop = false;
            } else {
                sb.append("/log/").append(URLEncoder.encode(collection, ENCODING)).append("/")
                        .append(currentIndex.get()).append("?target=").append(sinkAddress.getHostName())
                        .append(":").append(sinkAddress.getPort());
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        // TODO: Clean this up a little bit
        int resStatus = -1;
        synchronized (this) {
            HttpEntity entity = null;
            try {
                // Get data
                URI uri = URI.create(sb.toString());
                HttpGet req = new HttpGet(uri);
                HttpResponse res = httpClient.execute(host, req);
                entity = res.getEntity();
                resStatus = res.getStatusLine().getStatusCode();
                if (resStatus == 200) {
                    // Wait for data to be consumed
                    // n.b. This object will be registered as a listener
                    wait();

                    // Update position
                    InputStream inputStream = res.getEntity().getContent();
                    LogRegionResponse metadata = OBJECT_MAPPER.readValue(inputStream, LogRegionResponse.class);
                    currentIndex
                            .set(metadata.getLogRegions().get(metadata.getLogRegions().size() - 1).getIndex());

                    for (LogRegion logRegion : metadata.getLogRegions()) {
                        LOG.info("Received {}", logRegion);
                    }
                }
            } catch (Exception e) {
                LOG.error("Error", e);
            } finally {
                if (entity != null) {
                    try {
                        EntityUtils.consume(entity);
                    } catch (IOException e) {
                        LOG.error("Error", e);
                    }
                }
            }
        }

        // Sleep if did not get data
        if (resStatus != 200) {
            try {
                LOG.debug("No data available, sleeping 1000 ms");
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                LOG.warn("Error while sleeping for more data", e);
            }
        }
    }
}

From source file:org.neo4j.consistency.checking.full.FullCheckIntegrationTest.java

@Test
public void shouldManageUnusedRecordsWithWeirdDataIn() throws Exception {
    // Given
    final AtomicLong id = new AtomicLong();
    fixture.apply(new GraphStoreFixture.Transaction() {
        @Override
        protected void transactionData(TransactionDataBuilder tx, IdGenerator next) {
            id.set(next.relationship());
            RelationshipRecord relationship = new RelationshipRecord(id.get());
            relationship.setFirstNode(-1);
            relationship.setSecondNode(-1);
            relationship.setInUse(true);
            tx.create(relationship);
        }
    });
    fixture.apply(new GraphStoreFixture.Transaction() {
        @Override
        protected void transactionData(TransactionDataBuilder tx, IdGenerator next) {
            RelationshipRecord relationship = new RelationshipRecord(id.get());
            tx.delete(relationship);
        }
    });

    // When
    ConsistencySummaryStatistics stats = check();

    // Then
    assertTrue(stats.isConsistent());
}

From source file:org.apache.hadoop.hbase.regionserver.DefaultMobStoreFlusher.java

@Override
public List<Path> flushSnapshot(SortedSet<KeyValue> snapshot, long cacheFlushId,
        TimeRangeTracker snapshotTimeRangeTracker, AtomicLong flushedSize, MonitoredTask status)
        throws IOException {
    ArrayList<Path> result = new ArrayList<Path>();
    if (snapshot.size() == 0)
        return result; // don't flush if there are no entries

    // Use a store scanner to find which rows to flush.
    long smallestReadPoint = store.getSmallestReadPoint();
    InternalScanner scanner = createScanner(snapshot, smallestReadPoint);
    if (scanner == null) {
        return result; // NULL scanner returned from coprocessor hooks means skip normal processing
    }

    StoreFile.Writer writer;
    long flushed = 0;
    try {
        // TODO:  We can fail in the below block before we complete adding this flush to
        //        list of store files.  Add cleanup of anything put on filesystem if we fail.
        synchronized (flushLock) {
            status.setStatus("Flushing " + store + ": creating writer");
            // Write the map out to the disk
            writer = store.createWriterInTmp(snapshot.size(), store.getFamily().getCompression(), false, true,
                    true);
            writer.setTimeRangeTracker(snapshotTimeRangeTracker);
            try {
                if (!isMob) {
                    // It's not a mob store, flush the cells in a normal way
                    flushed = performFlush(scanner, writer, smallestReadPoint);
                } else {
                    // It's a mob store, flush the cells in a mob way. This is the difference of flushing
                    // between a normal and a mob store.
                    flushed = performMobFlush(snapshot, snapshotTimeRangeTracker, cacheFlushId, scanner, writer,
                            status, smallestReadPoint);
                }
            } finally {
                finalizeWriter(writer, cacheFlushId, status);
            }
        }
    } finally {
        flushedSize.set(flushed);
        scanner.close();
    }
    LOG.info("Flushed, sequenceid=" + cacheFlushId + ", memsize=" + StringUtils.humanReadableInt(flushed)
            + ", hasBloomFilter=" + writer.hasGeneralBloom() + ", into tmp file " + writer.getPath());
    result.add(writer.getPath());
    return result;
}
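
Here the caller hands an AtomicLong (flushedSize) into flushSnapshot and the method reports its result by calling set in the finally block, so the count is published even when the flush throws. A minimal sketch of that out-parameter style follows; doFlush and writeEntries are hypothetical names.

import java.util.concurrent.atomic.AtomicLong;

public class OutParameterSketch {

    // The caller supplies an AtomicLong; the method writes its result into it.
    // Because set runs in the finally block, the count is reported even if
    // writeEntries throws.
    static void doFlush(AtomicLong flushedSize) {
        long flushed = 0;
        try {
            flushed = writeEntries(); // hypothetical work that may fail partway through
        } finally {
            flushedSize.set(flushed);
        }
    }

    private static long writeEntries() {
        return 128L; // pretend 128 bytes were flushed
    }

    public static void main(String[] args) {
        AtomicLong flushedSize = new AtomicLong(0L);
        doFlush(flushedSize);
        System.out.println("flushed " + flushedSize.get() + " bytes");
    }
}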

From source file:com.thinkbiganalytics.nifi.v2.ingest.GetTableData.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = null;
    if (context.hasIncomingConnection()) {
        flowFile = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (flowFile == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final FlowFile incoming = flowFile;
    final ComponentLog logger = getLog();

    final DBCPService dbcpService = context.getProperty(JDBC_SERVICE).asControllerService(DBCPService.class);
    final MetadataProviderService metadataService = context.getProperty(METADATA_SERVICE)
            .asControllerService(MetadataProviderService.class);
    final String loadStrategy = context.getProperty(LOAD_STRATEGY).getValue();
    final String categoryName = context.getProperty(FEED_CATEGORY).evaluateAttributeExpressions(incoming)
            .getValue();
    final String feedName = context.getProperty(FEED_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String fieldSpecs = context.getProperty(TABLE_SPECS).evaluateAttributeExpressions(incoming)
            .getValue();
    final String dateField = context.getProperty(DATE_FIELD).evaluateAttributeExpressions(incoming).getValue();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer overlapTime = context.getProperty(OVERLAP_TIME).evaluateAttributeExpressions(incoming)
            .asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer backoffTime = context.getProperty(BACKOFF_PERIOD).asTimePeriod(TimeUnit.SECONDS).intValue();
    final String unitSize = context.getProperty(UNIT_SIZE).getValue();
    final String outputType = context.getProperty(OUTPUT_TYPE).getValue();
    String outputDelimiter = context.getProperty(OUTPUT_DELIMITER).evaluateAttributeExpressions(incoming)
            .getValue();
    final String delimiter = StringUtils.isBlank(outputDelimiter) ? "," : outputDelimiter;

    final PropertyValue waterMarkPropName = context.getProperty(HIGH_WATER_MARK_PROP)
            .evaluateAttributeExpressions(incoming);

    final String[] selectFields = parseFields(fieldSpecs);

    final LoadStrategy strategy = LoadStrategy.valueOf(loadStrategy);
    final StopWatch stopWatch = new StopWatch(true);

    try (final Connection conn = dbcpService.getConnection()) {

        FlowFile outgoing = (incoming == null ? session.create() : incoming);
        final AtomicLong nrOfRows = new AtomicLong(0L);
        final LastFieldVisitor visitor = new LastFieldVisitor(dateField, null);
        final FlowFile current = outgoing;

        outgoing = session.write(outgoing, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                ResultSet rs = null;
                try {
                    GetTableDataSupport support = new GetTableDataSupport(conn, queryTimeout);
                    if (strategy == LoadStrategy.FULL_LOAD) {
                        rs = support.selectFullLoad(tableName, selectFields);
                    } else if (strategy == LoadStrategy.INCREMENTAL) {
                        String waterMarkValue = getIncrementalWaterMarkValue(current, waterMarkPropName);
                        LocalDateTime waterMarkTime = LocalDateTime.parse(waterMarkValue, DATE_TIME_FORMAT);
                        Date lastLoadDate = toDate(waterMarkTime);
                        visitor.setLastModifyDate(lastLoadDate);
                        rs = support.selectIncremental(tableName, selectFields, dateField, overlapTime,
                                lastLoadDate, backoffTime, GetTableDataSupport.UnitSizes.valueOf(unitSize));
                    } else {
                        throw new RuntimeException("Unsupported loadStrategy [" + loadStrategy + "]");
                    }

                    if (GetTableDataSupport.OutputType.DELIMITED
                            .equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        nrOfRows.set(JdbcCommon.convertToDelimitedStream(rs, out,
                                (strategy == LoadStrategy.INCREMENTAL ? visitor : null), delimiter));
                    } else if (GetTableDataSupport.OutputType.AVRO
                            .equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        avroSchema = JdbcCommon.createSchema(rs);
                        nrOfRows.set(JdbcCommon.convertToAvroStream(rs, out,
                                (strategy == LoadStrategy.INCREMENTAL ? visitor : null), avroSchema));
                    } else {
                        throw new RuntimeException("Unsupported output format type [" + outputType + "]");
                    }
                } catch (final SQLException e) {
                    throw new IOException("SQL execution failure", e);
                } finally {
                    if (rs != null) {
                        try {
                            if (rs.getStatement() != null) {
                                rs.getStatement().close();
                            }
                            rs.close();
                        } catch (SQLException e) {
                            getLog().error("Error closing sql statement and resultset");
                        }
                    }
                }
            }
        });

        // set attribute how many rows were selected
        outgoing = session.putAttribute(outgoing, RESULT_ROW_COUNT, Long.toString(nrOfRows.get()));

        //set output format type and avro schema for feed setup, if available
        outgoing = session.putAttribute(outgoing, "db.table.output.format", outputType);
        String avroSchemaForFeedSetup = (avroSchema != null) ? JdbcCommon.getAvroSchemaForFeedSetup(avroSchema)
                : EMPTY_STRING;
        outgoing = session.putAttribute(outgoing, "db.table.avro.schema", avroSchemaForFeedSetup);

        session.getProvenanceReporter().modifyContent(outgoing, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));

        // Terminate flow file if no work
        Long rowcount = nrOfRows.get();
        outgoing = session.putAttribute(outgoing, ComponentAttributes.NUM_SOURCE_RECORDS.key(),
                String.valueOf(rowcount));

        if (nrOfRows.get() == 0L) {
            logger.info("{} contains no data; transferring to 'nodata'", new Object[] { outgoing });
            session.transfer(outgoing, REL_NO_DATA);
        } else {

            logger.info("{} contains {} records; transferring to 'success'",
                    new Object[] { outgoing, nrOfRows.get() });

            if (strategy == LoadStrategy.INCREMENTAL) {
                String newWaterMarkStr = format(visitor.getLastModifyDate());
                outgoing = setIncrementalWaterMarkValue(session, outgoing, waterMarkPropName, newWaterMarkStr);

                logger.info("Recorded load status feed {} date {}", new Object[] { feedName, newWaterMarkStr });
            }
            session.transfer(outgoing, REL_SUCCESS);
        }
    } catch (final Exception e) {
        if (incoming == null) {
            logger.error(
                    "Unable to execute SQL select from table due to {}. No incoming flow file to route to failure",
                    new Object[] { e });
        } else {
            logger.error("Unable to execute SQL select from table due to {}; routing to failure",
                    new Object[] { incoming, e });
            session.transfer(incoming, REL_FAILURE);
        }
    }
}

From source file:org.axonframework.migration.eventstore.JpaEventStoreMigrator.java

public boolean run() throws Exception {
    final AtomicInteger updateCount = new AtomicInteger();
    final AtomicInteger skipCount = new AtomicInteger();
    final AtomicLong lastId = new AtomicLong(
            Long.parseLong(configuration.getProperty("lastProcessedId", "-1")));
    try {
        TransactionTemplate template = new TransactionTemplate(txManager);
        template.setReadOnly(true);
        System.out.println("Starting conversion. Fetching batches of " + QUERY_BATCH_SIZE + " items.");
        while (template.execute(new TransactionCallback<Boolean>() {
            @Override
            public Boolean doInTransaction(TransactionStatus status) {
                final Session hibernate = entityManager.unwrap(Session.class);
                Iterator<Object[]> results = hibernate.createQuery(
                        "SELECT e.aggregateIdentifier, e.sequenceNumber, e.type, e.id FROM DomainEventEntry e "
                                + "WHERE e.id > :lastIdentifier ORDER BY e.id ASC")
                        .setFetchSize(1000).setMaxResults(QUERY_BATCH_SIZE).setReadOnly(true)
                        .setParameter("lastIdentifier", lastId.get()).iterate();
                if (!results.hasNext()) {
                    System.out.println("Empty batch. Assuming we're done.");
                    return false;
                } else if (Thread.interrupted()) {
                    System.out.println("Received an interrupt. Stopping...");
                    return false;
                }
                while (results.hasNext()) {
                    List<ConversionItem> conversionBatch = new ArrayList<ConversionItem>();
                    while (conversionBatch.size() < CONVERSION_BATCH_SIZE && results.hasNext()) {
                        Object[] item = results.next();
                        String aggregateIdentifier = (String) item[0];
                        long sequenceNumber = (Long) item[1];
                        String type = (String) item[2];
                        Long entryId = (Long) item[3];
                        lastId.set(entryId);
                        conversionBatch
                                .add(new ConversionItem(sequenceNumber, aggregateIdentifier, type, entryId));
                    }
                    if (!conversionBatch.isEmpty()) {
                        executor.submit(new TransformationTask(conversionBatch, skipCount));
                    }
                }
                return true;
            }
        })) {
            System.out.println("Reading next batch, starting at ID " + lastId.get() + ".");
            System.out.println(
                    "Estimated backlog size is currently: " + (workQueue.size() * CONVERSION_BATCH_SIZE));
        }
    } finally {
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.MINUTES);
        if (lastId.get() >= 0) {
            System.out.println(
                    "Processed events from old event store up to (and including) id = " + lastId.get());
        }
    }
    System.out.println("In total " + updateCount.get() + " items have been converted.");
    return skipCount.get() == 0;
}
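
The migrator above seeds lastId from a persisted "lastProcessedId" property, advances it with set as each entry is read, and prints it afterwards so a later run can resume where this one stopped. A compact sketch of that resume-from-checkpoint pattern follows; the Properties-based storage and the entry list are illustrative stand-ins for the real configuration and query.

import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;

public class CheckpointSketch {

    static long processBatch(Properties configuration, List<Long> entryIds) {
        // Resume from the last persisted position, defaulting to -1 (nothing processed yet).
        final AtomicLong lastId = new AtomicLong(
                Long.parseLong(configuration.getProperty("lastProcessedId", "-1")));

        for (long entryId : entryIds) {
            if (entryId <= lastId.get()) {
                continue; // already handled in a previous run
            }
            // ... convert / migrate the entry here ...
            lastId.set(entryId); // advance the checkpoint after each entry
        }

        // Persist the new checkpoint so a restart picks up where this run left off.
        configuration.setProperty("lastProcessedId", Long.toString(lastId.get()));
        return lastId.get();
    }
}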

From source file:org.elasticsearch.test.ElasticsearchIntegrationTest.java

/**
 * Waits until at least a give number of document is visible for searchers
 *
 * @param numDocs         number of documents to wait for
 * @param maxWaitTime     if not progress have been made during this time, fail the test
 * @param maxWaitTimeUnit the unit in which maxWaitTime is specified
 * @param indexer         a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied it will be first checked for documents indexed.
 *                        This saves on unneeded searches.
 * @return the actual number of docs seen.
 * @throws InterruptedException
 */
public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTimeUnit,
        final @Nullable BackgroundIndexer indexer) throws InterruptedException {
    final AtomicLong lastKnownCount = new AtomicLong(-1);
    long lastStartCount = -1;
    Predicate<Object> testDocs = new Predicate<Object>() {
        @Override
        public boolean apply(Object o) {
            if (indexer != null) {
                lastKnownCount.set(indexer.totalIndexedDocs());
            }
            if (lastKnownCount.get() >= numDocs) {
                try {
                    long count = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet()
                            .getCount();
                    if (count == lastKnownCount.get()) {
                        // no progress - try to refresh for the next time
                        client().admin().indices().prepareRefresh().get();
                    }
                    lastKnownCount.set(count);
                } catch (Throwable e) { // count now acts like search and barfs if all shards failed...
                    logger.debug("failed to executed count", e);
                    return false;
                }
                logger.debug("[{}] docs visible for search. waiting for [{}]", lastKnownCount.get(), numDocs);
            } else {
                logger.debug("[{}] docs indexed. waiting for [{}]", lastKnownCount.get(), numDocs);
            }
            return lastKnownCount.get() >= numDocs;
        }
    };

    while (!awaitBusy(testDocs, maxWaitTime, maxWaitTimeUnit)) {
        if (lastStartCount == lastKnownCount.get()) {
            // we didn't make any progress
            fail("failed to reach " + numDocs + "docs");
        }
        lastStartCount = lastKnownCount.get();
    }
    return lastKnownCount.get();
}
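
This last helper uses set to remember the most recent document count so the outer loop can tell whether any progress was made between polls and fail fast otherwise. A distilled sketch of that progress-tracking loop follows; the LongSupplier stands in for the real count query.

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;

public class ProgressWatcher {

    // Polls countSupplier until target is reached, failing if a polling round
    // sees no increase over the previous one.
    static long waitForCount(long target, int maxRounds, LongSupplier countSupplier)
            throws InterruptedException {
        final AtomicLong lastKnownCount = new AtomicLong(-1);

        for (int round = 0; round < maxRounds; round++) {
            long previous = lastKnownCount.get();
            lastKnownCount.set(countSupplier.getAsLong()); // record the latest observation

            if (lastKnownCount.get() >= target) {
                return lastKnownCount.get();
            }
            if (round > 0 && lastKnownCount.get() == previous) {
                throw new IllegalStateException("no progress: stuck at " + previous);
            }
            Thread.sleep(100); // wait before the next poll
        }
        throw new IllegalStateException("timed out at " + lastKnownCount.get() + " of " + target);
    }
}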