Example usage for org.apache.commons.lang.time StopWatch getSplitTime

List of usage examples for org.apache.commons.lang.time StopWatch getSplitTime

Introduction

On this page you can find example usage for org.apache.commons.lang.time StopWatch getSplitTime.

Prototype

public long getSplitTime() 

Source Link

Document

Get the split time on the stopwatch.

This is the time between start and latest split.
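Before the usage examples, here is a minimal sketch of the split semantics (the class name and sleep durations are illustrative, not taken from the sources below): getSplitTime() reports the interval from start() to the latest split() and stays frozen there until the next split(); calling it before any split() throws IllegalStateException.

import org.apache.commons.lang.time.StopWatch;

public class SplitTimeDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch();
        watch.start();

        Thread.sleep(100);
        watch.split(); // record a split point; getSplitTime() would throw before this call
        System.out.println("Split time: " + watch.getSplitTime() + " ms"); // roughly 100

        Thread.sleep(100);
        // The split time stays frozen at the last split(); total elapsed time keeps growing.
        System.out.println("Split time: " + watch.getSplitTime() + " ms"); // still roughly 100
        System.out.println("Total time: " + watch.getTime() + " ms");      // roughly 200

        watch.stop();
    }
}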

Usage

From source file:com.continuuity.weave.yarn.YarnWeaveController.java

@Override
protected void doStartUp() {
    super.doStartUp();

    // Submit and poll the status of the yarn application
    try {
        processController = startUp.call();

        YarnApplicationReport report = processController.getReport();
        LOG.debug("Application {} submit", report.getApplicationId());

        YarnApplicationState state = report.getYarnApplicationState();
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        stopWatch.split();
        long maxTime = TimeUnit.MILLISECONDS.convert(Constants.APPLICATION_MAX_START_SECONDS, TimeUnit.SECONDS);

        LOG.info("Checking yarn application status");
        while (!hasRun(state) && stopWatch.getSplitTime() < maxTime) {
            report = processController.getReport();
            state = report.getYarnApplicationState();
            LOG.debug("Yarn application status: {}", state);
            TimeUnit.SECONDS.sleep(1);
            stopWatch.split();
        }
        LOG.info("Yarn application is in state {}", state);
        if (state != YarnApplicationState.RUNNING) {
            LOG.info("Yarn application is not in running state. Shutting down controller.",
                    Constants.APPLICATION_MAX_START_SECONDS);
            forceShutDown();
        } else {
            try {
                URL resourceUrl = URI
                        .create(String.format("http://%s:%d", report.getHost(), report.getRpcPort()))
                        .resolve(TrackerService.PATH).toURL();
                resourcesClient = new ResourceReportClient(resourceUrl);
            } catch (IOException e) {
                resourcesClient = null;
            }
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
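The controller above illustrates the recurring poll-with-deadline idiom: take a split on every pass and compare getSplitTime() against a maximum wait. A distilled sketch of that loop, where done() and the ten-second deadline are hypothetical stand-ins for hasRun(state) and Constants.APPLICATION_MAX_START_SECONDS:

import java.util.concurrent.TimeUnit;

import org.apache.commons.lang.time.StopWatch;

public class PollWithDeadline {

    // Hypothetical condition standing in for hasRun(state) above.
    private static boolean done() {
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        stopWatch.split(); // take an initial split so getSplitTime() is legal on the first check

        long maxTimeMs = TimeUnit.MILLISECONDS.convert(10, TimeUnit.SECONDS); // hypothetical deadline

        while (!done() && stopWatch.getSplitTime() < maxTimeMs) {
            TimeUnit.SECONDS.sleep(1);
            stopWatch.split(); // refresh the split so the deadline check sees the current elapsed time
        }

        stopWatch.stop();
    }
}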

From source file:com.continuuity.weave.yarn.YarnWeaveController.java

@Override
protected void doShutDown() {
    if (processController == null) {
        LOG.warn("No process controller for application that is not submitted.");
        return;
    }

    // Wait for the stop message to be processed
    try {
        Uninterruptibles.getUninterruptibly(getStopMessageFuture(), Constants.APPLICATION_MAX_STOP_SECONDS,
                TimeUnit.SECONDS);
    } catch (Exception e) {
        LOG.error("Failed to wait for stop message being processed.", e);
        // Kill the application through yarn
        kill();
    }

    // Poll application status from yarn
    try {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        stopWatch.split();
        long maxTime = TimeUnit.MILLISECONDS.convert(Constants.APPLICATION_MAX_STOP_SECONDS, TimeUnit.SECONDS);

        YarnApplicationReport report = processController.getReport();
        FinalApplicationStatus finalStatus = report.getFinalApplicationStatus();
        while (finalStatus == FinalApplicationStatus.UNDEFINED && stopWatch.getSplitTime() < maxTime) {
            LOG.debug("Yarn application final status for {} {}", report.getApplicationId(), finalStatus);
            TimeUnit.SECONDS.sleep(1);
            stopWatch.split();
            finalStatus = processController.getReport().getFinalApplicationStatus();
        }
        LOG.debug("Yarn application final status is {}", finalStatus);

        // Application not finished after max stop time, kill the application
        if (finalStatus == FinalApplicationStatus.UNDEFINED) {
            kill();
        }
    } catch (Exception e) {
        LOG.warn("Exception while waiting for application report: {}", e.getMessage(), e);
        kill();
    }

    super.doShutDown();
}

From source file:io.cloudslang.engine.queue.services.ExecutionQueueServiceImpl.java

@Override
@Transactional
public void enqueue(List<ExecutionMessage> messages) {
    if (CollectionUtils.isEmpty(messages))
        return;

    if (logger.isDebugEnabled())
        logger.debug("Enqueue " + messages.size() + " messages");
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    // assign worker for messages with pending status
    messages = executionAssignerService.assignWorkers(messages);
    if (logger.isDebugEnabled())
        logger.debug("Messages were assigned successfully");

    final List<ExecutionMessage> stateMessages = new ArrayList<>(messages.size());

    // First, fill in the execution state id for new inserts
    for (ExecutionMessage msg : messages) {
        if (msg.getExecStateId() == ExecutionMessage.EMPTY_EXEC_STATE_ID) {
            long execStateId = executionQueueRepository.generateExecStateId();
            msg.setExecStateId(execStateId);
            stateMessages.add(msg);
        } else if (msg.getPayload() != null && msg.getStatus() == ExecStatus.IN_PROGRESS) {
            stateMessages.add(msg);
        }
    }

    if (CollectionUtils.isNotEmpty(listeners)) {
        stopWatch.split();
        for (QueueListener listener : listeners) {
            listener.prePersist(messages);
        }
        if (logger.isDebugEnabled())
            logger.debug("Listeners done in " + (stopWatch.getSplitTime()) + " ms");
    }

    stopWatch.split();
    if (stateMessages.size() > 0)
        executionQueueRepository.insertExecutionStates(stateMessages);

    long msgVersion = versionService.getCurrentVersion(VersionService.MSG_RECOVERY_VERSION_COUNTER_NAME);
    executionQueueRepository.insertExecutionQueue(messages, msgVersion);
    if (logger.isDebugEnabled())
        logger.debug("Persistency done in " + (stopWatch.getSplitTime()) + " ms");

    if (CollectionUtils.isNotEmpty(listeners)) {
        stopWatch.split();
        List<ExecutionMessage> failedMessages = filter(messages, ExecStatus.FAILED);
        List<ExecutionMessage> terminatedMessages = filter(messages, ExecStatus.TERMINATED);
        for (QueueListener listener : listeners) {
            listener.onEnqueue(messages, messages.size());
            if (failedMessages.size() > 0)
                listener.onFailed(failedMessages);
            if (terminatedMessages.size() > 0)
                listener.onTerminated(terminatedMessages);
        }
        if (logger.isDebugEnabled())
            logger.debug("Listeners done in " + (stopWatch.getSplitTime()) + " ms");
    }
    if (logger.isDebugEnabled())
        logger.debug("Enqueue done in " + (stopWatch.getTime()) + " ms");
}
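One caveat worth noting: getSplitTime() measures from start() to the latest split(), so the "done in" figures above appear to report the time from the start of enqueue up to the split taken before each phase, not the duration of the phase itself. A sketch of per-phase timing via split deltas, where doPhase() is a hypothetical unit of work:

import org.apache.commons.lang.time.StopWatch;

public class PerPhaseTiming {

    // Hypothetical unit of work.
    private static void doPhase() throws InterruptedException {
        Thread.sleep(50);
    }

    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        doPhase();
        stopWatch.split();
        long phase1End = stopWatch.getSplitTime(); // time from start() to this split
        System.out.println("Phase 1: " + phase1End + " ms");

        doPhase();
        stopWatch.split();
        long phase2End = stopWatch.getSplitTime();
        // Subtract the previous split to get the duration of phase 2 alone.
        System.out.println("Phase 2: " + (phase2End - phase1End) + " ms");

        stopWatch.stop();
    }
}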

From source file:com.auditbucket.test.functional.TestForceDuplicateRlx.java

@Test
public void uniqueChangeRLXUnderLoad() throws Exception {
    logger.info("uniqueChangeRLXUnderLoad started");
    SecurityContextHolder.getContext().setAuthentication(authMike);
    regService.registerSystemUser(new RegistrationBean("TestTrack", mike, "bah").setIsUnique(false));

    int auditMax = 10;
    int logMax = 10;
    int fortress = 1;
    String simpleJson = "{\"who\":";
    ArrayList<Long> list = new ArrayList<>();

    logger.info("FortressCount: " + fortressMax + " AuditCount: " + auditMax + " LogCount: " + logMax);
    logger.info(
            "We will be expecting a total of " + (auditMax * logMax * fortressMax) + " messages to be handled");

    StopWatch watch = new StopWatch();
    watch.start();
    double splitTotals = 0;
    long totalRows = 0;
    int auditSleepCount; // Discount all the time we spent sleeping

    DecimalFormat f = new DecimalFormat("##.000");

    while (fortress <= fortressMax) {
        String fortressName = "bulkloada" + fortress;
        int audit = 1;
        long requests = 0;
        auditSleepCount = 0;

        Fortress iFortress = fortressService.registerFortress(new FortressInputBean(fortressName, true));
        requests++;
        logger.info("Starting run for " + fortressName);
        while (audit <= auditMax) {
            MetaInputBean aib = new MetaInputBean(iFortress.getName(), fortress + "olivia@sunnybell.com",
                    "CompanyNode", new DateTime(), "ABC" + audit);
            TrackResultBean arb = trackEP.trackHeader(aib, null, null).getBody();
            requests++;
            int log = 1;
            while (log <= logMax) {
                createLog(simpleJson, aib, arb, log);
                requests++;
                log++;
            } // Logs created
            audit++;
        } // Audit headers finished with
        watch.split();
        double fortressRunTime = (watch.getSplitTime() - auditSleepCount) / 1000d;
        logger.info("*** " + iFortress.getName() + " took " + fortressRunTime + "  avg processing time for ["
                + requests + "] RPS= " + f.format(fortressRunTime / requests) + ". Requests per second "
                + f.format(requests / fortressRunTime));

        splitTotals = splitTotals + fortressRunTime;
        totalRows = totalRows + requests;
        watch.reset();
        watch.start();
        list.add(iFortress.getId());
        fortress++;
    }

    logger.info("*** Created data set in " + f.format(splitTotals) + " fortress avg = "
            + f.format(splitTotals / fortressMax) + " avg processing time per request "
            + f.format(splitTotals / totalRows) + ". Requests per second " + f.format(totalRows / splitTotals));
    watch.reset();
}
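The reset()/start() pair at the end of each iteration is what makes these per-fortress timings work: reset() clears the watch completely, so each split measures only the current fortress. A compact sketch of that per-batch pattern (the batch count and sleep are illustrative):

import org.apache.commons.lang.time.StopWatch;

public class PerBatchTiming {
    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch();
        watch.start();

        for (int batch = 1; batch <= 3; batch++) {
            Thread.sleep(20); // stand-in for a batch of work
            watch.split();
            System.out.println("Batch " + batch + " took " + watch.getSplitTime() / 1000d + " s");

            watch.reset(); // reset() returns the watch to its unstarted state...
            watch.start(); // ...so it must be started again before timing the next batch
        }
    }
}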

From source file:com.auditbucket.test.functional.TestAuditIntegration.java

@Test
public void stressWithHighVolume() throws Exception {
    assumeTrue(!ignoreMe);
    logger.info("stressWithHighVolume started");
    SecurityContextHolder.getContext().setAuthentication(authA);
    //Neo4jHelper.cleanDb(graphDatabaseService, true);
    regService.registerSystemUser(new RegistrationBean("TestTrack", email, "bah"));

    int auditMax = 10;
    int logMax = 10;
    int fortress = 1;
    //fortressMax = 10;
    String simpleJson = "{\"who\":";
    ArrayList<Long> list = new ArrayList<>();

    logger.info("FortressCount: " + fortressMax + " AuditCount: " + auditMax + " LogCount: " + logMax);
    logger.info(
            "We will be expecting a total of " + (auditMax * logMax * fortressMax) + " messages to be handled");

    StopWatch watch = new StopWatch();
    watch.start();
    double splitTotals = 0;
    long totalRows = 0;
    int auditSleepCount; // Discount all the time we spent sleeping

    DecimalFormat f = new DecimalFormat("##.000");

    while (fortress <= fortressMax) {
        String fortressName = "bulkloada" + fortress;
        int audit = 1;
        long requests = 0;
        auditSleepCount = 0;

        Fortress iFortress = fortressService.registerFortress(new FortressInputBean(fortressName, false));
        requests++;
        logger.info("Starting run for " + fortressName);
        while (audit <= auditMax) {
            boolean searchChecked = false;
            MetaInputBean aib = new MetaInputBean(iFortress.getName(), fortress + "olivia@sunnybell.com",
                    "CompanyNode", new DateTime(), "ABC" + audit);
            TrackResultBean arb = mediationFacade.createHeader(aib, null);
            requests++;
            int log = 1;
            while (log <= logMax) {
                //String escJson = Helper.getBigJsonText(log);
                //trackService.createLog(new LogInputBean(arb.getMetaKey(), aib.getFortressUser(), new DateTime(), escJson ));

                createLog(simpleJson, aib, arb, log);
                //Thread.sleep(100);
                requests++;
                if (!searchChecked) {
                    searchChecked = true;
                    MetaHeader metaHeader = trackService.getHeader(arb.getMetaKey());
                    requests++;
                    int checkCount = waitForHeaderToUpdate(metaHeader);
                    auditSleepCount = auditSleepCount + (400 * checkCount);
                    requests = requests + checkCount;
                } // searchCheck done
                log++;
            } // Logs created
            audit++;
        } // Audit headers finished with
        watch.split();
        double fortressRunTime = (watch.getSplitTime() - auditSleepCount) / 1000d;
        logger.info("*** " + iFortress.getName() + " took " + fortressRunTime + "  avg processing time for ["
                + requests + "] RPS= " + f.format(fortressRunTime / requests) + ". Requests per second "
                + f.format(requests / fortressRunTime));

        splitTotals = splitTotals + fortressRunTime;
        totalRows = totalRows + requests;
        watch.reset();
        watch.start();
        list.add(iFortress.getId());
        fortress++;
    }

    logger.info("*** Created data set in " + f.format(splitTotals) + " fortress avg = "
            + f.format(splitTotals / fortressMax) + " avg processing time per request "
            + f.format(splitTotals / totalRows) + ". Requests per second " + f.format(totalRows / splitTotals));
    watch.reset();
    Thread.sleep(5000); // give things a final chance to complete

    validateLogsIndexed(list, auditMax, logMax);
    doSearchTests(auditMax, list, watch);
}

From source file:org.apache.james.cli.ServerCmd.java

/**
 * Main method to initialize the class.
 *
 * @param args Command-line arguments.
 */
public static void main(String[] args) {

    try {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        CommandLine cmd = parseCommandLine(args);
        CmdType cmdType = new ServerCmd(new JmxServerProbe(cmd.getOptionValue(HOST_OPT_LONG), getPort(cmd)))
                .executeCommandLine(cmd);
        stopWatch.split();
        print(new String[] { Joiner.on(' ').join(cmdType.getCommand(), "command executed successfully in",
                stopWatch.getSplitTime(), "ms.") }, System.out);
        stopWatch.stop();
        System.exit(0);
    } catch (JamesCliException e) {
        failWithMessage(e.getMessage());
    } catch (ParseException e) {
        failWithMessage("Error parsing command line: " + e.getMessage());
    } catch (IOException ioe) {
        failWithMessage("Error connecting to remote JMX agent: " + ioe.getMessage());
    } catch (Exception e) {
        failWithMessage("Error while executing command: " + e.getMessage());
    }

}

From source file:org.apache.tinkerpop.gremlin.server.handler.IteratorHandler.java

@Override
public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise)
        throws Exception {
    if (msg instanceof Pair) {
        try {
            final Pair pair = (Pair) msg;
            final Iterator itty = (Iterator) pair.getValue1();
            final RequestMessage requestMessage = (RequestMessage) pair.getValue0();

            // the batch size can be overridden by the request
            final int resultIterationBatchSize = (Integer) requestMessage.optionalArgs(Tokens.ARGS_BATCH_SIZE)
                    .orElse(settings.resultIterationBatchSize);

            // timer for the total serialization time
            final StopWatch stopWatch = new StopWatch();

            final EventExecutorGroup executorService = ctx.executor();
            final Future<?> iteration = executorService.submit((Callable<Void>) () -> {
                logger.debug("Preparing to iterate results from - {} - in thread [{}]", requestMessage,
                        Thread.currentThread().getName());

                stopWatch.start();

                List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);
                while (itty.hasNext()) {
                    aggregate.add(itty.next());

                    // send back a page of results if batch size is met or if it's the end of the results being
                    // iterated
                    if (aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                        final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                                : ResponseStatusCode.SUCCESS;
                        ctx.writeAndFlush(
                                ResponseMessage.build(requestMessage).code(code).result(aggregate).create());
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    }

                    stopWatch.split();
                    if (stopWatch.getSplitTime() > settings.serializedResponseTimeout)
                        throw new TimeoutException(
                                "Serialization of the entire response exceeded the serializedResponseTimeout setting");

                    stopWatch.unsplit();
                }

                return null;
            });

            iteration.addListener(f -> {
                stopWatch.stop();

                if (!f.isSuccess()) {
                    final String errorMessage = String.format(
                            "Response iteration and serialization exceeded the configured threshold for request [%s] - %s",
                            msg, f.cause().getMessage());
                    logger.warn(errorMessage);
                    ctx.writeAndFlush(
                            ResponseMessage.build(requestMessage).code(ResponseStatusCode.SERVER_ERROR_TIMEOUT)
                                    .statusMessage(errorMessage).create());
                }
            });
        } finally {
            ReferenceCountUtil.release(msg);
        }

    } else {
        ctx.write(msg, promise);
    }
}
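Unlike the earlier polling loops, this handler pairs each split() with an unsplit(), clearing the split state once the timeout check passes. A distilled sketch of that timeout guard, where the threshold and the per-page sleep are hypothetical placeholders for settings.serializedResponseTimeout and the actual serialization work:

import java.util.concurrent.TimeoutException;

import org.apache.commons.lang.time.StopWatch;

public class SerializationTimeoutGuard {
    public static void main(String[] args) throws Exception {
        long serializedResponseTimeoutMs = 30000; // hypothetical threshold

        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        for (int page = 0; page < 100; page++) {
            Thread.sleep(5); // stand-in for serializing and flushing one page of results

            stopWatch.split();
            if (stopWatch.getSplitTime() > serializedResponseTimeoutMs) {
                throw new TimeoutException("Serialization of the entire response exceeded the timeout");
            }
            stopWatch.unsplit(); // clear the split state before the next iteration
        }

        stopWatch.stop();
    }
}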

From source file:org.apache.tinkerpop.gremlin.server.op.AbstractEvalOpProcessor.java

/**
 * Called by {@link #evalOpInternal} when iterating a result set. Implementers should respect the
 * {@link Settings#serializedResponseTimeout} configuration and break the serialization process if
 * it begins to take too long to do so, throwing a {@link java.util.concurrent.TimeoutException} in such
 * cases.
 *
 * @param context The Gremlin Server {@link Context} object containing settings, request message, etc.
 * @param itty The result to iterate
 * @throws TimeoutException if the time taken to serialize the entire result set exceeds the allowable time.
 */
protected void handleIterator(final Context context, final Iterator itty)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // sessionless requests are always transaction managed, but in-session requests are configurable.
    final boolean managedTransactionsForRequest = manageTransactions ? true
            : (Boolean) msg.getArgs().getOrDefault(Tokens.ARGS_MANAGE_TRANSACTION, false);

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        if (managedTransactionsForRequest)
            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while.  this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();

    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize.  Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        if (aggregate.size() < resultIterationBatchSize)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval.  as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code);
                } catch (Exception ex) {
                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    if (manageTransactions)
                        attemptRollback(msg, context.getGraphManager(), settings.strictTransactionManagement);
                    break;
                }

                // only need to reset the aggregation list if there's more stuff to write
                if (itty.hasNext())
                    aggregate = new ArrayList<>(resultIterationBatchSize);
                else {
                    // iteration and serialization are both complete which means this finished successfully. note that
                    // errors internal to script eval or timeout will roll back given GremlinServer's global configurations.
                    // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                    // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                    // there are no more items to iterate and serialization is complete
                    if (managedTransactionsForRequest)
                        attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);

                    // exit the result iteration loop as there are no more results left.  using this external control
                    // because of the above commit.  some graphs may open a new transaction on the call to
                    // hasNext()
                    hasMore = false;
                }

                // the flush is called after the commit has potentially occurred.  in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }

        stopWatch.unsplit();
    }

    stopWatch.stop();
}

From source file:org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor.java

/**
 * Provides a generic way of iterating a result set back to the client. Implementers should respect the
 * {@link Settings#serializedResponseTimeout} configuration and break the serialization process if
 * it begins to take too long to do so, throwing a {@link java.util.concurrent.TimeoutException} in such
 * cases.
 *
 * @param context The Gremlin Server {@link Context} object containing settings, request message, etc.
 * @param itty The result to iterate
 * @throws TimeoutException if the time taken to serialize the entire result set exceeds the allowable time.
 */
protected void handleIterator(final Context context, final Iterator itty)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // sessionless requests are always transaction managed, but in-session requests are configurable.
    final boolean managedTransactionsForRequest = manageTransactions ? true
            : (Boolean) msg.getArgs().getOrDefault(Tokens.ARGS_MANAGE_TRANSACTION, false);

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        if (managedTransactionsForRequest)
            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while.  this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();

    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize.  Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval.  as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    if (managedTransactionsForRequest)
                        attemptRollback(msg, context.getGraphManager(), settings.strictTransactionManagement);
                    break;
                }

                // track whether there is anything left in the iterator because it needs to be accessed after
                // the transaction could be closed - in that case a call to hasNext() could open a new transaction
                // unintentionally
                final boolean moreInIterator = itty.hasNext();

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (moreInIterator)
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will roll back given GremlinServer's global configurations.
                        // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        if (managedTransactionsForRequest)
                            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);

                        // exit the result iteration loop as there are no more results left.  using this external control
                        // because of the above commit.  some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!moreInIterator)
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred.  in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }

        stopWatch.unsplit();
    }

    stopWatch.stop();
}

From source file:org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor.java

protected void handleIterator(final Context context, final Iterator itty, final Graph graph)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        onTraversalSuccess(graph, context);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while.  this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();

    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize.  Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval.  as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    onError(graph, context);
                    break;
                }

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (itty.hasNext())
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will roll back given GremlinServer's global configurations.
                        // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        onTraversalSuccess(graph, context);

                        // exit the result iteration loop as there are no more results left.  using this external control
                        // because of the above commit.  some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!itty.hasNext())
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred.  in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }

        stopWatch.unsplit();
    }

    stopWatch.stop();
}