Example usage for java.util.concurrent FutureTask cancel

List of usage examples for java.util.concurrent FutureTask cancel

Introduction

This page lists example usages of the java.util.concurrent.FutureTask.cancel method.

Prototype

public boolean cancel(boolean mayInterruptIfRunning) 
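
Before the project examples below, here is a minimal sketch of the typical pattern: submit a FutureTask to an executor, wait for the result with a timeout, and call cancel(true) to interrupt the worker if it takes too long. cancel returns false if the task has already completed, and a later get() on a cancelled task throws CancellationException. The class name CancelSketch is just for illustration and is not part of any of the projects quoted below.

import java.util.concurrent.*;

public class CancelSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        FutureTask<String> task = new FutureTask<>(() -> {
            Thread.sleep(5_000); // simulated slow work; responds to interruption
            return "done";
        });
        executor.execute(task);
        try {
            // Wait up to one second for the result.
            System.out.println(task.get(1, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            // Took too long: cancel and interrupt the worker thread if it is still running.
            boolean cancelled = task.cancel(true);
            System.out.println("cancel(true) returned " + cancelled);
            // After a successful cancel, task.get() would throw CancellationException.
        } finally {
            executor.shutdown();
        }
    }
}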

Usage

From source file:ubic.gemma.loader.util.fetcher.FtpArchiveFetcher.java

/**
 * @param toUnpack the downloaded archive file to expand
 */
protected void unPack(final File toUnpack) {
    FutureTask<Boolean> future = new FutureTask<Boolean>(new Callable<Boolean>() {
        @Override
        @SuppressWarnings("synthetic-access")
        public Boolean call() {
            File extractedFile = new File(FileTools.chompExtension(toUnpack.getAbsolutePath()));
            /*
             * Decide if an existing file is plausibly usable. Err on the side of caution.
             */
            if (allowUseExisting && extractedFile.canRead() && extractedFile.length() >= toUnpack.length()
                    && !FileUtils.isFileNewer(toUnpack, extractedFile)) {
                log.warn("Expanded file exists, skipping re-expansion: " + extractedFile);
                return Boolean.TRUE;
            }

            if (expander != null) {
                expander.setSrc(toUnpack);
                expander.setDest(toUnpack.getParentFile());
                expander.perform();
            } else if (toUnpack.getAbsolutePath().toLowerCase().endsWith("zip")) {
                try {
                    FileTools.unZipFiles(toUnpack.getAbsolutePath());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }

            } else { // gzip.
                try {
                    FileTools.unGzipFile(toUnpack.getAbsolutePath());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }

            return Boolean.TRUE;
        }

    });
    ExecutorService executor = Executors.newSingleThreadExecutor();

    executor.execute(future);
    executor.shutdown();

    StopWatch s = new StopWatch();
    s.start();
    while (!future.isDone() && !future.isCancelled()) {
        try {
            Thread.sleep(INFO_UPDATE_INTERVAL);
        } catch (InterruptedException ie) {
            future.cancel(true);
            return;
        }
        log.info("Unpacking archive ... " + Math.floor(s.getTime() / 1000.0) + " seconds elapsed");
    }
}

From source file:ubic.gemma.core.loader.util.fetcher.FtpArchiveFetcher.java

protected void unPack(final File toUnpack) {
    FutureTask<Boolean> future = new FutureTask<>(new Callable<Boolean>() {
        @Override
        @SuppressWarnings("synthetic-access")
        public Boolean call() {
            File extractedFile = new File(FileTools.chompExtension(toUnpack.getAbsolutePath()));
            /*
             * Decide if an existing file is plausibly usable. Err on the side of caution.
             */
            if (allowUseExisting && extractedFile.canRead() && extractedFile.length() >= toUnpack.length()
                    && !FileUtils.isFileNewer(toUnpack, extractedFile)) {
                AbstractFetcher.log.warn("Expanded file exists, skipping re-expansion: " + extractedFile);
                return Boolean.TRUE;
            }

            if (expander != null) {
                expander.setSrc(toUnpack);
                expander.setDest(toUnpack.getParentFile());
                expander.perform();
            } else if (toUnpack.getAbsolutePath().toLowerCase().endsWith("zip")) {
                try {
                    FileTools.unZipFiles(toUnpack.getAbsolutePath());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }

            } else { // gzip.
                try {
                    FileTools.unGzipFile(toUnpack.getAbsolutePath());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }

            return Boolean.TRUE;
        }

    });
    ExecutorService executor = Executors.newSingleThreadExecutor();

    executor.execute(future);
    executor.shutdown();

    StopWatch s = new StopWatch();
    s.start();
    while (!future.isDone() && !future.isCancelled()) {
        try {
            Thread.sleep(AbstractFetcher.INFO_UPDATE_INTERVAL);
        } catch (InterruptedException ie) {
            future.cancel(true);
            return;
        }
        AbstractFetcher.log
                .info("Unpacking archive ... " + Math.floor(s.getTime() / 1000.0) + " seconds elapsed");
    }
}

From source file:org.alfresco.repo.content.metadata.AbstractMappingMetadataExtracter.java

/**
 * Calls the {@link AbstractMappingMetadataExtracter#extractRaw(ContentReader)} method
 * using the given limits.
 * <p>
 * Currently the only limit supported by {@link MetadataExtracterLimits} is a timeout
 * so this method uses {@link AbstractMappingMetadataExtracter#getExecutorService()}
 * to execute a {@link FutureTask} with any timeout defined.
 * <p>
 * If no timeout limit is defined or is unlimited (-1),
 * the <code>extractRaw</code> method is called directly.
 * 
 * @param reader        the document to extract the values from.  This stream provided by
 *                      the reader must be closed if accessed directly.
 * @param limits        the limits to impose on the extraction
 * @return              Returns a map of document property values keyed by property name.
 * @throws Throwable    All exception conditions can be handled.
 */
private Map<String, Serializable> extractRaw(ContentReader reader, MetadataExtracterLimits limits)
        throws Throwable {
    if (limits == null || limits.getTimeoutMs() == -1) {
        return extractRaw(reader);
    }
    FutureTask<Map<String, Serializable>> task = null;
    StreamAwareContentReaderProxy proxiedReader = null;
    try {
        proxiedReader = new StreamAwareContentReaderProxy(reader);
        task = new FutureTask<Map<String, Serializable>>(new ExtractRawCallable(proxiedReader));
        getExecutorService().execute(task);
        return task.get(limits.getTimeoutMs(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        task.cancel(true);
        if (null != proxiedReader) {
            proxiedReader.release();
        }
        throw e;
    } catch (InterruptedException e) {
        // We were asked to stop
        task.cancel(true);
        return null;
    } catch (ExecutionException e) {
        // Unwrap our cause and throw that
        Throwable cause = e.getCause();
        if (cause != null && cause instanceof ExtractRawCallableException) {
            cause = ((ExtractRawCallableException) cause).getCause();
        }
        throw cause;
    }
}

From source file:info.pancancer.arch3.test.TestWorker.java

@Test
public void testWorker_endlessFromConfig() throws Exception {
    HierarchicalINIConfiguration configObj = new HierarchicalINIConfiguration();
    configObj.addProperty("rabbit.rabbitMQQueueName", "seqware");
    configObj.addProperty("rabbit.rabbitMQHost", "localhost");
    configObj.addProperty("rabbit.rabbitMQUser", "guest");
    configObj.addProperty("rabbit.rabbitMQPass", "guest");
    configObj.addProperty("worker.heartbeatRate", "2.5");
    configObj.addProperty("worker.max-runs", "1");
    configObj.addProperty("worker.preworkerSleep", "1");
    configObj.addProperty("worker.postworkerSleep", "1");
    configObj.addProperty("worker.endless", "true");
    configObj.addProperty("worker.hostUserName", System.getProperty("user.name"));

    byte[] body = setupMessage();
    Delivery testDelivery = new Delivery(mockEnvelope, mockProperties, body);
    setupMockQueue(testDelivery);
    Mockito.when(Utilities.parseConfig(anyString())).thenReturn(configObj);

    // Because the cleanup code calls resultHandler.waitFor(), we need to actually execute something, even if it does nothing.
    Mockito.doNothing().when(mockExecutor).execute(any(CommandLine.class),
            any(DefaultExecuteResultHandler.class));

    // This is to mock the cleanup command - we don't really want to execute the command for deleting contents of /datastore, at least not when unit testing on a workstation!
    PowerMockito.whenNew(DefaultExecutor.class).withNoArguments().thenReturn(mockExecutor);

    Mockito.when(mockExecHandler.hasResult()).thenReturn(true);
    PowerMockito.whenNew(DefaultExecuteResultHandler.class).withNoArguments().thenReturn(mockExecHandler);

    final FutureTask<String> tester = new FutureTask<>(new Callable<String>() {
        @Override
        public String call() {
            LOG.info("tester thread started");
            try {
                Worker.main(new String[] { "--config", "src/test/resources/workerConfig.ini", "--uuid",
                        "vm123456", "--pidFile", "/var/run/arch3_worker.pid" });
            } catch (CancellationException | InterruptedException e) {
                LOG.error("Exception caught: " + e.getMessage());
                return e.getMessage();
            } catch (Exception e) {
                e.printStackTrace();
                fail("Unexpected exception");
                return null;
            } finally {
                Mockito.verify(mockAppender, Mockito.atLeastOnce()).doAppend(argCaptor.capture());
                String s = appendEventsIntoString(argCaptor.getAllValues());
                return s;
            }
        }

    });

    final Thread killer = new Thread(new Runnable() {

        @Override
        public void run() {
            LOG.info("killer thread started");
            try {
                // The endless worker will not end on its own (because it's endless) so we need to wait a little bit (2.5 seconds) and
                // then kill it as if it were killed by the command-line script (kill_worker_daemon.sh).
                Thread.sleep(2500);
            } catch (InterruptedException e) {
                e.printStackTrace();
                LOG.error(e.getMessage());
            }
            tester.cancel(true);
        }
    });

    ExecutorService es = Executors.newFixedThreadPool(2);
    es.execute(tester);
    es.execute(killer);

    try {
        tester.get();
    } catch (CancellationException e) {
        Mockito.verify(mockAppender, Mockito.atLeastOnce()).doAppend(argCaptor.capture());
        List<LoggingEvent> tmpList = new LinkedList<LoggingEvent>(argCaptor.getAllValues());
        String output = this.appendEventsIntoString(tmpList);

        assertTrue("--endless flag was detected and set",
                output.contains("The \"--endless\" flag was set, this worker will run endlessly!"));
        int numJobsPulled = StringUtils.countMatches(output, " WORKER IS PREPARING TO PULL JOB FROM QUEUE ");
        LOG.info("Number of jobs attempted: " + numJobsPulled);
        assertTrue("number of jobs attempted > 1", numJobsPulled > 1);
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}

From source file:info.pancancer.arch3.test.TestWorker.java

@Test
public void testWorker_endless() throws Exception {

    byte[] body = setupMessage();
    Delivery testDelivery = new Delivery(mockEnvelope, mockProperties, body);
    setupMockQueue(testDelivery);
    Mockito.when(Utilities.parseJSONStr(anyString())).thenCallRealMethod();
    Mockito.when(Utilities.parseConfig(anyString())).thenCallRealMethod();

    // Because the cleanup code calls resultHandler.waitFor(), we need to actually execute something, even if it does nothing.
    Mockito.doNothing().when(mockExecutor).execute(any(CommandLine.class),
            any(DefaultExecuteResultHandler.class));

    // This is to mock the cleanup command - we don't really want to execute the command for deleting contents of /datastore, at least not when unit testing on a workstation!
    PowerMockito.whenNew(DefaultExecutor.class).withNoArguments().thenReturn(mockExecutor);

    Mockito.when(mockExecHandler.hasResult()).thenReturn(true);
    PowerMockito.whenNew(DefaultExecuteResultHandler.class).withNoArguments().thenReturn(mockExecHandler);

    final FutureTask<String> tester = new FutureTask<>(new Callable<String>() {
        @Override
        public String call() {
            LOG.debug("tester thread started");
            try {
                Worker.main(new String[] { "--config", "src/test/resources/workerConfig.ini", "--uuid",
                        "vm123456", "--endless", "--pidFile", "/var/run/arch3_worker.pid" });
            } catch (CancellationException | InterruptedException e) {
                LOG.error("Exception caught: " + e.getMessage());
                return e.getMessage();
            } catch (Exception e) {
                e.printStackTrace();
                fail("Unexpected exception");
                return null;
            } finally {
                Mockito.verify(mockAppender, Mockito.atLeastOnce()).doAppend(argCaptor.capture());
                String s = appendEventsIntoString(argCaptor.getAllValues());
                return s;
            }
        }

    });

    final Thread killer = new Thread(new Runnable() {

        @Override
        public void run() {
            LOG.debug("killer thread started");
            try {
                // The endless worker will not end on its own (because it's endless) so we need to wait a little bit (2.5 seconds) and
                // then kill it as if it were killed by the command-line script (kill_worker_daemon.sh).
                Thread.sleep(2500);
            } catch (InterruptedException e) {
                e.printStackTrace();
                LOG.error(e.getMessage());
            }
            tester.cancel(true);
        }
    });

    ExecutorService es = Executors.newFixedThreadPool(2);
    es.execute(tester);
    es.execute(killer);
    try {
        tester.get();
    } catch (CancellationException e) {
        Mockito.verify(mockAppender, Mockito.atLeastOnce()).doAppend(argCaptor.capture());
        List<LoggingEvent> tmpList = new LinkedList<>(argCaptor.getAllValues());
        String output = this.appendEventsIntoString(tmpList);
        assertTrue(output.contains("The \"--endless\" flag was set, this worker will run endlessly!"));

        int numJobsPulled = StringUtils.countMatches(output, " WORKER IS PREPARING TO PULL JOB FROM QUEUE ");

        LOG.info("Number of jobs attempted: " + numJobsPulled);
        assertTrue("number of jobs attempted > 1", numJobsPulled > 1);
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}

From source file:org.atomserver.ThrottledAtomServer.java

/**
 * Execute the CallableTask on a ThreadPoolTaskExecutor. <br/>
 * NOTE: the standard Exception handling of AtomServer still happens in the AtomServer class.
 * Any Exception handling done here is for Exceptions that actually are thrown this far up
 * the food chain -- Exceptions that pertain directly to the TaskExecutor --
 * for example, TimeoutException or ExecutionException.
 *
 * @param request      The Abdera RequestContext
 * @param callableTask The CallableTask, which should just be a wrapped call to
 *                     the corresponding super task.
 * @return The Abdera ResponseContext
 */
private ResponseContext executePooledTask(final RequestContext request,
        final Callable<ResponseContext> callableTask) {
    ResponseContext response = null;
    Abdera abdera = request.getServiceContext().getAbdera();

    try {

        FutureTask<ResponseContext> futureTask = new FutureTask(callableTask);
        threadPool.execute(futureTask);

        try {
            logger.debug("starting to wait for the task to complete");
            response = futureTask.get(taskTimeout, TimeUnit.MILLISECONDS);

        } catch (InterruptedException e) {
            // InterruptedException - if the current thread was interrupted while waiting
            // Re-assert the thread's interrupted status
            Thread.currentThread().interrupt();

            logger.error("InterruptedException in executePooledTask: Cause= " + e.getCause() + " Message= "
                    + e.getMessage(), e);
            return getAtomServer().servererror(abdera, request,
                    "InterruptedException occurred:: " + e.getCause(), e);
        } catch (ExecutionException e) {
            // ExecutionException - if the computation threw an exception
            // Because all Exception handling is done in the superclass, AtomServer, we should never get this
            logger.error("ExecutionException in executePooledTask: Cause= " + e.getCause() + " Message= "
                    + e.getMessage(), e);
            return getAtomServer().servererror(abdera, request, "ExecutionException occurred:: " + e.getCause(),
                    e);
        } catch (TimeoutException e) {
            //  TimeoutException - if the wait timed out
            logger.error("TimeoutException in executePooledTask: Cause= " + e.getCause() + " Message= "
                    + e.getMessage(), e);
            return getAtomServer().servererror(abdera, request, "TimeoutException occurred:: " + e.getCause(),
                    e);
        } catch (Exception e) {
            logger.error("Unknown Exception in executePooledTask: Cause= " + e.getCause() + " Message= "
                    + e.getMessage(), e);
            return getAtomServer().servererror(abdera, request, "Unknown Exception occurred:: " + e.getCause(),
                    e);

        } finally {
            // Best practice is to cancel tasks whose result is no longer needed
            // NOTE; task.cancel() is harmless if the task has already completed
            // Interrupt if running...
            futureTask.cancel(true);

            // Help out the garbage collector
            futureTask = null;
        }

    } finally {
        // Log all thread pool statistics at INFO level.
        //  This information is very critical in understanding the effectiveness of the pool
        logThreadPoolStats();
    }
    return response;
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutor.java

/**
 * Evaluate a script and allow for the submission of alteration to the entire evaluation execution lifecycle.
 *
 * @param script the script to evaluate
 * @param language the language to evaluate it in
 * @param boundVars the bindings to evaluate in the context of the script
 * @param lifeCycle a set of functions that can be applied at various stages of the evaluation process
 */
public CompletableFuture<Object> eval(final String script, final String language, final Bindings boundVars,
        final LifeCycle lifeCycle) {
    final String lang = Optional.ofNullable(language).orElse("gremlin-groovy");

    logger.debug("Preparing to evaluate script - {} - in thread [{}]", script,
            Thread.currentThread().getName());

    final Bindings bindings = new SimpleBindings();
    bindings.putAll(globalBindings);
    bindings.putAll(boundVars);

    final CompletableFuture<Object> evaluationFuture = new CompletableFuture<>();
    final FutureTask<Void> f = new FutureTask<>(() -> {
        try {
            lifeCycle.getBeforeEval().orElse(beforeEval).accept(bindings);

            logger.debug("Evaluating script - {} - in thread [{}]", script, Thread.currentThread().getName());

            final Object o = scriptEngines.eval(script, bindings, lang);

            // apply a transformation before sending back the result - useful when trying to force serialization
            // in the same thread that the eval took place given ThreadLocal nature of graphs as well as some
            // transactional constraints
            final Object result = lifeCycle.getTransformResult().isPresent()
                    ? lifeCycle.getTransformResult().get().apply(o)
                    : o;

            // a mechanism for taking the final result and doing something with it in the same thread, but
            // AFTER the eval and transform are done and that future completed.  this provides a final means
            // for working with the result in the same thread as it was eval'd
            if (lifeCycle.getWithResult().isPresent())
                lifeCycle.getWithResult().get().accept(result);

            lifeCycle.getAfterSuccess().orElse(afterSuccess).accept(bindings);

            // the evaluationFuture must be completed after all processing, because an exception in the lifecycle
            // events must be raised to the caller who holds the returned evaluationFuture. in other words, if it
            // occurs before this point, then the handle() method won't be called again if there is an exception
            // that ends up below trying to completeExceptionally()
            evaluationFuture.complete(result);
        } catch (Throwable ex) {
            final Throwable root = null == ex.getCause() ? ex : ExceptionUtils.getRootCause(ex);

            // thread interruptions will typically come as the result of a timeout, so in those cases,
            // check for that situation and convert to TimeoutException
            if (root instanceof InterruptedException)
                evaluationFuture.completeExceptionally(new TimeoutException(String.format(
                        "Script evaluation exceeded the configured 'scriptEvaluationTimeout' threshold of %s ms for request [%s]: %s",
                        scriptEvaluationTimeout, script, root.getMessage())));
            else {
                lifeCycle.getAfterFailure().orElse(afterFailure).accept(bindings, root);
                evaluationFuture.completeExceptionally(root);
            }
        }

        return null;
    });

    executorService.execute(f);

    if (scriptEvaluationTimeout > 0) {
        // Schedule a timeout in the thread pool for future execution
        final ScheduledFuture<?> sf = scheduledExecutorService.schedule(() -> {
            logger.warn("Timing out script - {} - in thread [{}]", script, Thread.currentThread().getName());
            if (!f.isDone()) {
                lifeCycle.getAfterTimeout().orElse(afterTimeout).accept(bindings);
                f.cancel(true);
            }
        }, scriptEvaluationTimeout, TimeUnit.MILLISECONDS);

        // Cancel the scheduled timeout if the eval future is complete or the script evaluation failed
        // with exception
        evaluationFuture.handleAsync((v, t) -> {
            logger.debug(
                    "Killing scheduled timeout on script evaluation as the eval completed (possibly with exception).");
            return sf.cancel(true);
        });
    }

    return evaluationFuture;
}

From source file:org.atomserver.core.dbstore.DBBasedAtomCollection.java

protected <T> T executeTransactionally(final TransactionalTask<T> task) {
    final String t_user = AtomServerUserInfo.getUser();
    FutureTask<T> timeoutTask = null;
    try {
        // create new timeout task
        timeoutTask = new FutureTask<T>(new Callable() {
            public T call() throws Exception {
                return (T) getTransactionTemplate().execute(new TransactionCallback() {
                    public Object doInTransaction(TransactionStatus transactionStatus) {
                        AtomServerUserInfo.setUser(t_user);
                        StopWatch stopWatch = new AtomServerStopWatch();
                        try {
                            // NOTE: we will actually wait for all of these to possibly finish,
                            //       unless the methods below honor InterruptedExceptions
                            //       BUT the transaction will still be rolled back eventually by the catch below.
                            getWriteEntriesDAO().acquireLock();
                            return task.execute();

                        } catch (Exception ee) {
                            if (ee instanceof EntryNotFoundException && (((EntryNotFoundException) ee)
                                    .getType() == EntryNotFoundException.EntryNotFoundType.DELETE)) {
                                log.warn("Exception in DB transaction", ee);
                            } else {
                                log.error("Exception in DB transaction", ee);
                            }

                            // the following is not really required, but ensures that this will rollback, without question
                            transactionStatus.setRollbackOnly();

                            if (ee instanceof InterruptedException) {
                                // InterruptedException - if the current thread was interrupted while waiting
                                // Re-assert the thread's interrupted status
                                Thread.currentThread().interrupt();
                            }
                            // NOTE: per the Spring manual, a transaction is ONLY rolled back
                            //       when a RuntimeException is thrown!!!
                            //       And the timeout causes an InterruptedException (via task.cancel()),
                            //       which is NOT Runtime!!
                            //       (AtomServerException extends RuntimeException)
                            throw (ee instanceof AtomServerException) ? (AtomServerException) ee
                                    : new AtomServerException("A " + ee.getCause().getClass().getSimpleName()
                                            + " caught in Transaction", ee.getCause());

                        } finally {
                            stopWatch.stop("DB.txn", "DB.txn");
                        }
                    }
                });
            }
        });
        // start timeout task in a new thread
        new Thread(timeoutTask).start();

        // wait for the execution to finish, timeout after X secs
        int timeout = (getTransactionTemplate().getTimeout() > 0) ? getTransactionTemplate().getTimeout()
                : DEFAULT_TXN_TIMEOUT;

        return timeoutTask.get(timeout, TimeUnit.SECONDS);

    } catch (AtomServerException ee) {
        log.debug("Exception in DB TXN: " + ee.getClass().getSimpleName() + " " + ee.getMessage());
        throw ee;
    } catch (ExecutionException ee) {
        log.debug("Exception in DB TXN: " + ee.getClass().getSimpleName() + " " + ee.getMessage());
        throw (ee.getCause() == null)
                ? new AtomServerException("A " + ee.getClass().getSimpleName() + " caught in Transaction", ee)
                : (ee.getCause() instanceof AtomServerException) ? (AtomServerException) ee.getCause()
                        : new AtomServerException(
                                "A " + ee.getCause().getClass().getSimpleName() + " caught in Transaction",
                                ee.getCause());
    } catch (Exception ee) {
        log.debug("Exception in DB TXN: " + ee.getClass().getSimpleName() + " " + ee.getMessage());
        throw new AtomServerException("A " + ee.getClass().getSimpleName() + " caught in Transaction", ee);
    } finally {
        // NOTE: We MUST call timeoutTask.cancel() here.
        //       This is the ONLY way that we see an InterruptedException in the transaction task,
        //       and thus, the ONLY way that we can make the transaction rollback.
        // NOTE: Calling cancel() on a completed task is a noop.
        log.debug("@@@@@@@@@@@@@@ Calling task.cancel");
        timeoutTask.cancel(true);
        timeoutTask = null;
    }
}

From source file:com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager.java

/**
 * Extracts the solutions from a SPARQL query.
 *
 * @param response
 *            The connection from which to read the results.
 * @param listener
 *            The listener to notify when the query result has been closed
 *            (optional).
 * 
 * @return The results.
 * 
 * @throws Exception
 *             If anything goes wrong.
 */
public TupleQueryResult tupleResults(final ConnectOptions opts, final UUID queryId,
        final IPreparedQueryListener listener) throws Exception {

    // listener handling the http response.
    JettyResponseListener response = null;
    // future for parsing that response (in the background).
    FutureTask<Void> ft = null;
    // iteration pattern returned to caller. once they hold this they are
    // responsible for cleaning up the request by calling close().
    TupleQueryResultImpl tqrImpl = null;
    try {

        response = doConnect(opts);

        checkResponseCode(response);

        final String contentType = response.getContentType();

        final MiniMime mimeType = new MiniMime(contentType);

        final TupleQueryResultFormat format = TupleQueryResultFormat.forMIMEType(mimeType.getMimeType());

        if (format == null)
            throw new IOException(
                    "Could not identify format for service response: serviceURI=" + opts.getBestRequestURL()
                            + ", contentType=" + contentType + " : response=" + response.getResponseBody());

        final TupleQueryResultParserFactory parserFactory = TupleQueryResultParserRegistry.getInstance()
                .get(format);

        if (parserFactory == null)
            throw new IOException("No parser for format for service response: serviceURI="
                    + opts.getBestRequestURL() + ", contentType=" + contentType + ", format=" + format
                    + " : response=" + response.getResponseBody());

        final TupleQueryResultParser parser = parserFactory.getParser();

        final BackgroundTupleResult result = new BackgroundTupleResult(parser, response.getInputStream());

        final MapBindingSet bindings = new MapBindingSet();

        final InsertBindingSetCursor cursor = new InsertBindingSetCursor(result, bindings);

        // Wrap as FutureTask so we can cancel.
        ft = new FutureTask<Void>(result, null/* result */);

        /*
         * Submit task for execution. It will asynchronously consume the
         * response, pumping solutions into the cursor.
         * 
         * Note: Can throw a RejectedExecutionException!
         */
        executor.execute(ft);

        /*
         * Note: This will block until the binding names are received, so it
         * can not be done until we submit the BackgroundTupleResult for
         * execution.
         */
        final List<String> list = new ArrayList<String>(result.getBindingNames());

        /*
         * The task was accepted by the executor. Wrap with iteration
         * pattern. Once this object is returned to the caller they are
         * responsible for calling close() to provide proper error cleanup
         * of the resources associated with the request.
         */
        final TupleQueryResultImpl tmp = new TupleQueryResultImpl(list, cursor) {

            private final AtomicBoolean notDone = new AtomicBoolean(true);

            @Override
            public boolean hasNext() throws QueryEvaluationException {

                final boolean hasNext = super.hasNext();

                if (hasNext == false) {

                    notDone.set(false);

                }

                return hasNext;

            }

            @Override
            public void handleClose() throws QueryEvaluationException {

                try {

                    super.handleClose();

                } finally {

                    if (notDone.compareAndSet(true, false)) {

                        try {
                            cancel(queryId);
                        } catch (Exception ex) {
                            log.warn(ex);
                        }

                    }

                    /*
                     * Notify the listener.
                     */
                    if (listener != null) {
                        listener.closed(queryId);
                    }

                }

            };

        };

        /*
         * Return the tuple query result listener to the caller. They now
         * have responsibility for calling close() on that object in order
         * to close the http connection and release the associated
         * resources.
         */
        return (tqrImpl = tmp);

    } finally {

        if (response != null && tqrImpl == null) {
            /*
             * Error handling code path. We have an http response listener
             * but we were not able to setup the tuple query result
             * listener.
             */
            if (ft != null) {
                /*
                 * We submitted the task to parse the response. Since the
                 * code is not returning normally (tqrImpl:=null) we cancel
                 * the FutureTask for the background parse of that response.
                 */
                ft.cancel(true/* mayInterruptIfRunning */);
            }
            // Abort the http response handling.
            response.abort();
            try {
                /*
                 * POST back to the server to cancel the request in case it
                 * is still running on the server.
                 */
                cancel(queryId);
            } catch (Exception ex) {
                log.warn(ex);
            }
            if (listener != null) {
                listener.closed(queryId);
            }
        }

    }

}