Example usage for java.util.concurrent Future cancel

List of usage examples for java.util.concurrent Future cancel

Introduction

On this page you can find example usages of java.util.concurrent.Future.cancel.

Prototype

boolean cancel(boolean mayInterruptIfRunning);

Source Link

Document

Attempts to cancel execution of this task.

Usage

From source file:com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java

public List<QueryResultWrapper> parallelQuery(List<QueryWorker> queryWorkers) throws BackendException {
    // Fan the query workers out to the client thread pool and gather results
    // in completion order.
    final CompletionService<QueryResultWrapper> completion = new ExecutorCompletionService<>(clientThreadPool);
    final List<Future<QueryResultWrapper>> pending = Lists.newLinkedList();
    for (final QueryWorker worker : queryWorkers) {
        pending.add(completion.submit(worker));
    }

    final List<QueryResultWrapper> gathered = Lists.newLinkedList();
    boolean wasInterrupted = false;
    try {
        int remaining = queryWorkers.size();
        while (remaining-- > 0) {
            try {
                gathered.add(completion.take().get());
            } catch (InterruptedException e) {
                wasInterrupted = true;
                // fail out because titan does not poll this thread for interrupted anywhere
                throw new BackendRuntimeException("was interrupted during parallelQuery");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, QUERY);
            }
        }
    } finally {
        // Cancel whatever has not completed yet; running tasks are interrupted
        // only when this thread itself was interrupted.
        for (final Future<QueryResultWrapper> future : pending) {
            if (!future.isDone()) {
                future.cancel(wasInterrupted /* mayInterruptIfRunning */);
            }
        }
        if (wasInterrupted) {
            // restore the interrupt status on this thread before failing out
            Thread.currentThread().interrupt();
        }
    }
    return gathered;
}

From source file:com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java

public Map<StaticBuffer, GetItemResult> parallelGetItem(List<GetItemWorker> workers) throws BackendException {
    // Fan the GetItem workers out to the client thread pool and collect the
    // results keyed by their Titan key as each one completes.
    final CompletionService<GetItemResultWrapper> completion = new ExecutorCompletionService<>(clientThreadPool);
    final List<Future<GetItemResultWrapper>> pending = Lists.newLinkedList();
    for (final GetItemWorker worker : workers) {
        pending.add(completion.submit(worker));
    }

    final Map<StaticBuffer, GetItemResult> resultsByKey = Maps.newHashMap();
    boolean wasInterrupted = false;
    try {
        int remaining = workers.size();
        while (remaining-- > 0) {
            try {
                final GetItemResultWrapper wrapper = completion.take().get();
                resultsByKey.put(wrapper.getTitanKey(), wrapper.getDynamoDBResult());
            } catch (InterruptedException e) {
                wasInterrupted = true;
                throw new BackendRuntimeException("was interrupted during parallelGet");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, GET_ITEM);
            }
        }
    } finally {
        // Cancel outstanding work; interrupt running tasks only when this
        // thread was interrupted.
        for (final Future<GetItemResultWrapper> future : pending) {
            if (!future.isDone()) {
                future.cancel(wasInterrupted /* mayInterruptIfRunning */);
            }
        }
        if (wasInterrupted) {
            // restore the interrupt flag before propagating the failure
            Thread.currentThread().interrupt();
        }
    }
    return resultsByKey;
}

From source file:com.amazon.janusgraph.diskstorage.dynamodb.DynamoDBDelegate.java

public List<QueryResultWrapper> parallelQuery(List<QueryWorker> queryWorkers) throws BackendException {
    // Submit every query worker to the shared pool and harvest results in
    // whatever order they finish.
    final CompletionService<QueryResultWrapper> completion = new ExecutorCompletionService<>(clientThreadPool);
    final List<Future<QueryResultWrapper>> pending = Lists.newLinkedList();
    for (final QueryWorker worker : queryWorkers) {
        pending.add(completion.submit(worker));
    }

    final List<QueryResultWrapper> gathered = Lists.newLinkedList();
    boolean wasInterrupted = false;
    try {
        int remaining = queryWorkers.size();
        while (remaining-- > 0) {
            try {
                gathered.add(completion.take().get());
            } catch (InterruptedException e) {
                wasInterrupted = true;
                // fail out because janusgraph does not poll this thread for interrupted anywhere
                throw new BackendRuntimeException("was interrupted during parallelQuery");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, QUERY);
            }
        }
    } finally {
        // Cancel any unfinished futures; only interrupt running tasks when
        // this thread itself was interrupted.
        for (final Future<QueryResultWrapper> future : pending) {
            if (!future.isDone()) {
                future.cancel(wasInterrupted /* mayInterruptIfRunning */);
            }
        }
        if (wasInterrupted) {
            // restore the interrupt status before failing out
            Thread.currentThread().interrupt();
        }
    }
    return gathered;
}

From source file:com.amazon.janusgraph.diskstorage.dynamodb.DynamoDBDelegate.java

public Map<StaticBuffer, GetItemResult> parallelGetItem(List<GetItemWorker> workers) throws BackendException {
    // Run all GetItem workers in parallel, mapping each JanusGraph key to its
    // DynamoDB result as the workers complete.
    final CompletionService<GetItemResultWrapper> completion = new ExecutorCompletionService<>(clientThreadPool);
    final List<Future<GetItemResultWrapper>> pending = Lists.newLinkedList();
    for (final GetItemWorker worker : workers) {
        pending.add(completion.submit(worker));
    }

    final Map<StaticBuffer, GetItemResult> resultsByKey = Maps.newHashMap();
    boolean wasInterrupted = false;
    try {
        int remaining = workers.size();
        while (remaining-- > 0) {
            try {
                final GetItemResultWrapper wrapper = completion.take().get();
                resultsByKey.put(wrapper.getJanusGraphKey(), wrapper.getDynamoDBResult());
            } catch (InterruptedException e) {
                wasInterrupted = true;
                throw new BackendRuntimeException("was interrupted during parallelGet");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, GET_ITEM);
            }
        }
    } finally {
        // Cancel whatever has not finished; interrupt running tasks only when
        // this thread was interrupted.
        for (final Future<GetItemResultWrapper> future : pending) {
            if (!future.isDone()) {
                future.cancel(wasInterrupted /* mayInterruptIfRunning */);
            }
        }
        if (wasInterrupted) {
            // restore the interrupt flag before propagating the failure
            Thread.currentThread().interrupt();
        }
    }
    return resultsByKey;
}

From source file:czd.lib.network.AsyncHttpClient.java

/**
 * Cancels all pending (or potentially active) requests.
 * <p>&nbsp;</p> <b>Note:</b> This will only affect requests which were created with a non-null
 * android Context. This method is intended to be used in the onDestroy method of your android
 * activities to destroy all requests which are no longer required.
 *
 * @param mayInterruptIfRunning specifies if active requests should be cancelled along with
 *                              pending requests.
 */
public void cancelAllRequests(boolean mayInterruptIfRunning) {
    for (final List<WeakReference<Future<?>>> contextRequests : requestMap.values()) {
        if (contextRequests == null) {
            continue;
        }
        for (final WeakReference<Future<?>> requestRef : contextRequests) {
            // The weak reference may already have been cleared by the GC.
            final Future<?> request = requestRef.get();
            if (request != null) {
                request.cancel(mayInterruptIfRunning);
            }
        }
    }
    requestMap.clear();
}

From source file:org.springframework.amqp.rabbit.admin.RabbitBrokerAdmin.java

@ManagedOperation
public void startBrokerApplication() {
    // Starts the Rabbit application, first checking current broker state:
    // no-op when already ready, start the whole node when the process is dead.
    RabbitStatus status = getStatus();
    if (status.isReady()) {
        logger.info("Rabbit Application already running.");
        return;
    }
    if (!status.isAlive()) {
        logger.info("Rabbit Process not running.");
        startNode();
        return;
    }
    logger.info("Starting Rabbit Application.");

    // This call in particular seems to be prone to hanging, so do it in the background...
    final CountDownLatch latch = new CountDownLatch(1);
    Future<Object> result = executor.submit(new Callable<Object>() {
        public Object call() throws Exception {
            try {
                return executeAndConvertRpc("rabbit", "start");
            } finally {
                // Signal completion whether the RPC succeeded or threw.
                latch.countDown();
            }
        }
    });
    boolean started = false;
    try {
        started = latch.await(timeout, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Re-assert the interrupt, stop the background RPC, and give up.
        Thread.currentThread().interrupt();
        result.cancel(true);
        return;
    }
    // NOTE(review): when the latch times out (started == false) the background
    // task is left running uncancelled — confirm that is intentional.
    if (timeout > 0 && started) {
        // RPC returned in time; if the broker still is not ready and the task
        // is somehow live, interrupt it.
        if (!waitForReadyState() && !result.isDone()) {
            result.cancel(true);
        }
    }
}

From source file:org.apache.nifi.registry.bootstrap.RunNiFiRegistry.java

private void handleLogging(final Process process) {
    // Cancel the log-forwarding tasks attached to any previous process.
    final Set<Future<?>> previousFutures = loggingFutures;
    if (previousFutures != null) {
        for (final Future<?> previous : previousFutures) {
            previous.cancel(false);
        }
    }

    // Pump the new process's standard output into the StdOut logger.
    final Future<?> stdOutFuture = loggingExecutor.submit(new Runnable() {
        @Override
        public void run() {
            final Logger stdOutLogger = LoggerFactory.getLogger("org.apache.nifi.registry.StdOut");
            try (final BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    stdOutLogger.info(line);
                }
            } catch (IOException e) {
                defaultLogger.error("Failed to read from NiFi Registry's Standard Out stream", e);
            }
        }
    });

    // Pump the new process's standard error into the StdErr logger.
    final Future<?> stdErrFuture = loggingExecutor.submit(new Runnable() {
        @Override
        public void run() {
            final Logger stdErrLogger = LoggerFactory.getLogger("org.apache.nifi.registry.StdErr");
            try (final BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getErrorStream()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    stdErrLogger.error(line);
                }
            } catch (IOException e) {
                defaultLogger.error("Failed to read from NiFi Registry's Standard Error stream", e);
            }
        }
    });

    // Remember the new futures so the next invocation can cancel them.
    final Set<Future<?>> futures = new HashSet<>();
    futures.add(stdOutFuture);
    futures.add(stdErrFuture);
    this.loggingFutures = futures;
}

From source file:de.blizzy.documentr.search.PageIndex.java

private SearchResult findPages(String searchText, int page, Authentication authentication,
        IndexSearcher searcher) throws ParseException, IOException, TimeoutException {

    // Parse the query in the background while the visible-document bits are computed.
    Future<Query> parsedQueryFuture = taskExecutor.submit(new ParseQueryTask(searchText, analyzer));
    Bits visibleDocIds = getVisibleDocIds(searcher, authentication);

    Query query;
    try {
        query = parsedQueryFuture.get(INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        // Surface parse failures as-is; anything else becomes unchecked.
        Throwable cause = e.getCause();
        if (cause instanceof ParseException) {
            throw (ParseException) cause;
        }
        throw Util.toRuntimeException(cause);
    } finally {
        parsedQueryFuture.cancel(false);
    }

    TopDocs docs = searcher.search(query, new PagePermissionFilter(visibleDocIds), HITS_PER_PAGE * page);

    // Fetch one SearchHit per document on the requested page, in parallel.
    int firstHit = HITS_PER_PAGE * (page - 1);
    int lastHit = Math.min(HITS_PER_PAGE * page, docs.scoreDocs.length);
    IndexReader reader = searcher.getIndexReader();
    List<ListenableFuture<SearchHit>> hitFutures = Lists.newArrayList();
    for (int docIdx = firstHit; docIdx < lastHit; docIdx++) {
        hitFutures.add(taskExecutor
                .submit(new GetSearchHitTask(query, reader, docs.scoreDocs[docIdx].doc, analyzer)));
    }

    try {
        ListenableFuture<List<SearchHit>> allHitsFuture = Futures.allAsList(hitFutures);
        List<SearchHit> hits = allHitsFuture.get(INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
        return new SearchResult(hits, docs.totalHits, HITS_PER_PAGE);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            throw (IOException) cause;
        }
        throw Util.toRuntimeException(cause);
    } finally {
        // Best-effort cleanup of any hit tasks still queued.
        for (ListenableFuture<SearchHit> hitFuture : hitFutures) {
            hitFuture.cancel(false);
        }
    }
}

From source file:org.codice.solr.factory.impl.SolrClientAdapter.java

@SuppressWarnings("PMD.CompareObjectsWithEquals" /* purposely testing previous client identity */)
private void finalizeStateChange(boolean notifyAvailability, @Nullable Future<?> futureToCancel,
        @Nullable SolrClient previousClientToClose, boolean swallowIOExceptions) throws IOException {
    // Notify listeners first so they observe the new state before cleanup runs.
    if (notifyAvailability) {
        notifyListenersAndInitializers();
    }
    final boolean hasLiveFailsafe = (futureToCancel != null) && !futureToCancel.isDone();
    if (hasLiveFailsafe) {
        LOGGER.debug("Solr({}): cancelling its previous failsafe task", core);
        futureToCancel.cancel(true);
    }
    // don't close if we still use the same client
    final boolean clientWasReplaced = (previousClientToClose != null) && (previousClientToClose != realClient);
    if (clientWasReplaced) {
        LOGGER.debug("Solr({}): closing its previous client [{}]", core, previousClientToClose);
        Closeables.close(previousClientToClose, swallowIOExceptions);
    }
}

From source file:de.blizzy.documentr.search.PageFinder.java

private SearchResult findPages(String searchText, int page, Authentication authentication,
        IndexSearcher searcher) throws ParseException, IOException, TimeoutException {

    // Kick off query parsing and the visible-document computation concurrently.
    Future<Query> parsedQueryFuture = taskExecutor.submit(new ParseQueryTask(searchText, analyzer));
    ListenableFuture<Bits> visibleDocIdsFuture = taskExecutor.submit(
            new GetVisibleDocIdsTask(searcher, authentication, userStore, permissionEvaluator, taskExecutor));

    Query query;
    TopDocs docs;
    try {
        query = parsedQueryFuture.get(DocumentrConstants.INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
        Bits visibleDocIds = visibleDocIdsFuture.get(DocumentrConstants.INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
        docs = searcher.search(query, new PagePermissionFilter(visibleDocIds), HITS_PER_PAGE * page);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        // Surface parse failures as-is; anything else becomes unchecked.
        Throwable cause = e.getCause();
        if (cause instanceof ParseException) {
            throw (ParseException) cause;
        }
        throw Util.toRuntimeException(cause);
    } finally {
        parsedQueryFuture.cancel(false);
        visibleDocIdsFuture.cancel(false);
    }

    // Fetch one SearchHit per document on the requested page, in parallel.
    int firstHit = HITS_PER_PAGE * (page - 1);
    int lastHit = Math.min(HITS_PER_PAGE * page, docs.scoreDocs.length);
    IndexReader reader = searcher.getIndexReader();
    List<ListenableFuture<SearchHit>> hitFutures = Lists.newArrayList();
    for (int docIdx = firstHit; docIdx < lastHit; docIdx++) {
        hitFutures.add(taskExecutor
                .submit(new GetSearchHitTask(query, reader, docs.scoreDocs[docIdx].doc, analyzer)));
    }

    try {
        ListenableFuture<List<SearchHit>> allHitsFuture = Futures.allAsList(hitFutures);
        List<SearchHit> hits = allHitsFuture.get(DocumentrConstants.INTERACTIVE_TIMEOUT, TimeUnit.SECONDS);
        return new SearchResult(hits, docs.totalHits, HITS_PER_PAGE);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            throw (IOException) cause;
        }
        throw Util.toRuntimeException(cause);
    } finally {
        // Best-effort cleanup of any hit tasks still queued.
        for (ListenableFuture<SearchHit> hitFuture : hitFutures) {
            hitFuture.cancel(false);
        }
    }
}