Example usage for com.google.common.util.concurrent ListeningExecutorService awaitTermination

Introduction

This page lists example usages of the awaitTermination method of com.google.common.util.concurrent.ListeningExecutorService.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first. Returns true if the executor terminated and false if the timeout elapsed before termination.
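
Before the collected usages below, here is a minimal, self-contained sketch of the usual shutdown pattern around awaitTermination. It is an illustration only, not taken from any of the projects below; the class name AwaitTerminationSketch and the submitted task are placeholders.

import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {

    public static void main(String[] args) throws InterruptedException {
        // Wrap a plain ExecutorService so that submit() returns ListenableFutures.
        ListeningExecutorService executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(2));

        // Submit some work; the task here is just a placeholder.
        Runnable task = () -> System.out.println("working...");
        executor.submit(task);

        // Stop accepting new tasks, then give running tasks a bounded time to finish.
        executor.shutdown();
        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            // The timeout elapsed before termination: cancel whatever is still running.
            executor.shutdownNow();
        }
    }
}

Most of the examples that follow use this same shape: shutdown() to stop accepting new tasks, awaitTermination(...) with a timeout, and shutdownNow() as a fallback when the timeout elapses.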

Usage

From source file:com.spotify.sparkey.system.ReloadableReaderExample.java

private static void run() throws IOException, InterruptedException, ExecutionException {
    ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newSingleThreadExecutor());

    // create dummy log/index files, and load the reader from them
    final File logFile = new File("reloadabletest.spl");
    create(Sparkey.getIndexFile(logFile));
    final ReloadableSparkeyReader reader = ReloadableSparkeyReader.fromLogFile(logFile, executorService).get();

    // should be ignored (same file)
    reader.load(logFile);

    // should load from second file now
    final File logFile2 = new File("reloadabletest2.spl");
    create(Sparkey.getIndexFile(logFile2));
    reader.load(logFile2);

    reader.close();
    executorService.shutdown();
    executorService.awaitTermination(10, TimeUnit.SECONDS);

    System.out.println("Done!");
}

From source file:org.attribyte.relay.AsyncPublisher.java

/**
 * Shutdown the publisher.
 * @param notificationExecutor The notification executor.
 * @param httpClient The HTTP client.
 * @param maxWaitSeconds The maximum time in seconds to wait for normal shutdown.
 * @throws Exception on shutdown error.
 */
private void shutdown(final ListeningExecutorService notificationExecutor, final HttpClient httpClient,
        final int maxWaitSeconds) throws Exception {
    if (isInit.compareAndSet(true, false)) {
        notificationExecutor.shutdown();
        notificationExecutor.awaitTermination(maxWaitSeconds, TimeUnit.SECONDS);
        if (!notificationExecutor.isTerminated()) {
            notificationExecutor.shutdownNow();
        }
        httpClient.stop();
    }
}

From source file:com.sk89q.worldguard.protection.managers.index.ChunkHashTable.java

/**
 * Waits until all currently executing background tasks complete.
 *
 * @param timeout the maximum time to wait
 * @param unit the time unit of the timeout argument
 * @return {@code true} if this executor terminated and
 *         {@code false} if the timeout elapsed before termination
 * @throws InterruptedException on interruption
 */
public boolean awaitCompletion(long timeout, TimeUnit unit) throws InterruptedException {
    ListeningExecutorService previousExecutor;
    synchronized (lock) {
        previousExecutor = executor;
        executor = createExecutor();
    }
    previousExecutor.shutdown();
    return previousExecutor.awaitTermination(timeout, unit);
}

From source file:com.facebook.buck.cli.DistBuildCommandDelegate.java

private void killExecutor(ListeningExecutorService stampedeControllerExecutor, String failureWarning)
        throws InterruptedException {
    stampedeControllerExecutor.shutdown();
    if (!stampedeControllerExecutor.awaitTermination(STAMPEDE_EXECUTOR_SHUTDOWN_TIMEOUT_MILLIS,
            TimeUnit.MILLISECONDS)) {
        LOG.warn(failureWarning);
        stampedeControllerExecutor.shutdownNow();
    }
}

From source file:com.netflix.metacat.main.services.search.ElasticSearchMetacatRefresh.java

private void shutdown(final ListeningExecutorService executorService) {
    if (executorService != null) {
        executorService.shutdown();
        try {
            // Wait a while for existing tasks to terminate
            if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
                executorService.shutdownNow(); // Cancel currently executing tasks
                // Wait a while for tasks to respond to being cancelled
                if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
                    log.warn("Thread pool for metacat refresh did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            // (Re-)Cancel if current thread also interrupted
            executorService.shutdownNow();
            // Preserve interrupt status
            Thread.currentThread().interrupt();
        }
    }
}

From source file:com.netflix.metacat.main.services.search.ElasticSearchRefresh.java

private void shutdown(@Nullable final ListeningExecutorService executorService) {
    if (executorService != null) {
        executorService.shutdown();
        try {
            // Wait a while for existing tasks to terminate
            if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
                executorService.shutdownNow(); // Cancel currently executing tasks
                // Wait a while for tasks to respond to being cancelled
                if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
                    log.warn("Thread pool for metacat refresh did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            // (Re-)Cancel if current thread also interrupted
            executorService.shutdownNow();
            // Preserve interrupt status
            Thread.currentThread().interrupt();
        }
    }
}

From source file:org.sosy_lab.cpachecker.core.algorithm.ParallelAlgorithm.java

@Override
public AlgorithmStatus run(ReachedSet pReachedSet) throws CPAException, InterruptedException {
    mainEntryNode = AbstractStates.extractLocation(pReachedSet.getFirstState());
    ForwardingReachedSet forwardingReachedSet = (ForwardingReachedSet) pReachedSet;

    ListeningExecutorService exec = listeningDecorator(newFixedThreadPool(configFiles.size()));
    List<ListenableFuture<ParallelAnalysisResult>> futures = new ArrayList<>();

    for (AnnotatedValue<Path> p : configFiles) {
        futures.add(exec.submit(createParallelAnalysis(p, ++stats.noOfAlgorithmsUsed)));
    }

    // shut down the executor service so that awaitTermination can eventually return
    exec.shutdown();

    handleFutureResults(futures);

    // wait some time so that all threads are shut down and have (hopefully) finished their logging
    if (!exec.awaitTermination(10, TimeUnit.SECONDS)) {
        logger.log(Level.WARNING, "Not all threads are terminated although we have a result.");
    }

    exec.shutdownNow();

    if (finalResult != null) {
        forwardingReachedSet.setDelegate(finalResult.getReached());
        return finalResult.getStatus();
    }

    return AlgorithmStatus.UNSOUND_AND_PRECISE;
}

From source file:com.b2international.index.es.EsDocumentWriter.java

@Override
public void commit() throws IOException {
    if (indexOperations.isEmpty() && deleteOperations.isEmpty() && updateOperations.isEmpty()) {
        return;
    }

    final Set<DocumentMapping> mappingsToRefresh = Collections.synchronizedSet(newHashSet());
    final EsClient client = admin.client();
    // apply bulk updates first
    final ListeningExecutorService executor;
    if (updateOperations.size() > 1) {
        executor = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(Math.min(4, updateOperations.size())));
    } else {
        executor = MoreExecutors.newDirectExecutorService();
    }
    final List<ListenableFuture<?>> updateFutures = newArrayList();
    for (BulkUpdate<?> update : updateOperations) {
        updateFutures.add(executor.submit(() -> bulkUpdate(client, update, mappingsToRefresh)));
    }
    try {
        executor.shutdown();
        Futures.allAsList(updateFutures).get();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException | ExecutionException e) {
        throw new IndexException("Couldn't execute bulk updates", e);
    }

    // then bulk indexes/deletes
    if (!indexOperations.isEmpty() || !deleteOperations.isEmpty()) {
        final BulkProcessor processor = client.bulk(new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
                admin.log().debug("Sending bulk request {}", request.numberOfActions());
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                admin.log().error("Failed bulk request", failure);
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                admin.log().debug("Successfully processed bulk request ({}) in {}.", request.numberOfActions(),
                        response.getTook());
                if (response.hasFailures()) {
                    for (BulkItemResponse itemResponse : response.getItems()) {
                        checkState(!itemResponse.isFailed(), "Failed to commit bulk request in index '%s', %s",
                                admin.name(), itemResponse.getFailureMessage());
                    }
                }
            }
        }).setConcurrentRequests(getConcurrencyLevel()).setBulkActions(10_000)
                .setBulkSize(new ByteSizeValue(10L, ByteSizeUnit.MB)).build();

        for (Class<?> type : ImmutableSet.copyOf(indexOperations.rowKeySet())) {
            final Map<String, Object> indexOperationsForType = indexOperations.row(type);

            final DocumentMapping mapping = admin.mappings().getMapping(type);
            final String typeString = mapping.typeAsString();
            final String typeIndex = admin.getTypeIndex(mapping);

            mappingsToRefresh.add(mapping);

            for (Entry<String, Object> entry : Iterables.consumingIterable(indexOperationsForType.entrySet())) {
                final String id = entry.getKey();
                if (!deleteOperations.containsValue(id)) {
                    final Object obj = entry.getValue();
                    final Set<String> hashedFields = mapping.getHashedFields();
                    final byte[] _source;

                    if (!hashedFields.isEmpty()) {
                        final ObjectNode objNode = mapper.valueToTree(obj);
                        final ObjectNode hashedNode = mapper.createObjectNode();

                        // Preserve property order, share references with objNode
                        for (String hashedField : hashedFields) {
                            JsonNode value = objNode.get(hashedField);
                            if (value != null && !value.isNull()) {
                                hashedNode.set(hashedField, value);
                            }
                        }

                        final byte[] hashedBytes = mapper.writeValueAsBytes(hashedNode);
                        final HashCode hashCode = Hashing.sha1().hashBytes(hashedBytes);

                        // Inject the result as an extra field into the to-be-indexed JSON content
                        objNode.put(DocumentMapping._HASH, hashCode.toString());
                        _source = mapper.writeValueAsBytes(objNode);

                    } else {
                        _source = mapper.writeValueAsBytes(obj);
                    }

                    processor.add(new IndexRequest(typeIndex, typeString, id).opType(OpType.INDEX)
                            .source(_source, XContentType.JSON));
                }
            }

            for (String id : deleteOperations.removeAll(type)) {
                processor.add(new DeleteRequest(typeIndex, typeString, id));
            }

            // Flush processor between index boundaries
            processor.flush();
        }

        // Remaining delete operations can be executed on their own
        for (Class<?> type : ImmutableSet.copyOf(deleteOperations.keySet())) {
            final DocumentMapping mapping = admin.mappings().getMapping(type);
            final String typeString = mapping.typeAsString();
            final String typeIndex = admin.getTypeIndex(mapping);

            mappingsToRefresh.add(mapping);

            for (String id : deleteOperations.removeAll(type)) {
                processor.add(new DeleteRequest(typeIndex, typeString, id));
            }

            // Flush processor between index boundaries
            processor.flush();
        }

        try {
            processor.awaitClose(5, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            throw new IndexException("Interrupted bulk processing part of the commit", e);
        }
    }

    // refresh the index if there were only updates
    admin.refresh(mappingsToRefresh);
}

From source file:qa.qcri.nadeef.core.pipeline.Iterator.java

/**
 * Iterator operator execution.
 *
 * @param blocks input tables.
 * @return iteration output.
 */
@Override
@SuppressWarnings("unchecked")
public Boolean execute(Collection<Table> blocks) {
    Tracer tracer = Tracer.getTracer(Iterator.class);
    ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat("iterator-pool-%d").build();
    ExecutorService executor = Executors.newFixedThreadPool(MAX_THREAD_NUM, factory);
    ListeningExecutorService service = MoreExecutors.listeningDecorator(executor);

    Stopwatch stopwatch = Stopwatch.createStarted();
    blockCount = 0;

    ExecutionContext context = getCurrentContext();
    Rule rule = context.getRule();
    try {
        if (rule.supportTwoTables()) {
            // Rule runs on two tables.
            ListenableFuture<Integer> future = service
                    .submit(new IteratorCallable(blocks, rule, context.getNewTuples()));
            blockCount++;
            setPercentage(1.0f);
        } else {
            // Rule runs on each table.
            for (Table table : blocks) {
                ListenableFuture<Integer> future = service
                        .submit(new IteratorCallable(table, rule, context.getNewTuples()));
                Futures.addCallback(future, new IteratorCallback(blocks.size()));
            }
        }

        // wait until all the tasks are finished
        service.shutdown();
        while (!service.awaitTermination(10L, TimeUnit.MINUTES)) {
            // keep waiting until the executor terminates
        }

        // recycle the collection when dealing with pairs. This is mainly used to remove refs.
        if (rule instanceof PairTupleRule) {
            for (Table block : blocks) {
                block.recycle();
            }
        }

        // mark the end of the iteration output
        IteratorBlockingQueue.markEnd();
    } catch (InterruptedException ex) {
        tracer.err("Iterator is interrupted.", ex);
    } finally {
        executor.shutdown();
    }

    PerfReport.appendMetric(PerfReport.Metric.IteratorTime, stopwatch.elapsed(TimeUnit.MILLISECONDS));

    stopwatch.stop();
    return true;
}

From source file:de.cuseb.bilderbuch.images.ImageSearchService.java

public ImageResponse searchImages(final String query) {

    final ImageResponse response = new ImageResponse();
    final ListeningExecutorService executor = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(searches.size()));

    for (final ImageSearch search : searches.values()) {

        if (!search.isEnabled()) {
            continue;
        }

        ListenableFuture<List<Image>> searchResult = executor.submit(new Callable<List<Image>>() {
            @Override
            public List<Image> call() throws Exception {
                log.debug("starting enabled search " + search.getClass().getSimpleName());
                return search.searchImages(query);
            }
        });

        Futures.addCallback(searchResult, new FutureCallback<List<Image>>() {
            @Override
            public void onSuccess(List<Image> result) {
                log.debug(search.getClass().getSimpleName() + " result size: " + result.size());
                response.addImages(result);
            }

            @Override
            public void onFailure(Throwable t) {
                log.error(search.getClass().getSimpleName(), t);
            }
        });
    }

    try {
        executor.shutdown();
        executor.awaitTermination(timeout, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        log.error("awaitTermination interrupted", e);
    }

    if (shuffle) {
        log.debug("shuffling result");
        response.shuffle();
    }

    return response;
}