Example usage for java.util.concurrent ThreadPoolExecutor shutdownNow

List of usage examples for java.util.concurrent ThreadPoolExecutor shutdownNow

Introduction

On this page you can find usage examples for java.util.concurrent ThreadPoolExecutor.shutdownNow().

Prototype

public List<Runnable> shutdownNow() 

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
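
The ExecutorService documentation recommends pairing shutdownNow() with shutdown() and awaitTermination() in a two-phase shutdown, because shutdownNow() only attempts to stop running tasks (typically via Thread.interrupt()) and makes no guarantee that they actually terminate. The sketch below illustrates that common pattern; the pool size and timeout values are arbitrary placeholders.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowExample {

    // Two-phase shutdown: stop accepting new tasks, wait, then force-cancel.
    static void shutdownAndAwaitTermination(ExecutorService pool) {
        pool.shutdown(); // Disable submission of new tasks
        try {
            // Give previously submitted tasks a chance to finish
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                // Interrupt running tasks; the returned list holds tasks that never started
                List<Runnable> neverStarted = pool.shutdownNow();
                System.out.println(neverStarted.size() + " queued task(s) never started");
                // Wait a while for tasks to respond to being interrupted
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            // (Re-)cancel if the current thread was interrupted while waiting
            pool.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        for (int i = 0; i < 10; i++) {
            pool.execute(() -> {
                try {
                    Thread.sleep(5_000);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // respond to shutdownNow()
                }
            });
        }
        shutdownAndAwaitTermination(pool);
    }
}

Several of the examples below use variants of this pattern, for example calling shutdownNow() from an InterruptedException handler or after awaitTermination() times out.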

Usage

From source file:com.qmetry.qaf.automation.integration.ResultUpdator.java

public static void awaitTermination() {
    if (hasActivePool) {
        ThreadPoolExecutor pool = getPool();
        while (pool.getActiveCount() > 0) {
            logger.info("Result updator : Remaining " + pool.getActiveCount() + " result to be update.");
            try {
                pool.awaitTermination(5, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        System.out.println("Result updator : Remaining " + pool.getActiveCount() + " result to be update.");
        try {
            pool.shutdownNow();
        } catch (Exception e) {
            e.printStackTrace();
        }
        hasActivePool = false;
    }
}

From source file:eu.artofcoding.beetlejuice.spring.SpringContextHelper.java

private void stopExecutors() {
    ThreadPoolTaskExecutor springTaskExecutor = applicationContext.getBean("taskExecutor",
            ThreadPoolTaskExecutor.class);
    ThreadPoolExecutor springExecutor = springTaskExecutor.getThreadPoolExecutor();
    springExecutor.shutdownNow();
    Map<String, ThreadPoolTaskExecutor> map = applicationContext.getBeansOfType(ThreadPoolTaskExecutor.class);
    ThreadPoolTaskExecutor t = null;
    for (String key : map.keySet()) {
        t = map.get(key);
        final ThreadPoolExecutor executor = t.getThreadPoolExecutor();
        executor.shutdownNow();
        logger.info(
                String.format("%s: active after shutdown: %d", executor.toString(), executor.getActiveCount()));
        logger.info(String.format("%s: completed after shutdown: %d", executor.toString(),
                executor.getCompletedTaskCount()));
    }
}

From source file:com.all.downloader.p2p.phexcore.PhexCoreImpl.java

@PreDestroy
public void shutdown() {
    if (servent.isRunning()) {
        try {
            servent.stop();
        } catch (Exception e) {
            log.error(e, e);
        }
    }

    log.info("Shutting down timer service...");
    Environment.getInstance().getTimerService().cancel();

    log.info("Shutting down phex pool thread...");
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Environment.getInstance().getThreadPool();
    threadPool.shutdownNow();
    scheduler.shutdownNow();
}

From source file:dk.netarkivet.harvester.indexserver.CrawlLogIndexCache.java

/**
 * Try to release all resources connected to the given ThreadPoolExecutor.
 * @param executor a ThreadPoolExecutor
 */
private void closeDownThreadpoolQuietly(ThreadPoolExecutor executor) {
    if (executor == null) {
        return;
    }
    if (!executor.isShutdown()) {
        executor.shutdownNow();
    }
}

From source file:org.apache.cxf.systest.jaxrs.JAXRSCxfContinuationsTest.java

private void doTestContinuation(String pathSegment) throws Exception {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(10));
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(1);

    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/1", "1",
            "CXF in Action1", startSignal, doneSignal));
    startSignal.countDown();
    doneSignal.await(60, TimeUnit.SECONDS);
    executor.shutdownNow();
    assertEquals("Not all invocations have completed", 0, doneSignal.getCount());
}

From source file:org.apache.cxf.systest.jaxrs.AbstractJAXRSContinuationsTest.java

protected void doTestContinuation(String pathSegment) throws Exception {
    final String port = getPort();
    ThreadPoolExecutor executor = new ThreadPoolExecutor(5, 5, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(10));
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(1);
    List<BookWorker> workers = new ArrayList<>(5);
    for (int x = 1; x < 6; x++) {
        workers.add(new BookWorker("http://localhost:" + port + getBaseAddress() + pathSegment + "/" + x,
                Integer.toString(x), "CXF in Action" + x, startSignal, doneSignal));
    }
    for (BookWorker w : workers) {
        executor.execute(w);
    }

    startSignal.countDown();
    doneSignal.await(60, TimeUnit.SECONDS);
    executor.shutdownNow();
    assertEquals("Not all invocations have completed", 0, doneSignal.getCount());
    for (BookWorker w : workers) {
        w.checkError();
    }
}

From source file:io.anserini.index.IndexCollection.java

public void run() throws IOException, InterruptedException {
    final long start = System.nanoTime();
    LOG.info("Starting indexer...");

    int numThreads = args.threads;

    final Directory dir = FSDirectory.open(indexPath);
    final EnglishAnalyzer analyzer = args.keepStopwords ? new EnglishAnalyzer(CharArraySet.EMPTY_SET)
            : new EnglishAnalyzer();
    final IndexWriterConfig config = new IndexWriterConfig(analyzer);
    config.setSimilarity(new BM25Similarity());
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    config.setRAMBufferSizeMB(args.memorybufferSize);
    config.setUseCompoundFile(false);
    config.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, config);

    final ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads);
    final List<Path> segmentPaths = collection.getFileSegmentPaths();

    final int segmentCnt = segmentPaths.size();
    LOG.info(segmentCnt + " files found in " + collectionPath.toString());
    for (int i = 0; i < segmentCnt; i++) {
        executor.execute(new IndexerThread(writer, collection, segmentPaths.get(i)));
    }

    executor.shutdown();

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
            LOG.info(String.format("%.2f percent completed",
                    (double) executor.getCompletedTaskCount() / segmentCnt * 100.0d));
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    if (segmentCnt != executor.getCompletedTaskCount()) {
        throw new RuntimeException("totalFiles = " + segmentCnt + " is not equal to completedTaskCount =  "
                + executor.getCompletedTaskCount());
    }

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (args.optimize)
            writer.forceMerge(1);
    } finally {
        try {
            writer.close();
        } catch (IOException e) {
            // It is possible that this happens... but nothing much we can do at this point,
            // so just log the error and move on.
            LOG.error(e);
        }
    }

    LOG.info("Indexed documents: " + counters.indexedDocuments.get());
    LOG.info("Empty documents: " + counters.emptyDocuments.get());
    LOG.info("Errors: " + counters.errors.get());

    final long durationMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    LOG.info("Total " + numIndexed + " documents indexed in "
            + DurationFormatUtils.formatDuration(durationMillis, "HH:mm:ss"));
}

From source file:org.esigate.test.cases.PerformanceTestCase.java

/**
 * Executes the task with multiple threads.
 *
 * @param request
 * @return
 * @throws Exception
 */
private long execute(HttpGetRequestRunnable request, int numberOfRequests, int threads) throws Exception {
    connectionManager = new PoolingHttpClientConnectionManager();
    httpClient = HttpClientBuilder.create().setConnectionManager(connectionManager).setMaxConnTotal(threads)
            .setMaxConnPerRoute(threads).setDefaultRequestConfig(
                    RequestConfig.custom().setConnectTimeout(10000).setSocketTimeout(10000).build())
            .build();
    // Warm up
    request.run();

    BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>();
    ThreadPoolExecutor threadPool = new ThreadPoolExecutor(threads, threads, 5, TimeUnit.SECONDS, queue);

    long start = System.currentTimeMillis();
    threadPool.prestartAllCoreThreads();
    for (int i = 0; i < numberOfRequests; i++) {
        threadPool.submit(request);
    }
    threadPool.shutdown();

    // wait a maximum of 200 s
    threadPool.awaitTermination(200, TimeUnit.SECONDS);
    connectionManager.shutdown();

    if (request.exception != null) {
        throw new AssertionFailedError(
                "Exception for request " + request.url + " after " + request.count + " requests",
                request.exception);
    }
    if (threadPool.getCompletedTaskCount() < threadPool.getTaskCount()) {
        // Not all tasks were executed
        String msg = request.url + " : Only " + threadPool.getCompletedTaskCount() + "/"
                + threadPool.getTaskCount() + " have been rendered " + " => Maybe a performance issue";
        threadPool.shutdownNow();
        fail(msg);
    }

    long end = System.currentTimeMillis();
    long execTime = end - start;
    LOG.debug("Executed request " + request.url + " " + numberOfRequests + " times with " + threads
            + " threads in " + execTime + "ms");
    return execTime;

}

From source file:com.meltmedia.cadmium.servlets.ClassLoaderLeakPreventor.java

/**
 * Partially inspired by org.apache.catalina.loader.WebappClassLoader.clearReferencesThreads()
 */
@SuppressWarnings("deprecation")
protected void stopThreads() {
    final Class<?> workerClass = findClass("java.util.concurrent.ThreadPoolExecutor$Worker");
    final Field oracleTarget = findField(Thread.class, "target"); // Sun/Oracle JRE
    final Field ibmRunnable = findField(Thread.class, "runnable"); // IBM JRE

    for (Thread thread : getAllThreads()) {
        @SuppressWarnings("RedundantCast")
        final Runnable runnable = (oracleTarget != null) ? (Runnable) getFieldValue(oracleTarget, thread) : // Sun/Oracle JRE  
                (Runnable) getFieldValue(ibmRunnable, thread); // IBM JRE

        if (thread != Thread.currentThread() && // Ignore current thread
                (isThreadInWebApplication(thread) || isLoadedInWebApplication(runnable))) {

            if (thread.getClass().getName().startsWith(JURT_ASYNCHRONOUS_FINALIZER)) {
                // Note, the thread group of this thread may be "system" if it is triggered by the Garbage Collector
                // however if triggered by us in forceStartOpenOfficeJurtCleanup() it may depend on the application server
                if (stopThreads) {
                    info("Found JURT thread " + thread.getName() + "; starting "
                            + JURTKiller.class.getSimpleName());
                    new JURTKiller(thread).start();
                } else
                    warn("JURT thread " + thread.getName() + " is still running in web app");
            } else if (thread.getThreadGroup() != null && ("system".equals(thread.getThreadGroup().getName()) || // System thread
                    "RMI Runtime".equals(thread.getThreadGroup().getName()))) { // RMI thread (honestly, just copied from Tomcat)

                if ("Keep-Alive-Timer".equals(thread.getName())) {
                    thread.setContextClassLoader(getWebApplicationClassLoader().getParent());
                    debug("Changed contextClassLoader of HTTP keep alive thread");
                }
            } else if (thread.isAlive()) { // Non-system, running in web app

                if ("java.util.TimerThread".equals(thread.getClass().getName())) {
                    if (stopTimerThreads) {
                        warn("Stopping Timer thread running in classloader.");
                        stopTimerThread(thread);
                    } else {
                        info("Timer thread is running in classloader, but will not be stopped");
                    }
                } else {
                    // If the thread is running a java.util.concurrent.ThreadPoolExecutor.Worker, try shutting down the executor
                    if (workerClass != null && workerClass.isInstance(runnable)) {
                        if (stopThreads) {
                            warn("Shutting down " + ThreadPoolExecutor.class.getName()
                                    + " running within the classloader.");
                            try {
                                // java.util.concurrent.ThreadPoolExecutor, introduced in Java 1.5
                                final Field workerExecutor = findField(workerClass, "this$0");
                                final ThreadPoolExecutor executor = getFieldValue(workerExecutor, runnable);
                                executor.shutdownNow();
                            } catch (Exception ex) {
                                error(ex);
                            }
                        } else
                            info(ThreadPoolExecutor.class.getName()
                                    + " running within the classloader will not be shut down.");
                    }

                    final String displayString = "'" + thread + "' of type " + thread.getClass().getName();

                    if (stopThreads) {
                        final String waitString = (threadWaitMs > 0) ? "after " + threadWaitMs + " ms " : "";
                        warn("Stopping Thread " + displayString + " running in web app " + waitString);

                        if (threadWaitMs > 0) {
                            try {
                                thread.join(threadWaitMs); // Wait for thread to run
                            } catch (InterruptedException e) {
                                // Do nothing
                            }
                        }

                        // Normally threads should not be stopped (method is deprecated), since it may cause an inconsistent state.
                        // In this case however, the alternative is a classloader leak, which may or may not be considered worse.
                        if (thread.isAlive())
                            thread.stop();
                    } else {
                        warn("Thread " + displayString + " is still running in web app");
                    }

                }
            }
        }
    }
}

From source file:com.emc.ecs.smart.SmartUploader.java

/**
 * Performs a segmented upload to ECS using the SmartClient and the ECS byte range PUT extensions.  The upload
 * URL will be parsed and the hostname will be enumerated in DNS to see if it contains multiple 'A' records.  If
 * so, those will be used to populate the software load balancer.
 */
private void doSegmentedUpload() {
    try {
        long start = System.currentTimeMillis();
        fileSize = Files.size(fileToUpload);

        // Verify md5Save file path is legit.
        PrintWriter pw = null;
        try {
            if (saveMD5 != null) {
                pw = new PrintWriter(saveMD5);
            }
        } catch (IOException e) {
            System.err.println("Invalid path specified to save local file MD5: " + e.getMessage());
            System.exit(3);
        }

        // Figure out which segment size to use.
        if (segmentSize == -1) {
            if (fileSize >= LARGE_SEGMENT) {
                segmentSize = LARGE_SEGMENT;
            } else {
                segmentSize = SMALL_SEGMENT;
            }
        }

        // Expand the host
        String host = uploadUrl.getHost();
        InetAddress addr = InetAddress.getByName(host);
        List<String> ipAddresses = new ArrayList<>();
        try {
            ipAddresses = getIPAddresses(host);
        } catch (NamingException e) {
            LogMF.warn(l4j, "Could not resolve hostname: {0}: {1}.  Using as-is.", host, e);
            ipAddresses.add(host);
        }
        LogMF.info(l4j, "Host {0} resolves to {1}", host, ipAddresses);

        // Initialize the SmartClient
        SmartConfig smartConfig = new SmartConfig(ipAddresses.toArray(new String[ipAddresses.size()]));
        // We don't need to update the host list
        smartConfig.setHostUpdateEnabled(false);

        // Configure the load balancer
        Client pingClient = SmartClientFactory.createStandardClient(smartConfig,
                new URLConnectionClientHandler());
        pingClient.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        LoadBalancer loadBalancer = smartConfig.getLoadBalancer();
        EcsHostListProvider hostListProvider = new EcsHostListProvider(pingClient, loadBalancer, null, null);
        hostListProvider.setProtocol(uploadUrl.getProtocol());
        if (uploadUrl.getPort() != -1) {
            hostListProvider.setPort(uploadUrl.getPort());
        }
        smartConfig.setHostListProvider(hostListProvider);

        client = SmartClientFactory.createSmartClient(smartConfig, new URLConnectionClientHandler());

        // Add our retry handler
        client.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        client.addFilter(new MD5CheckFilter());
        client.addFilter(new RetryFilter(retryDelay, retryCount));

        // Create a FileChannel for the upload
        fileChannel = new RandomAccessFile(fileToUpload.toFile(), "r").getChannel();

        System.out.printf("Starting upload at %s\n", new Date().toString());
        // The first upload is done without a range to create the initial object.
        doUploadSegment(0);

        // See how many more segments we have
        int segmentCount = (int) (fileSize / (long) segmentSize);
        long remainder = fileSize % segmentSize;
        if (remainder != 0) {
            // Additional bytes at end
            segmentCount++;
        }

        if (segmentCount > 1) {
            // Build a thread pool to upload the segments.
            ThreadPoolExecutor executor = new ThreadPoolExecutor(threadCount, threadCount, 15, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>());

            for (int i = 1; i < segmentCount; i++) {
                executor.execute(new SegmentUpload(i));
            }

            // Wait for completion
            while (true) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                if (failed) {
                    // Abort!
                    l4j.warn("Error detected, terminating upload");
                    executor.shutdownNow();
                    break;
                }
                if (executor.getQueue().isEmpty()) {
                    l4j.info("All tasks complete, awaiting shutdown");
                    try {
                        executor.shutdown();
                        executor.awaitTermination(1, TimeUnit.MINUTES);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    break;
                }
            }
        }

        // Done!
        long elapsed = System.currentTimeMillis() - start;
        printRate(fileSize, elapsed);

        // Release buffers
        LogMF.debug(l4j, "buffer count at end: {0}", buffers.size());
        buffers = new LinkedList<>();
        System.out.printf("\nUpload completed at %s\n", new Date().toString());

        // Verify
        if (verifyUrl != null) {

            System.out.printf("starting remote MD5...\n");

            String objectMD5 = computeObjectMD5();
            System.out.printf("Object MD5 = %s\n", objectMD5);

            System.out.printf("Remote MD5 complete at %s\nStarting local MD5\n", new Date().toString());

            // At this point we don't need the clients anymore.
            l4j.debug("Shutting down SmartClient");
            SmartClientFactory.destroy(client);
            SmartClientFactory.destroy(pingClient);

            String fileMD5 = standardChecksum ? computeFileMD5Standard() : computeFileMD5();
            System.out.printf("\nFile on disk MD5 = %s\n", fileMD5);
            System.out.printf("Local MD5 complete at %s\n", new Date().toString());
            if (!fileMD5.equals(objectMD5)) {
                System.err.printf("ERROR: file MD5 does not match object MD5! %s != %s", fileMD5, objectMD5);
                System.exit(10);
            }

            if (saveMD5 != null && pw != null) {
                pw.write(fileMD5);
                pw.close();
            }

            System.out.printf("\nObject verification passed!\n");
        }

    } catch (IOException e) {
        e.printStackTrace();
        System.exit(4);
    }
}