Example usage for java.util.concurrent ExecutorService shutdownNow

List of usage examples for java.util.concurrent ExecutorService shutdownNow

Introduction

On this page you can find example usage of java.util.concurrent ExecutorService shutdownNow.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
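
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern: interrupt running tasks with shutdownNow(), inspect the returned list of tasks that never started, and then wait for termination. The class name, pool size, task count, and sleep duration are illustrative choices, not taken from any example on this page.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        // Submit more tasks than threads so some are still queued when we stop.
        for (int i = 0; i < 4; i++) {
            executor.submit(() -> {
                try {
                    Thread.sleep(5000); // simulate work that responds to interruption
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        // shutdownNow() interrupts the running tasks and returns the queued ones
        // (as FutureTask wrappers, not the originally submitted lambdas).
        List<Runnable> neverStarted = executor.shutdownNow();
        System.out.println(neverStarted.size() + " task(s) never started");
        // Still wait briefly so interrupted tasks can finish cleaning up.
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}

Note that none of the examples below use the returned list; most pair shutdownNow() with awaitTermination(), following the shutdown pattern recommended in the ExecutorService Javadoc.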

Usage

From source file:gov.ca.cwds.cals.service.ComplaintsService.java

@SuppressWarnings("squid:S2142") //Logging and informing client instead of shutdown
private void shutdownExecutionService(ExecutorService executorService) {
    executorService.shutdown();
    try {
        if (!executorService.awaitTermination(1, TimeUnit.MINUTES)) {
            executorService.shutdownNow();
        }
    } catch (InterruptedException e) {
        String message = "Can't properly shutdown complaints execution pool";
        LOGGER.warn(message, e);
        throw new ServiceException(message, e);
    }
}

From source file:com.barchart.udt.TestSocketFile.java

/**
 * verify basic file send/receive
 */
@Test(timeout = 10 * 1000)
public void fileTransfer() throws Exception {

    final InetSocketAddress addr1 = localSocketAddress();
    final InetSocketAddress addr2 = localSocketAddress();

    final SocketUDT peer1 = new SocketUDT(TypeUDT.STREAM);
    final SocketUDT peer2 = new SocketUDT(TypeUDT.STREAM);

    peer1.setBlocking(false);
    peer2.setBlocking(false);

    peer1.setRendezvous(true);
    peer2.setRendezvous(true);

    peer1.bind(addr1);
    peer2.bind(addr2);

    socketAwait(peer1, StatusUDT.OPENED);
    socketAwait(peer2, StatusUDT.OPENED);

    peer1.connect(addr2);
    peer2.connect(addr1);

    socketAwait(peer1, StatusUDT.CONNECTED);
    socketAwait(peer2, StatusUDT.CONNECTED);

    log.info("state 0 - connected");
    log.info("peer1 : {}", peer1);
    log.info("peer2 : {}", peer2);

    final int size = 64 * 1024;

    final Random random = new Random(0);
    final byte[] array1 = new byte[size];
    final byte[] array2 = new byte[size];
    random.nextBytes(array1);
    random.nextBytes(array2);

    final File folder = new File("./target/file");
    folder.mkdirs();

    final File source = File.createTempFile("source", "data", folder);
    final File target = File.createTempFile("target", "data", folder);

    FileUtils.writeByteArrayToFile(source, array1);
    FileUtils.writeByteArrayToFile(target, array2);

    assertEquals(size, source.length());
    assertEquals(size, target.length());

    assertFalse("files are different", FileUtils.contentEquals(source, target));

    // sender
    final Runnable task1 = new Runnable() {
        @Override
        public void run() {
            try {
                log.info("init send");
                final long length = peer1.sendFile(source, 0, size);
                assertEquals(length, size);
            } catch (final Exception e) {
                log.error("", e);
            }
        }
    };

    // receiver
    final Runnable task2 = new Runnable() {
        @Override
        public void run() {
            try {
                log.info("init recv");
                final long length = peer2.receiveFile(target, 0, size);
                assertEquals(length, size);
            } catch (final Exception e) {
                log.error("", e);
            }
        }
    };

    final ExecutorService executor = Executors.newFixedThreadPool(2);

    executor.submit(task1);
    executor.submit(task2);

    Thread.sleep(5 * 1000);

    executor.shutdownNow();

    assertTrue("files are the same", FileUtils.contentEquals(source, target));

    peer1.close();
    peer2.close();

}

From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceUtil.java

private static void move(Counter counter, FileSystem fromFs, Path from, FileSystem toFs, Path to,
        boolean fromLocal, int threads) throws IOException, InterruptedException {
    if (counter == null) {
        throw new IllegalArgumentException("counter must not be null"); //$NON-NLS-1$
    }
    if (fromFs == null) {
        throw new IllegalArgumentException("fromFs must not be null"); //$NON-NLS-1$
    }
    if (from == null) {
        throw new IllegalArgumentException("from must not be null"); //$NON-NLS-1$
    }
    if (toFs == null) {
        throw new IllegalArgumentException("toFs must not be null"); //$NON-NLS-1$
    }
    if (to == null) {
        throw new IllegalArgumentException("to must not be null"); //$NON-NLS-1$
    }
    if (fromLocal && isLocalPath(from) == false) {
        throw new IllegalArgumentException("from must be on local file system"); //$NON-NLS-1$
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Start moving files (from={0}, to={1})", //$NON-NLS-1$
                from, to));
    }
    Path source = fromFs.makeQualified(from);
    Path target = toFs.makeQualified(to);
    List<Path> list = createFileListRelative(counter, fromFs, source);
    if (list.isEmpty()) {
        return;
    }
    boolean parallel = threads > 1 && list.size() >= PARALLEL_MOVE_MIN;
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Process moving files (from={0}, to={1}, count={2}, parallel={3})", //$NON-NLS-1$
                from, to, list.size(), parallel ? threads : "N/A")); //$NON-NLS-1$
    }
    if (parallel) {
        ExecutorService executor = Executors.newFixedThreadPool(Math.min(threads, list.size()),
                DAEMON_THREAD_FACTORY);
        try {
            moveParallel(counter, fromFs, toFs, source, target, list, fromLocal, executor);
        } finally {
            executor.shutdownNow();
        }
    } else {
        moveSerial(counter, fromFs, toFs, source, target, list, fromLocal);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Finish moving files (from={0}, to={1}, count={2})", //$NON-NLS-1$
                from, to, list.size()));
    }
}

From source file:org.paxml.launch.PaxmlRunner.java

/**
 * Run the computed launch model with a thread pool. It will run with a
 * default of 4 threads if the concurrency is not specified in the launch model.
 *
 * @param model
 *            the model containing the launch points
 * @param executionId
 *            the id of this execution
 */
public static void run(LaunchModel model, long executionId) {

    List<LaunchPoint> points = model.getLaunchPoints(false, executionId);
    if (log.isInfoEnabled()) {
        log.info("Found " + points.size() + " Paxml files to execute based on plan file: "
                + model.getPlanEntity().getResource().getPath());
    }
    if (points.isEmpty()) {
        return;
    }
    final int poolSize = model.getConcurrency() <= 0 ? Math.min(DEFAULT_CONCURRENCY, points.size())
            : model.getConcurrency();
    ExecutorService pool = Executors.newFixedThreadPool(poolSize);
    for (final LaunchPoint point : points) {
        pool.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    Context.cleanCurrentThreadContext();

                    logExecution(point.getResource(), true);

                    point.execute();
                } catch (Throwable t) {
                    if (log.isErrorEnabled()) {
                        log.error(findMessage(t), t);
                    }
                } finally {
                    logExecution(point.getResource(), false);
                }
            }

        });
    }

    try {
        pool.shutdown();
        // wait forever in a loop
        while (!pool.awaitTermination(1, TimeUnit.MINUTES)) {
            if (log.isDebugEnabled()) {
                log.debug("Waiting for all executors to finish...");
            }
        }

    } catch (InterruptedException e) {
        throw new PaxmlRuntimeException("Cannot wait for all executors to finish", e);
    } finally {
        pool.shutdownNow();
    }

}

From source file:com.meltmedia.cadmium.core.git.GitService.java

/**
 * Initializes war content directory for a Cadmium war.
 * @param uri The remote Git repository ssh URI.
 * @param branch The remote branch to checkout.
 * @param root The shared content root.
 * @param warName The name of the war file.
 * @param historyManager The history manager to log the initialization event.
 * @return A GitService object the points to the freshly cloned Git repository.
 * @throws RefNotFoundException
 * @throws Exception
 */
public static GitService initializeContentDirectory(String uri, String branch, String root, String warName,
        HistoryManager historyManager, ConfigManager configManager) throws Exception {
    initializeBaseDirectoryStructure(root, warName);
    String warDir = FileSystemManager.getChildDirectoryIfExists(root, warName);

    Properties configProperties = configManager.getDefaultProperties();

    GitLocation gitLocation = new GitLocation(uri, branch, configProperties.getProperty("git.ref.sha"));
    GitService cloned = initializeRepo(gitLocation, warDir, "git-checkout");
    try {
        String renderedContentDir = initializeSnapshotDirectory(warDir, configProperties,
                "com.meltmedia.cadmium.lastUpdated", "git-checkout", "renderedContent");

        boolean hasExisting = configProperties.containsKey("com.meltmedia.cadmium.lastUpdated")
                && renderedContentDir != null
                && renderedContentDir.equals(configProperties.getProperty("com.meltmedia.cadmium.lastUpdated"));
        if (renderedContentDir != null) {
            configProperties.setProperty("com.meltmedia.cadmium.lastUpdated", renderedContentDir);
        }
        configProperties.setProperty("branch", cloned.getBranchName());
        configProperties.setProperty("git.ref.sha", cloned.getCurrentRevision());
        configProperties.setProperty("repo", cloned.getRemoteRepository());

        if (renderedContentDir != null) {
            String sourceFilePath = renderedContentDir + File.separator + "META-INF" + File.separator + "source";
            if (sourceFilePath != null && FileSystemManager.canRead(sourceFilePath)) {
                try {
                    configProperties.setProperty("source", FileSystemManager.getFileContents(sourceFilePath));
                } catch (Exception e) {
                    log.warn("Failed to read source file {}", sourceFilePath);
                }
            } else if (!configProperties.containsKey("source")) {
                configProperties.setProperty("source", "{}");
            }
        } else if (!configProperties.containsKey("source")) {
            configProperties.setProperty("source", "{}");
        }

        configManager.persistDefaultProperties();

        ExecutorService pool = null;
        if (historyManager == null) {
            pool = Executors.newSingleThreadExecutor();
            historyManager = new HistoryManager(warDir, pool);
        }

        try {
            if (historyManager != null && !hasExisting) {
                historyManager.logEvent(EntryType.CONTENT,
                        new GitLocation(cloned.getRemoteRepository(), cloned.getBranchName(),
                                cloned.getCurrentRevision()),
                        "AUTO", renderedContentDir, "", "Initial content pull.", true, true);
            }
        } finally {
            if (pool != null) {
                pool.shutdownNow();
            }
        }

        return cloned;
    } catch (Throwable e) {
        cloned.close();
        throw new Exception(e);
    }
}

From source file:org.apache.streams.elasticsearch.ElasticsearchPersistReader.java

protected void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // Disable new tasks from being submitted
    try {
        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // Cancel currently executing tasks
            // Wait a while for tasks to respond to being cancelled
            if (!pool.awaitTermination(10, TimeUnit.SECONDS))
                LOGGER.error("Pool did not terminate");
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}

From source file:org.apache.hadoop.hbase.master.MasterMobCompactionThread.java

/**
 * Waits for the thread pool to finish.
 * @param t the executor service to wait for
 * @param name the thread name.
 */
private void waitFor(ExecutorService t, String name) {
    boolean done = false;
    while (!done) {
        try {
            done = t.awaitTermination(60, TimeUnit.SECONDS);
            LOG.info("Waiting for " + name + " to finish...");
            if (!done) {
                t.shutdownNow();
            }
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted waiting for " + name + " to finish...");
        }
    }
}

From source file:com.baifendian.swordfish.execserver.runner.flow.FlowRunnerManager.java

/**
 * Shuts down the executor service, immediately if shutdownNow is set.
 */
private void shutdownExecutorService(ExecutorService executorService, boolean shutdownNow) {
    if (!executorService.isShutdown()) {
        try {
            if (!shutdownNow) {
                executorService.shutdown();
            } else {
                executorService.shutdownNow();
            }

            executorService.awaitTermination(3, TimeUnit.SECONDS);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
}

From source file:org.jumpmind.symmetric.service.impl.NodeCommunicationService.java

public void stop() {
    Collection<CommunicationType> services = new HashSet<NodeCommunication.CommunicationType>(
            executors.keySet());
    for (CommunicationType communicationType : services) {
        try {
            ExecutorService service = executors.get(communicationType);
            service.shutdownNow();
        } finally {
            executors.remove(communicationType);
        }
    }

}

From source file:org.v2020.service.ie.VnaImport.java

private void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // Disable new tasks from being submitted
    try {
        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(getShutdownTimeoutInSeconds(), TimeUnit.SECONDS)) {
            pool.shutdownNow(); // Cancel currently executing tasks
            // Wait a while for tasks to respond to being cancelled
            if (!pool.awaitTermination(getShutdownTimeoutInSeconds(), TimeUnit.SECONDS))
                LOG.error("Thread pool did not terminate", pool);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}