Example usage for java.util.concurrent ExecutorService shutdownNow

List of usage examples for java.util.concurrent ExecutorService shutdownNow

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService.shutdownNow().

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
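
A pattern that recurs in the examples below is to request an orderly shutdown() first, wait with awaitTermination(), and only then fall back to shutdownNow(), or to call shutdownNow() in a finally block as a cleanup step. The following minimal sketch illustrates that pattern; the class and variable names (ShutdownNowSketch, pool, neverRun) are illustrative and are not taken from any of the examples on this page.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.execute(() -> System.out.println("working"));

        pool.shutdown(); // stop accepting new tasks, let running ones finish
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            // interrupt running workers and collect tasks that never started
            List<Runnable> neverRun = pool.shutdownNow();
            System.out.println(neverRun.size() + " task(s) never started");
        }
    }
}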

Usage

From source file:org.eclipse.rdf4j.http.server.ProtocolTest.java

/**
 * Test for SES-1861
 * 
 * @throws Exception
 */
@Test
public void testConcurrentNamespaceUpdates() throws Exception {
    int limitCount = 1000;
    int limitPrefix = 50;

    Random prng = new Random();

    // String repositoryLocation =
    // Protocol.getRepositoryLocation("http://localhost:8080/openrdf-sesame",
    // "Test-NativeStore");
    String repositoryLocation = TestServer.REPOSITORY_URL;

    ExecutorService threadPool = Executors.newFixedThreadPool(20,
            new ThreadFactoryBuilder().setNameFormat("rdf4j-protocoltest-%d").build());

    for (int count = 0; count < limitCount; count++) {
        final int number = count;
        final int i = prng.nextInt(limitPrefix);
        final String prefix = "prefix" + i;
        final String ns = "http://example.org/namespace" + i;

        final String location = Protocol.getNamespacePrefixLocation(repositoryLocation, prefix);

        Runnable runner = new Runnable() {

            public void run() {
                try {
                    if (number % 2 == 0) {
                        putNamespace(location, ns);
                    } else {
                        deleteNamespace(location);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                    fail("Failed in test: " + number);
                }
            }
        };
        threadPool.execute(runner);
    }
    threadPool.shutdown();
    threadPool.awaitTermination(30000, TimeUnit.MILLISECONDS);
    threadPool.shutdownNow();
}

From source file:it.geosolutions.tools.io.file.Copy.java

/**
 * Copy a list of files (preserving data) to a destination (which can be on
 * NFS), waiting (at least) 'seconds' seconds for each file to propagate.
 *
 * @param es
 *            The ExecutorService or null if you want to use a
 *            CachedThreadPool.
 * @note potentially this is a bad executor (for long lists of big files)
 *       NOTE: we should make some tests on this 22 Aug 2011
 * @param list
 * @param baseDestDir
 * @param seconds
 * @return the resulting copied file list or null
 * 
 */
public static List<File> parallelCopyListFileToNFS(ExecutorService es, final List<File> list,
        final File baseDestDir, final int seconds) {

    try {

        /*
         * this could potentially be a bad executor (for long lists of big
         * files) NOTE: we should make some tests on this 22 Aug 2011
         */
        if (es == null) {
            final ThreadFactory threadFactory = Executors.defaultThreadFactory();
            es = Executors.newCachedThreadPool(threadFactory);
        }

        final List<FutureTask<File>> futureFileList = asynchCopyListFileToNFS(es, list, baseDestDir, seconds);

        // list
        if (futureFileList == null) {
            if (LOGGER.isErrorEnabled())
                LOGGER.error("Failed to copy files.");
            return null;
        }
        final int size = futureFileList.size();
        if (size == 0) {
            if (LOGGER.isErrorEnabled())
                LOGGER.error("Failed to copy file list using an empty list");
            return null;
        }

        final List<File> ret = new ArrayList<File>(size);
        for (Future<File> futureFile : futureFileList) {

            if (futureFile != null) {

                File file;
                try {
                    file = futureFile.get();
                    if (file != null && file.exists()) {
                        ret.add(file);
                    } else {
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn("SKIPPING file:\n\t" + file + ".\nUnable to copy a not existent file.");
                    }
                } catch (InterruptedException e) {
                    if (LOGGER.isErrorEnabled())
                        LOGGER.error("Unable to get the file from this future File copy. ", e);
                } catch (ExecutionException e) {
                    if (LOGGER.isErrorEnabled())
                        LOGGER.error("Unable to get the file from this future File copy. ", e);
                }
            }
        }

        return ret;
    } catch (Throwable t) {
        if (LOGGER.isErrorEnabled())
            LOGGER.error("Unrecognized error occurred. ", t);
    } finally {
        if (es != null)
            es.shutdownNow();
    }
    return null;

}

From source file:org.compass.gps.device.support.parallel.ConcurrentParallelIndexExecutor.java

/**
 * Performs the indexing process using the provided index entities indexer. Creates a pool of N
 * threads (if <code>maxThreads</code> is set to -1, N is the number of entity groups, otherwise
 * N is <code>maxThreads</code>).
 *
 * @param entities             The partitioned index entities groups and index entities to index
 * @param indexEntitiesIndexer The entities indexer to use
 * @param compassGps           Compass gps interface for meta information
 */
public void performIndex(final IndexEntity[][] entities, final IndexEntitiesIndexer indexEntitiesIndexer,
        final CompassGpsInterfaceDevice compassGps) {

    if (entities.length <= 0) {
        if (ignoreNoEtities) {
            return;
        }
        throw new IllegalArgumentException(
                "No entities listed to be indexed, have you defined your entities correctly?");
    }
    int maxThreads = this.maxThreads;
    if (maxThreads == -1) {
        maxThreads = entities.length;
    }
    ExecutorService executorService = Executors.newFixedThreadPool(maxThreads,
            new NamedThreadFactory("Compass Gps Index", false));
    try {
        ArrayList tasks = new ArrayList();
        for (int i = 0; i < entities.length; i++) {
            final IndexEntity[] indexEntities = entities[i];
            tasks.add(new Callable() {
                public Object call() throws Exception {
                    compassGps.executeForIndex(new CompassCallbackWithoutResult() {
                        protected void doInCompassWithoutResult(CompassSession session)
                                throws CompassException {
                            indexEntitiesIndexer.performIndex(session, indexEntities);
                            session.flush();
                        }
                    });
                    return null;
                }
            });
        }
        List futures;
        try {
            futures = executorService.invokeAll(tasks);
        } catch (InterruptedException e) {
            throw new CompassGpsException("Failed to index, interrupted", e);
        }

        for (Iterator it = futures.iterator(); it.hasNext();) {
            Future future = (Future) it.next();
            try {
                future.get();
            } catch (InterruptedException e) {
                throw new CompassGpsException("Failed to index, interrupted", e);
            } catch (ExecutionException e) {
                throw new CompassGpsException("Failed to index, execution exception", e);
            }
        }
    } finally {
        executorService.shutdownNow();
    }
}

From source file:com.sap.research.connectivity.gw.GWOperationsUtils.java

public String getMetadataString(String url, String user, String pass, String host, String port, int timeOut)
        throws Exception {
    String returnString = "";

    try {
        String execArgs[] = new String[] { "java", "-jar",
                System.getProperty("user.home") + SEPARATOR + "appToRetrieveOdataMetadata.jar", url, user, pass,
                host, port };

        final Process theProcess = Runtime.getRuntime().exec(execArgs);

        Callable<String> call = new Callable<String>() {
            public String call() throws Exception {
                String returnString = "";
                try {
                    BufferedReader inStream = new BufferedReader(
                            new InputStreamReader(theProcess.getInputStream()));
                    returnString = IOUtils.toString(inStream);
                    IOUtils.closeQuietly(inStream);
                    //if (theProcess.exitValue() != 0)
                    theProcess.waitFor();
                } catch (InterruptedException e) {
                    throw new TimeoutException();
                    //log.severe("The call to the Gateway Service was interrupted.");
                }
                return returnString;
            }
        };

        final ExecutorService theExecutor = Executors.newSingleThreadExecutor();
        Future<String> futureResultOfCall = theExecutor.submit(call);
        try {
            returnString = futureResultOfCall.get(timeOut, TimeUnit.SECONDS);
        } catch (TimeoutException ex) {
            throw new TimeoutException(
                    "The Gateway Service call timed out. Please try again or check your settings.");
        } catch (ExecutionException ex) {
            throw new RuntimeException("The Gateway Service call did not complete due to an execution error. "
                    + ex.getCause().getLocalizedMessage());
        } finally {
            theExecutor.shutdownNow();
        }
    } catch (InterruptedException ex) {
        throw new InterruptedException(
                "The Gateway Service call did not complete due to an unexpected interruption.");
    } catch (IOException e) {
        throw new IOException("Error when retrieving metadata from the Gateway Service.");
    }

    return returnString;
}

From source file:com.adaptris.core.PoolingWorkflow.java

private void populatePool() throws CoreException {
    int size = minIdle();
    ExecutorService populator = Executors.newCachedThreadPool();
    try {
        final CyclicBarrier barrier = new CyclicBarrier(size + 1);
        log.trace("Need more ({}) children as soon as possible to handle work. Get to it", size);
        final List<Worker> workers = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            populator.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        Worker w = objectPool.borrowObject();
                        workers.add(w);
                        barrier.await(initWaitTimeMs(), TimeUnit.MILLISECONDS);
                    } catch (Exception e) {
                        barrier.reset();
                    }
                }
            });
        }
        barrier.await(initWaitTimeMs(), TimeUnit.MILLISECONDS);
        for (Worker worker : workers) {
            objectPool.returnObject(worker);
        }
    } catch (Exception e) {
        throw new CoreException(e);
    } finally {
        populator.shutdownNow();
    }
}

From source file:fsm.series.Series_CC.java

/**
 * Computes the 5 integrals simultaneously for increased performance.
 *
 * @param m Fourier term row
 * @param n Fourier term column
 * @return double array of size 5 with indexes corresponding to integral
 * number (1-5)
 */
@Override
public double[] getIntegralValues(int m, int n) {

    double[] I = new double[5];

    //if (!integralsCalculated) {

    Callable<Double> tsk1 = () -> getI1(m, n);

    Callable<Double> tsk2 = () -> getI2(m, n);

    Callable<Double> tsk3 = () -> getI3(m, n);

    Callable<Double> tsk4 = () -> getI4(m, n);

    Callable<Double> tsk5 = () -> getI5(m, n);

    ExecutorService service;
    final Future<Double> thread1, thread2, thread3, thread4, thread5;

    service = Executors.newFixedThreadPool(5);
    thread1 = service.submit(tsk1);
    thread2 = service.submit(tsk2);
    thread3 = service.submit(tsk3);
    thread4 = service.submit(tsk4);
    thread5 = service.submit(tsk5);

    try {
        I[0] = thread1.get();
    } catch (InterruptedException | ExecutionException ex) {
        Logger.getLogger(Series_CC.class.getName()).log(Level.SEVERE, null, ex);
    }
    try {
        I[1] = thread2.get();
    } catch (InterruptedException | ExecutionException ex) {
        Logger.getLogger(Series_CC.class.getName()).log(Level.SEVERE, null, ex);
    }
    try {
        I[2] = thread3.get();
    } catch (InterruptedException | ExecutionException ex) {
        Logger.getLogger(Series_CC.class.getName()).log(Level.SEVERE, null, ex);
    }
    try {
        I[3] = thread4.get();
    } catch (InterruptedException | ExecutionException ex) {
        Logger.getLogger(Series_CC.class.getName()).log(Level.SEVERE, null, ex);
    }

    try {
        I[4] = thread5.get();
    } catch (InterruptedException | ExecutionException ex) {
        Logger.getLogger(Series_CC.class.getName()).log(Level.SEVERE, null, ex);
    }

    service.shutdownNow();
    //        } else {
    //            if (n >= m) {
    //                I[0] = I1Mat.get(m - 1, n - 1);
    //                I[1] = I2Mat.get(m - 1, n - 1);
    //                I[2] = I3Mat.get(m - 1, n - 1);
    //                I[3] = I4Mat.get(m - 1, n - 1);
    //                I[4] = I5Mat.get(m - 1, n - 1);
    //            } else {
    //                I[0] = I1Mat.get(n - 1, m - 1);
    //                I[1] = I3Mat.get(n - 1, m - 1);
    //                I[2] = I2Mat.get(n - 1, m - 1);
    //                I[3] = I4Mat.get(n - 1, m - 1);
    //                I[4] = I5Mat.get(n - 1, m - 1);
    //            }
    //        }

    return I;
}

From source file:com.netflix.zeno.diff.TypeDiffOperation.java

@SuppressWarnings("unchecked")
public TypeDiff<T> performDiff(DiffSerializationFramework framework, Iterable<T> fromState, Iterable<T> toState,
        int numThreads) {
    Map<Object, T> fromStateObjects = new HashMap<Object, T>();

    for (T obj : fromState) {
        fromStateObjects.put(instruction.getKey(obj), obj);
    }

    ArrayList<List<T>> perProcessorWorkList = new ArrayList<List<T>>(numThreads); // each entry is a job
    for (int i = 0; i < numThreads; ++i) {
        perProcessorWorkList.add(new ArrayList<T>());
    }

    Map<Object, Object> toStateKeys = new ConcurrentHashMap<Object, Object>();

    int toIncrCount = 0;
    for (T toObject : toState) {
        perProcessorWorkList.get(toIncrCount % numThreads).add(toObject);
        toIncrCount++;
    }

    ExecutorService executor = Executors.newFixedThreadPool(numThreads, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            final Thread thread = new Thread(r, "TypeDiff_" + instruction.getTypeIdentifier());
            thread.setDaemon(true);
            return thread;
        }
    });

    try {
        ArrayList<Future<TypeDiff<T>>> workResultList = new ArrayList<Future<TypeDiff<T>>>(
                perProcessorWorkList.size());
        for (final List<T> workList : perProcessorWorkList) {
            if (workList != null && !workList.isEmpty()) {
                workResultList.add(executor.submit(new TypeDiffCallable<T>(framework, instruction,
                        fromStateObjects, toStateKeys, workList)));
            }
        }

        TypeDiff<T> mergedDiff = new TypeDiff<T>(instruction.getTypeIdentifier());
        for (final Future<TypeDiff<T>> future : workResultList) {
            try {
                TypeDiff<T> typeDiff = future.get();
                mergeTypeDiff(mergedDiff, typeDiff);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        for (Map.Entry<Object, T> entry : fromStateObjects.entrySet()) {
            mergedDiff.incrementFrom();
            if (!toStateKeys.containsKey(entry.getKey()))
                mergedDiff.addExtraInFrom(entry.getValue());
        }

        return mergedDiff;

    } finally {
        executor.shutdownNow();
    }
}

From source file:org.springframework.cloud.stream.binder.kafka.KafkaBinderMetrics.java

private long computeUnconsumedMessages(String topic, String group) {
    ExecutorService exec = Executors.newSingleThreadExecutor();
    Future<Long> future = exec.submit(() -> {

        long lag = 0;
        try {
            Consumer<?, ?> metadataConsumer = this.metadataConsumers.computeIfAbsent(group,
                    (g) -> createConsumerFactory().createConsumer(g, "monitoring"));
            synchronized (metadataConsumer) {
                List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
                List<TopicPartition> topicPartitions = new LinkedList<>();
                for (PartitionInfo partitionInfo : partitionInfos) {
                    topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
                }

                Map<TopicPartition, Long> endOffsets = metadataConsumer.endOffsets(topicPartitions);

                for (Map.Entry<TopicPartition, Long> endOffset : endOffsets.entrySet()) {
                    OffsetAndMetadata current = metadataConsumer.committed(endOffset.getKey());
                    lag += endOffset.getValue();
                    if (current != null) {
                        lag -= current.offset();
                    }
                }
            }
        } catch (Exception ex) {
            LOG.debug("Cannot generate metric for topic: " + topic, ex);
        }
        return lag;
    });
    try {
        return future.get(this.timeout, TimeUnit.SECONDS);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        return 0L;
    } catch (ExecutionException | TimeoutException ex) {
        return 0L;
    } finally {
        exec.shutdownNow();
    }
}

From source file:org.apache.hadoop.hbase.master.TestTableLockManager.java

@Test(timeout = 600000)
public void testReapAllTableLocks() throws Exception {
    prepareMiniZkCluster();
    ServerName serverName = ServerName.valueOf("localhost:10000", 0);
    final TableLockManager lockManager = TableLockManager.createTableLockManager(TEST_UTIL.getConfiguration(),
            TEST_UTIL.getZooKeeperWatcher(), serverName);

    String tables[] = { "table1", "table2", "table3", "table4" };
    ExecutorService executor = Executors.newFixedThreadPool(6);

    final CountDownLatch writeLocksObtained = new CountDownLatch(4);
    final CountDownLatch writeLocksAttempted = new CountDownLatch(10);
    //TODO: read lock tables

    //6 threads will be stuck waiting for the table lock
    for (int i = 0; i < tables.length; i++) {
        final String table = tables[i];
        for (int j = 0; j < i + 1; j++) { //i+1 write locks attempted for table[i]
            executor.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    writeLocksAttempted.countDown();
                    lockManager.writeLock(TableName.valueOf(table), "testReapAllTableLocks").acquire();
                    writeLocksObtained.countDown();
                    return null;
                }
            });
        }
    }

    writeLocksObtained.await();
    writeLocksAttempted.await();

    //now reap all table locks
    lockManager.reapWriteLocks();

    TEST_UTIL.getConfiguration().setInt(TableLockManager.TABLE_WRITE_LOCK_TIMEOUT_MS, 0);
    TableLockManager zeroTimeoutLockManager = TableLockManager
            .createTableLockManager(TEST_UTIL.getConfiguration(), TEST_UTIL.getZooKeeperWatcher(), serverName);

    //should not throw table lock timeout exception
    zeroTimeoutLockManager.writeLock(TableName.valueOf(tables[tables.length - 1]), "zero timeout").acquire();

    executor.shutdownNow();
}

From source file:io.druid.data.input.impl.prefetch.PrefetchSqlFirehoseFactory.java

@Override
public Firehose connect(InputRowParser<Map<String, Object>> firehoseParser, @Nullable File temporaryDirectory)
        throws IOException {
    if (objects == null) {
        objects = ImmutableList.copyOf(Preconditions.checkNotNull(initObjects(), "objects"));
    }
    if (cacheManager.isEnabled() || prefetchConfig.getMaxFetchCapacityBytes() > 0) {
        Preconditions.checkNotNull(temporaryDirectory, "temporaryDirectory");
        Preconditions.checkArgument(temporaryDirectory.exists(), "temporaryDirectory[%s] does not exist",
                temporaryDirectory);
        Preconditions.checkArgument(temporaryDirectory.isDirectory(),
                "temporaryDirectory[%s] is not a directory", temporaryDirectory);
    }

    LOG.info("Create a new firehose for [%d] queries", objects.size());

    // fetchExecutor is responsible for background data fetching
    final ExecutorService fetchExecutor = Execs.singleThreaded("firehose_fetch_%d");
    final Fetcher<T> fetcher = new SqlFetcher<>(cacheManager, objects, fetchExecutor, temporaryDirectory,
            prefetchConfig, new ObjectOpenFunction<T>() {
                @Override
                public InputStream open(T object, File outFile) throws IOException {
                    return openObjectStream(object, outFile);
                }

                @Override
                public InputStream open(T object) throws IOException {
                    final File outFile = File.createTempFile("sqlresults_", null, temporaryDirectory);
                    return openObjectStream(object, outFile);
                }
            });

    return new SqlFirehose(new Iterator<JsonIterator<Map<String, Object>>>() {
        @Override
        public boolean hasNext() {
            return fetcher.hasNext();
        }

        @Override
        public JsonIterator<Map<String, Object>> next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }
            try {
                TypeReference<Map<String, Object>> type = new TypeReference<Map<String, Object>>() {
                };
                final OpenedObject<T> openedObject = fetcher.next();
                final InputStream stream = openedObject.getObjectStream();
                return new JsonIterator<>(type, stream, openedObject.getResourceCloser(), objectMapper);
            } catch (Exception ioe) {
                throw new RuntimeException(ioe);
            }
        }
    }, firehoseParser, () -> {
        fetchExecutor.shutdownNow();
        try {
            Preconditions.checkState(
                    fetchExecutor.awaitTermination(prefetchConfig.getFetchTimeout(), TimeUnit.MILLISECONDS));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ISE("Failed to shutdown fetch executor during close");
        }
    });
}