Example usage for java.util.concurrent ThreadPoolExecutor ThreadPoolExecutor

List of usage examples for java.util.concurrent ThreadPoolExecutor ThreadPoolExecutor

Introduction

On this page you can find example usages of the java.util.concurrent ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters and the default thread factory (Executors#defaultThreadFactory).

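A minimal, self-contained sketch of this overload is shown below. The pool sizes, queue bound, and the CallerRunsPolicy rejection handler are illustrative choices for this page, not values taken from the usages that follow.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorExample {
    public static void main(String[] args) throws InterruptedException {
        // Saturation policy: when the queue is full, run the task on the submitting thread.
        RejectedExecutionHandler handler = new ThreadPoolExecutor.CallerRunsPolicy();

        // 2 core threads, up to 4 threads total, excess idle threads retired after 30 seconds,
        // and a bounded work queue of 10 tasks; threads come from Executors#defaultThreadFactory.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 30L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(10), handler);

        for (int i = 0; i < 20; i++) {
            final int taskId = i;
            pool.execute(() -> System.out.println("task " + taskId + " on " + Thread.currentThread().getName()));
        }

        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}

With these settings the pool grows past its core size only after the queue fills; once both are saturated, CallerRunsPolicy throttles submission by running tasks on the calling thread.
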
Usage

From source file:org.apache.hadoop.fs.azure.BlockBlobAppendStream.java

/**
 * Helper method that starts an Append Lease renewer thread and the
 * thread pool.
 */
public synchronized void initialize() {

    if (initialized) {
        return;
    }
    /*
     * Start the thread for the Append lease renewer.
     */
    Thread appendLeaseRenewer = new Thread(new AppendRenewer());
    appendLeaseRenewer.setDaemon(true);
    appendLeaseRenewer.setName(String.format("%s-AppendLeaseRenewer", key));
    appendLeaseRenewer.start();

    /*
     * Parameters to ThreadPoolExecutor:
     * corePoolSize : the number of threads to keep in the pool, even if they are idle,
     *                unless allowCoreThreadTimeOut is set
     * maximumPoolSize : the maximum number of threads to allow in the pool
     * keepAliveTime - when the number of threads is greater than the core,
     *                 this is the maximum time that excess idle threads will
     *                 wait for new tasks before terminating.
     * unit - the time unit for the keepAliveTime argument
     * workQueue - the queue to use for holding tasks before they are executed
     *  This queue will hold only the Runnable tasks submitted by the execute method.
     */
    this.ioThreadPool = new ThreadPoolExecutor(4, 4, 2, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
            new UploaderThreadFactory());

    initialized = true;
}

From source file:org.apache.axis2.transport.http.server.HttpFactory.java

/**
 * Create the executor used to launch the single requestConnectionListener
 */
public ExecutorService newListenerExecutor(int port) {
    return new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
            new DefaultThreadFactory(new ThreadGroup("Listener thread group"), "HttpListener-" + this.port));
}

From source file:org.activiti.engine.impl.asyncexecutor.DefaultAsyncJobExecutor.java

protected void initAsyncJobExecutionThreadPool() {
    if (threadPoolQueue == null) {
        log.info("Creating thread pool queue of size {}", queueSize);
        threadPoolQueue = new ArrayBlockingQueue<Runnable>(queueSize);
    }

    if (executorService == null) {
        log.info("Creating executor service with corePoolSize {}, maxPoolSize {} and keepAliveTime {}",
                corePoolSize, maxPoolSize, keepAliveTime);

        BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
                .namingPattern("activiti-async-job-executor-thread-%d").build();
        executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime,
                TimeUnit.MILLISECONDS, threadPoolQueue, threadFactory);
    }
}

From source file:org.apache.accumulo.core.file.rfile.MultiThreadedRFileTest.java

@SuppressFBWarnings(value = "INFORMATION_EXPOSURE_THROUGH_AN_ERROR_MESSAGE", justification = "information put into error message is safe and used for testing")
@Test
public void testMultipleReaders() throws IOException {
    final List<Throwable> threadExceptions = Collections.synchronizedList(new ArrayList<Throwable>());
    Map<String, MutableInt> messages = new HashMap<>();
    Map<String, String> stackTrace = new HashMap<>();

    final TestRFile trfBase = new TestRFile(conf);

    writeData(trfBase);

    trfBase.openReader();

    try {

        validate(trfBase);

        final TestRFile trfBaseCopy = trfBase.deepCopy();

        validate(trfBaseCopy);

        // now start up multiple RFile deepcopies
        int maxThreads = 10;
        String name = "MultiThreadedRFileTestThread";
        ThreadPoolExecutor pool = new ThreadPoolExecutor(maxThreads + 1, maxThreads + 1, 5 * 60,
                TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamingThreadFactory(name));
        pool.allowCoreThreadTimeOut(true);
        try {
            Runnable runnable = () -> {
                try {
                    TestRFile trf = trfBase;
                    synchronized (trfBaseCopy) {
                        trf = trfBaseCopy.deepCopy();
                    }
                    validate(trf);
                } catch (Throwable t) {
                    threadExceptions.add(t);
                }
            };
            for (int i = 0; i < maxThreads; i++) {
                pool.submit(runnable);
            }
        } finally {
            pool.shutdown();
            try {
                pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

        for (Throwable t : threadExceptions) {
            String msg = t.getClass() + " : " + t.getMessage();
            if (!messages.containsKey(msg)) {
                messages.put(msg, new MutableInt(1));
            } else {
                messages.get(msg).increment();
            }
            StringWriter string = new StringWriter();
            PrintWriter writer = new PrintWriter(string);
            t.printStackTrace(writer);
            writer.flush();
            stackTrace.put(msg, string.getBuffer().toString());
        }
    } finally {
        trfBase.closeReader();
        trfBase.close();
    }

    for (String message : messages.keySet()) {
        LOG.error(messages.get(message) + ": " + message);
        LOG.error(stackTrace.get(message));
    }

    assertTrue(threadExceptions.isEmpty());
}

From source file:org.lizardirc.beancounter.Beancounter.java

private ExecutorService constructExecutorService() {
    BasicThreadFactory factory = new BasicThreadFactory.Builder().namingPattern("primaryListenerPool-thread%d")
            .daemon(true).build();
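    // Core size 0, an effectively unbounded maximum, and a SynchronousQueue give
    // cached-thread-pool behaviour: threads are created on demand and reclaimed after 60 seconds idle.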
    ThreadPoolExecutor ret = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<>(), factory);
    ret.allowCoreThreadTimeOut(true);
    return ret;
}

From source file:org.openscore.worker.management.services.WorkerManager.java

public void doRecovery() {

    //        Attempts to stop all actively executing tasks, halts the
    //        processing of waiting tasks, and returns a list of the tasks
    //        that were awaiting execution.
    //
    //        This method does not wait for actively executing tasks to
    //        terminate.
    //
    //        There are no guarantees beyond best-effort attempts to stop
    //        processing actively executing tasks.  For example, typical
    //        implementations will cancel via {@link Thread#interrupt}, so any
    //        task that fails to respond to interrupts may never terminate.
    executorService.shutdownNow();

    try {
        logger.warn(
                "Worker is in doRecovery(). Cleaning state and cancelling running tasks. It may take up to 3 minutes...");
        boolean finished = executorService.awaitTermination(3, TimeUnit.MINUTES);

        if (finished) {
            logger.warn("Worker succeeded to cancel running tasks during doRecovery().");
        } else {
            logger.warn("Not all running tasks responded to cancel.");
        }
    } catch (InterruptedException ex) {
        /* ignore */
    }

    mapOfRunningTasks.clear();

    //Make new executor
    executorService = new ThreadPoolExecutor(numberOfThreads, numberOfThreads, Long.MAX_VALUE,
            TimeUnit.NANOSECONDS, inBuffer, new WorkerThreadFactory("WorkerExecutionThread"));
}

From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java

/**
 * Creates an instance.
 *
 * @param corePoolSize core size of the thread pool
 * @param maxPoolSize the max number of threads in the thread pool
 * @param maxConcurrentRequests maximum number of concurrent requests
 * @param client a client for making requests
 * @param clusterCoordinator the cluster coordinator to use for interacting with node statuses
 * @param connectionTimeout the connection timeout specified in milliseconds
 * @param readTimeout the read timeout specified in milliseconds
 * @param callback a callback that will be called whenever all of the responses have been gathered for a request. May be null.
 * @param eventReporter an EventReporter that can be used to notify users of interesting events. May be null.
 * @param nifiProperties properties
 */
public ThreadPoolRequestReplicator(final int corePoolSize, final int maxPoolSize,
        final int maxConcurrentRequests, final Client client, final ClusterCoordinator clusterCoordinator,
        final String connectionTimeout, final String readTimeout, final RequestCompletionCallback callback,
        final EventReporter eventReporter, final NiFiProperties nifiProperties) {
    if (corePoolSize <= 0) {
        throw new IllegalArgumentException("The Core Pool Size must be greater than zero.");
    } else if (maxPoolSize < corePoolSize) {
        throw new IllegalArgumentException("Max Pool Size must be >= Core Pool Size.");
    } else if (client == null) {
        throw new IllegalArgumentException("Client may not be null.");
    }

    this.client = client;
    this.clusterCoordinator = clusterCoordinator;
    this.connectionTimeoutMs = (int) FormatUtils.getTimeDuration(connectionTimeout, TimeUnit.MILLISECONDS);
    this.readTimeoutMs = (int) FormatUtils.getTimeDuration(readTimeout, TimeUnit.MILLISECONDS);
    this.maxConcurrentRequests = maxConcurrentRequests;
    this.responseMapper = new StandardHttpResponseMapper(nifiProperties);
    this.eventReporter = eventReporter;
    this.callback = callback;
    this.nifiProperties = nifiProperties;

    client.property(ClientProperties.CONNECT_TIMEOUT, connectionTimeoutMs);
    client.property(ClientProperties.READ_TIMEOUT, readTimeoutMs);
    client.property(ClientProperties.FOLLOW_REDIRECTS, Boolean.TRUE);

    final AtomicInteger threadId = new AtomicInteger(0);
    final ThreadFactory threadFactory = r -> {
        final Thread t = Executors.defaultThreadFactory().newThread(r);
        t.setDaemon(true);
        t.setName("Replicate Request Thread-" + threadId.incrementAndGet());
        return t;
    };

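    // Note: because LinkedBlockingQueue is unbounded here, the pool stays at corePoolSize;
    // extra threads up to maxPoolSize are only created once the work queue is full.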
    executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, 5, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), threadFactory);

    maintenanceExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            t.setName(ThreadPoolRequestReplicator.class.getSimpleName() + " Maintenance Thread");
            return t;
        }
    });

    maintenanceExecutor.scheduleWithFixedDelay(() -> purgeExpiredRequests(), 1, 1, TimeUnit.SECONDS);
}

From source file:org.codice.ddf.commands.catalog.IngestCommand.java

@Override
protected Object executeWithSubject() throws Exception {

    final CatalogFacade catalog = getCatalog();
    final File inputFile = new File(filePath);

    if (!inputFile.exists()) {
        printErrorMessage("File or directory [" + filePath + "] must exist.");
        console.println("If the file does indeed exist, try putting the path in quotes.");
        return null;
    }

    if (deprecatedBatchSize != DEFAULT_BATCH_SIZE) {
        // user specified the old style batch size, so use that
        printErrorMessage(
                "Batch size positional argument is DEPRECATED, please use --batchsize option instead.");
        batchSize = deprecatedBatchSize;
    }

    if (batchSize <= 0) {
        printErrorMessage(
                "A batch size of [" + batchSize + "] was supplied. Batch size must be greater than 0.");
        return null;
    }

    if (!StringUtils.isEmpty(failedDir)) {
        failedIngestDirectory = new File(failedDir);
        if (!verifyFailedIngestDirectory()) {
            return null;
        }

        /**
         * Batch size is always set to 1 when using an Ingest Failure Directory. If a batch size is specified by the user, issue
         * a warning stating that a batch size of 1 will be used.
         */
        if (batchSize != DEFAULT_BATCH_SIZE) {
            console.println("WARNING: An ingest failure directory was supplied in addition to a batch size of "
                    + batchSize
                    + ". When using an ingest failure directory, the batch size must be 1. Setting batch size to 1.");
        }

        batchSize = 1;
    }

    BundleContext bundleContext = getBundleContext();
    if (!DEFAULT_TRANSFORMER_ID.equals(transformerId)) {
        ServiceReference[] refs = null;

        try {
            refs = bundleContext.getServiceReferences(InputTransformer.class.getName(),
                    "(|" + "(" + Constants.SERVICE_ID + "=" + transformerId + ")" + ")");
        } catch (InvalidSyntaxException e) {
            throw new IllegalArgumentException("Invalid transformer transformerId: " + transformerId, e);
        }

        if (refs == null || refs.length == 0) {
            throw new IllegalArgumentException("Transformer " + transformerId + " not found");
        } else {
            transformer = (InputTransformer) bundleContext.getService(refs[0]);
        }
    }

    Stream<Path> ingestStream = Files.walk(inputFile.toPath(), FileVisitOption.FOLLOW_LINKS);

    int totalFiles = (inputFile.isDirectory()) ? inputFile.list().length : 1;
    fileCount.getAndSet(totalFiles);

    final ArrayBlockingQueue<Metacard> metacardQueue = new ArrayBlockingQueue<>(batchSize * multithreaded);

    ExecutorService queueExecutor = Executors.newSingleThreadExecutor();

    final long start = System.currentTimeMillis();

    printProgressAndFlush(start, fileCount.get(), 0);

    queueExecutor.submit(() -> buildQueue(ingestStream, metacardQueue, start));

    final ScheduledExecutorService batchScheduler = Executors.newSingleThreadScheduledExecutor();

    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);

    submitToCatalog(batchScheduler, executorService, metacardQueue, catalog, start);

    while (!doneBuildingQueue.get() || processingThreads.get() != 0) {
        try {
            TimeUnit.SECONDS.sleep(2);
        } catch (InterruptedException e) {
            LOGGER.error("Ingest 'Waiting for processing to finish' thread interrupted: {}", e);
        }
    }

    try {
        queueExecutor.shutdown();
        executorService.shutdown();
        batchScheduler.shutdown();
    } catch (SecurityException e) {
        LOGGER.error("Executor service shutdown was not permitted: {}", e);
    }

    printProgressAndFlush(start, fileCount.get(), ingestCount.get() + ignoreCount.get());
    long end = System.currentTimeMillis();
    console.println();
    String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));

    console.println();
    console.printf(" %d file(s) ingested in %s %n", ingestCount.get(), elapsedTime);

    LOGGER.info("{} file(s) ingested in {} [{} records/sec]", ingestCount.get(), elapsedTime,
            calculateRecordsPerSecond(ingestCount.get(), start, end));
    INGEST_LOGGER.info("{} file(s) ingested in {} [{} records/sec]", ingestCount.get(), elapsedTime,
            calculateRecordsPerSecond(ingestCount.get(), start, end));

    if (fileCount.get() != ingestCount.get()) {
        console.println();
        if ((fileCount.get() - ingestCount.get() - ignoreCount.get()) >= 1) {
            String failedAmount = Integer.toString(fileCount.get() - ingestCount.get() - ignoreCount.get());
            printErrorMessage(
                    failedAmount + " file(s) failed to be ingested.  See the ingest log for more details.");
            INGEST_LOGGER.warn("{} files(s) failed to be ingested.", failedAmount);
        }
        if (ignoreList != null) {
            String ignoredAmount = Integer.toString(ignoreCount.get());
            printColor(Ansi.Color.YELLOW,
                    ignoredAmount + " file(s) ignored.  See the ingest log for more details.");
            INGEST_LOGGER.warn("{} files(s) were ignored.", ignoredAmount);
        }
    }
    console.println();

    return null;
}

From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java

/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = getConf().getInt("hbase.loadincremental.threads.max",
            Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        // check whether there is invalid family name in HFiles to be bulkloaded
        Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
        ArrayList<String> familyNames = new ArrayList<String>();
        for (HColumnDescriptor family : families) {
            familyNames.add(family.getNameAsString());
        }
        ArrayList<String> unmatchedFamilies = new ArrayList<String>();
        for (LoadQueueItem lqi : queue) {
            String familyNameInHFile = Bytes.toString(lqi.family);
            if (!familyNames.contains(familyNameInHFile)) {
                unmatchedFamilies.add(familyNameInHFile);
            }
        }
        if (unmatchedFamilies.size() > 0) {
            String msg = "Unmatched family names found: unmatched family names in HFiles to be bulkloaded: "
                    + unmatchedFamilies + "; valid family names of table "
                    + Bytes.toString(table.getTableName()) + " are: " + familyNames;
            LOG.error(msg);
            throw new IOException(msg);
        }
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        //If using secure bulk load, get source delegation token, and
        //prepare staging directory and token
        if (userProvider.isHBaseSecurityEnabled()) {
            // fs is the source filesystem
            fsDelegationToken.acquireDelegationToken(fs);

            bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getName());
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = getConf().getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
                // Error is logged inside checkHFilesCountPerRegionPerFamily.
                throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
                        + " hfiles to one family of one region");
            }

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        if (userProvider.isHBaseSecurityEnabled()) {
            fsDelegationToken.releaseDelegationToken();

            if (bulkToken != null) {
                new SecureBulkLoadClient(table).cleanupBulkLoad(bulkToken);
            }
        }
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }

    if (queue != null && !queue.isEmpty()) {
        throw new RuntimeException(
                "Bulk load aborted with some files not yet loaded." + "Please check log for more details.");
    }
}

From source file:org.apache.axis2.transport.http.server.HttpFactory.java

/**
 * Create the executor used to manage request processing threads
 */
public ExecutorService newRequestExecutor(int port) {
    return new ThreadPoolExecutor(requestCoreThreadPoolSize, requestMaxThreadPoolSize, threadKeepAliveTime,
            threadKeepAliveTimeUnit, newRequestBlockingQueue(),
            new DefaultThreadFactory(new ThreadGroup("Connection thread group"), "HttpConnection-" + port));
}