Example usage for java.util.concurrent ThreadPoolExecutor ThreadPoolExecutor

List of usage examples for java.util.concurrent ThreadPoolExecutor ThreadPoolExecutor

Introduction

In this page you can find the example usage for java.util.concurrent ThreadPoolExecutor ThreadPoolExecutor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) 

Source Link

Document

Creates a new ThreadPoolExecutor with the given initial parameters and the default thread factory ({@link Executors#defaultThreadFactory}).

Usage

From source file:org.apache.hc.core5.http.benchmark.HttpBenchmark.java

/**
 * Runs the benchmark: starts a fixed worker pool, optionally configures TLS
 * from the benchmark config, dispatches one worker per thread, and polls
 * until all workers finish or the configured time limit expires.
 *
 * @return aggregated benchmark results
 * @throws Exception if TLS setup or result collection fails
 */
public Results doExecute() throws Exception {

    final URL url = config.getUrl();
    // Multiply as long (1000L) so a large time limit cannot overflow int math.
    final long endTime = System.currentTimeMillis() + config.getTimeLimit() * 1000L;
    final HttpHost host = new HttpHost(url.getHost(), url.getPort(), url.getProtocol());
    final ThreadPoolExecutor workerPool = new ThreadPoolExecutor(config.getThreads(), config.getThreads(), 5,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {

                @Override
                public Thread newThread(final Runnable r) {
                    return new Thread(r, "ClientPool");
                }

            });
    workerPool.prestartAllCoreThreads();

    SocketFactory socketFactory = null;
    if ("https".equals(host.getSchemeName())) {
        final SSLContextBuilder sslContextBuilder = new SSLContextBuilder();
        sslContextBuilder.setProtocol("SSL");
        if (config.isDisableSSLVerification()) {
            // Trust-all strategy: certificate validation is intentionally bypassed
            // when SSL verification is disabled in the benchmark config.
            sslContextBuilder.loadTrustMaterial(null, new TrustStrategy() {

                @Override
                public boolean isTrusted(final X509Certificate[] chain, final String authType)
                        throws CertificateException {
                    return true;
                }

            });
        } else if (config.getTrustStorePath() != null) {
            sslContextBuilder.loadTrustMaterial(new File(config.getTrustStorePath()),
                    config.getTrustStorePassword() != null ? config.getTrustStorePassword().toCharArray()
                            : null);
        }
        if (config.getIdentityStorePath() != null) {
            sslContextBuilder.loadKeyMaterial(new File(config.getIdentityStorePath()),
                    config.getIdentityStorePassword() != null ? config.getIdentityStorePassword().toCharArray()
                            : null,
                    config.getIdentityStorePassword() != null ? config.getIdentityStorePassword().toCharArray()
                            : null);
        }
        final SSLContext sslContext = sslContextBuilder.build();
        socketFactory = sslContext.getSocketFactory();
    }

    final BenchmarkWorker[] workers = new BenchmarkWorker[config.getThreads()];
    for (int i = 0; i < workers.length; i++) {
        workers[i] = new BenchmarkWorker(host, createRequest(host), socketFactory, config);
        workerPool.execute(workers[i]);
    }

    // Poll once per second until every worker completes; when the time limit
    // expires, signal all workers to shut down so the count can converge.
    while (workerPool.getCompletedTaskCount() < config.getThreads()) {
        try {
            Thread.sleep(1000);
        } catch (final InterruptedException ex) {
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
        }
        if (config.getTimeLimit() != -1 && System.currentTimeMillis() > endTime) {
            for (int i = 0; i < workers.length; i++) {
                workers[i].setShutdownSignal();
            }
        }
    }

    workerPool.shutdown();
    return ResultProcessor.collectResults(workers, host, config.getUrl().toString());
}

From source file:org.apache.http.benchmark.HttpBenchmark.java

/**
 * Runs the benchmark against the configured host: starts a fixed worker pool,
 * dispatches one pre-built worker per thread, waits for completion, and
 * formats the results.
 *
 * @return formatted benchmark results
 * @throws Exception if preparation or result processing fails
 */
public String execute() throws Exception {

    prepare();

    ThreadPoolExecutor workerPool = new ThreadPoolExecutor(config.getThreads(), config.getThreads(), 5,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "ClientPool");
                }

            });
    workerPool.prestartAllCoreThreads();

    BenchmarkWorker[] workers = new BenchmarkWorker[config.getThreads()];
    for (int i = 0; i < workers.length; i++) {
        workers[i] = new BenchmarkWorker(params, config.getVerbosity(), request[i], host, config.getRequests(),
                config.isKeepAlive(), config.isDisableSSLVerification(), config.getTrustStorePath(),
                config.getTrustStorePassword(), config.getIdentityStorePath(),
                config.getIdentityStorePassword());
        workerPool.execute(workers[i]);
    }

    // Poll once per second until all workers have finished.
    while (workerPool.getCompletedTaskCount() < config.getThreads()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ex) {
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
        }
    }

    workerPool.shutdown();
    return ResultProcessor.printResults(workers, host, config.getUrl().toString(), contentLength);
}

From source file:eagle.jobrunning.crawler.RunningJobCrawlerImpl.java

/**
 * Runs the job-config crawler loop: takes {@code JobContext}s off
 * {@code queueOfConfig} forever and dispatches a {@link ConfigWorkTask} per
 * context to a fixed-size pool. This method never returns in normal operation.
 */
private void startJobConfigProcessThread() {
    int configThreadCount = DEFAULT_CONFIG_THREAD_COUNT;
    LOG.info("Job Config crawler main thread started, pool size: " + DEFAULT_CONFIG_THREAD_COUNT);

    ThreadFactory factory = new ThreadFactory() {
        private final AtomicInteger count = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable runnable) {
            Thread thread = Executors.defaultThreadFactory().newThread(runnable);
            // Use the value returned by incrementAndGet directly: a separate
            // get() after the increment could race with another thread and
            // produce duplicate thread names.
            thread.setName("config-crawler-workthread-" + count.incrementAndGet());
            return thread;
        }
    };

    ThreadPoolExecutor pool = new ThreadPoolExecutor(configThreadCount, configThreadCount, 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(), factory);

    while (true) {
        JobContext context;
        try {
            context = queueOfConfig.take();
            LOG.info("queueOfConfig size: " + queueOfConfig.size());
            Runnable configCrawlerThread = new ConfigWorkTask(new JobContext(context), fetcher, callback, this);
            pool.execute(configCrawlerThread);
        } catch (InterruptedException e) {
            LOG.warn("Got an InterruptedException: " + e.getMessage());
            // Preserve the interrupt flag so the owning thread can observe it.
            Thread.currentThread().interrupt();
        } catch (RejectedExecutionException e2) {
            LOG.warn("Got RejectedExecutionException: " + e2.getMessage());
        } catch (Throwable t) {
            // Pass the throwable itself so the full stack trace is logged,
            // not just the (possibly null) message.
            LOG.warn("Got an throwable t, " + t.getMessage(), t);
        }
    }
}

From source file:org.apache.hadoop.hbase.client.TestHCM.java

@Test
public void testClusterConnection() throws IOException {
    // Externally supplied pool used to verify pool-passing semantics below.
    ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1, 5, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("test-hcm"));

    HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
    HConnection con2 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool);
    // make sure the internally created ExecutorService is the one passed
    assertTrue(otherPool == ((HConnectionImplementation) con2).getCurrentBatchPool());

    String tableName = "testClusterConnection";
    TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close();
    HTable t = (HTable) con1.getTable(tableName, otherPool);
    // make sure passing a pool to the getTable does not trigger creation of an internal pool
    assertNull("Internal Thread pool should be null", ((HConnectionImplementation) con1).getCurrentBatchPool());
    // table should use the pool passed
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(tableName);
    // table should use the connection's internal pool
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(Bytes.toBytes(tableName));
    // try other API too (byte[] table name)
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(TableName.valueOf(tableName));
    // try other API too (TableName)
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con1.getTable(tableName);
    ExecutorService pool = ((HConnectionImplementation) con1).getCurrentBatchPool();
    // make sure an internal pool was created
    assertNotNull("An internal Thread pool should have been created", pool);
    // and that the table is using it
    assertTrue(t.getPool() == pool);
    t.close();

    t = (HTable) con1.getTable(tableName);
    // still using the *same* internal pool
    assertTrue(t.getPool() == pool);
    t.close();

    con1.close();
    // if the pool was created on demand it should be closed upon connection close
    assertTrue(pool.isShutdown());

    con2.close();
    // if the pool is passed, it is not closed
    assertFalse(otherPool.isShutdown());
    otherPool.shutdownNow();
}

From source file:org.apache.bookkeeper.util.OrderedSafeExecutor.java

/**
 * Constructs the safe executor: one single-threaded pool per bucket so that
 * tasks submitted to the same bucket execute in order.
 *
 * @param numThreads
 *            - number of threads
 * @param baseName
 *            - base name of executor threads
 * @param threadFactory
 *            - for constructing threads
 * @param statsLogger
 *            - for reporting executor stats
 * @param traceTaskExecution
 *            - should we stat task execution
 * @param warnTimeMicroSec
 *            - log long task exec warning after this interval
 */
@SuppressWarnings("unchecked")
private OrderedSafeExecutor(String baseName, int numThreads, ThreadFactory threadFactory,
        StatsLogger statsLogger, boolean traceTaskExecution, long warnTimeMicroSec) {
    Preconditions.checkArgument(numThreads > 0);
    Preconditions.checkArgument(!StringUtils.isBlank(baseName));

    this.warnTimeMicroSec = warnTimeMicroSec;
    name = baseName;
    threads = new ThreadPoolExecutor[numThreads];
    threadIds = new long[numThreads];
    queues = new BlockingQueue[numThreads];
    for (int i = 0; i < numThreads; i++) {
        queues[i] = new LinkedBlockingQueue<Runnable>();
        threads[i] = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, queues[i],
                new ThreadFactoryBuilder().setNameFormat(name + "-orderedsafeexecutor-" + i + "-%d")
                        .setThreadFactory(threadFactory).build());

        // Capture each pool's (single) worker thread id by running a task on
        // it and blocking until it completes.
        final int idx = i;
        try {
            threads[idx].submit(new SafeRunnable() {
                @Override
                public void safeRun() {
                    threadIds[idx] = Thread.currentThread().getId();
                }
            }).get();
        } catch (InterruptedException e) {
            // Restore the interrupt flag before failing construction.
            Thread.currentThread().interrupt();
            throw new RuntimeException("Couldn't start thread " + i, e);
        } catch (ExecutionException e) {
            throw new RuntimeException("Couldn't start thread " + i, e);
        }

        // Register per-bucket gauges: queue depth, completed tasks, total tasks.
        statsLogger.registerGauge(String.format("%s-queue-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return queues[idx].size();
            }
        });
        statsLogger.registerGauge(String.format("%s-completed-tasks-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return threads[idx].getCompletedTaskCount();
            }
        });
        statsLogger.registerGauge(String.format("%s-total-tasks-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return threads[idx].getTaskCount();
            }
        });
    }

    // Stats
    this.taskExecutionStats = statsLogger.scope(name).getOpStatsLogger("task_execution");
    this.taskPendingStats = statsLogger.scope(name).getOpStatsLogger("task_queued");
    this.traceTaskExecution = traceTaskExecution;
}

From source file:org.apache.hadoop.hbase.util.TestHBaseFsck.java

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    // Keep handler counts small so the mini cluster stays lightweight.
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.handler.count", 2);
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.metahandler.count", 2);
    TEST_UTIL.startMiniCluster(3);

    // Unbounded cached-style pool (core 1, max unbounded, 60 s keep-alive) of
    // daemon threads shared by the hbck tests in this class.
    executorService = new ThreadPoolExecutor(1, Integer.MAX_VALUE, 60, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("testhbck"));

    AssignmentManager assignmentManager = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    regionStates = assignmentManager.getRegionStates();
    // Disable the balancer so region placement stays deterministic during tests.
    TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, true);
}

From source file:com.addthis.hydra.kafka.consumer.KafkaSource.java

/**
 * Initializes the Kafka source: prepares the mark DB and bundle queue,
 * starts fetch/decode executors, resolves topic metadata (with retries),
 * and launches one fetch task and one decode task per assigned shard.
 *
 * @throws RuntimeException wrapping any initialization failure, including
 *         exhausted metadata retries
 */
@Override
public void init() {
    try {
        if (ignoreMarkDir) {
            File md = new File(markDir);
            if (md.exists()) {
                FileUtils.deleteDirectory(md);
                log.info("Deleted marks directory : {}", md);
            }
        }
        this.bundleQueue = new LinkedBlockingQueue<>(queueSize);
        this.markDb = new PageDB<>(LessFiles.initDirectory(markDir), SimpleMark.class, 100, 100);
        this.fetchExecutor = new ThreadPoolExecutor(fetchThreads, fetchThreads, 0L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder().setNameFormat("source-kafka-fetch-%d").setDaemon(true).build());
        this.decodeExecutor = new ThreadPoolExecutor(decodeThreads, decodeThreads, 0L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder().setNameFormat("source-kafka-decode-%d").setDaemon(true).build());
        this.running = new AtomicBoolean(true);
        final DateTime startTime = (startDate != null) ? DateUtil.getDateTime(dateFormat, startDate) : null;

        zkClient = ZkUtil.makeStandardClient(zookeeper, false);
        TopicMetadata metadata = null;
        int metadataAttempt = 0;
        while (metadata == null && metadataAttempt < metadataRetries) {
            try {
                metadata = ConsumerUtils.getTopicMetadata(zkClient, seedBrokers, topic);
            } catch (Exception e) {
                log.error(
                        "failed to get kafka metadata (attempt {} / {}) for topic: {}, using brokers: {}, error: {}",
                        metadataAttempt, metadataRetries, topic, seedBrokers, e);
                Thread.sleep(metadataBackoff);
            }
            metadataAttempt++;
        }
        // Fail fast with a clear message rather than NPE-ing below when every
        // metadata attempt failed.
        if (metadata == null) {
            throw new IllegalStateException("failed to get kafka metadata after " + metadataRetries
                    + " attempts for topic: " + topic + ", using brokers: " + seedBrokers);
        }

        final Integer[] shards = config.calcShardList(metadata.partitionsMetadata().size());
        final ListBundleFormat bundleFormat = new ListBundleFormat();
        final CountDownLatch decodeLatch = new CountDownLatch(shards.length);
        for (final int shard : shards) {
            LinkedBlockingQueue<MessageWrapper> messageQueue = new LinkedBlockingQueue<>(this.queueSize);
            final PartitionMetadata partition = metadata.partitionsMetadata().get(shard);
            FetchTask fetcher = new FetchTask(this, topic, partition, startTime, messageQueue);
            fetchExecutor.execute(fetcher);
            Runnable decoder = new DecodeTask(decodeLatch, format, bundleFormat, running, messageQueue,
                    bundleQueue);
            decodeExecutor.execute(decoder);
        }
        // Sentinel task: marks the end of the bundle stream once all decoders finish.
        decodeExecutor.submit(new MarkEndTask<>(decodeLatch, running, bundleQueue, bundleQueueEndMarker));
    } catch (Exception ex) {
        log.error("Error initializing kafka source: ", ex);
        throw new RuntimeException(ex);
    }
}

From source file:com.espertech.esper.core.thread.ThreadingServiceImpl.java

/**
 * Builds a fixed-size thread pool for the given engine and pool name, backed
 * by the supplied work queue, with all core threads pre-started.
 *
 * @param engineURI engine identifier; {@code null} is treated as "default"
 * @param name pool name, used in the thread-group name
 * @param queue work queue backing the pool
 * @param numThreads core and maximum pool size
 * @return the started pool
 */
private ThreadPoolExecutor getThreadPool(String engineURI, String name, BlockingQueue<Runnable> queue,
        int numThreads) {
    if (log.isInfoEnabled()) {
        log.info("Starting pool " + name + " with " + numThreads + " threads");
    }

    final String uri = (engineURI == null) ? "default" : engineURI;
    final ThreadGroup threadGroup = new ThreadGroup("com.espertech.esper." + uri + "-" + name);
    final ThreadPoolExecutor pool = new ThreadPoolExecutor(numThreads, numThreads, 1, TimeUnit.SECONDS, queue,
            new EngineThreadFactory(uri, name, threadGroup, Thread.NORM_PRIORITY));
    pool.prestartAllCoreThreads();
    return pool;
}

From source file:com.vc.net.AsyncHttpClient.java

/**
 * Builds the async HTTP client: configures connection/protocol parameters,
 * registers http/https schemes, installs gzip request/response interceptors,
 * a retry handler, and a bounded thread pool for request execution.
 */
public AsyncHttpClient() {
    BasicHttpParams httpParams = new BasicHttpParams();

    ConnManagerParams.setTimeout(httpParams, socketTimeout);
    ConnManagerParams.setMaxConnectionsPerRoute(httpParams, new ConnPerRouteBean(maxConnections));
    ConnManagerParams.setMaxTotalConnections(httpParams, DEFAULT_MAX_CONNECTIONS);

    HttpConnectionParams.setSoTimeout(httpParams, socketTimeout);
    HttpConnectionParams.setConnectionTimeout(httpParams, socketTimeout);
    HttpConnectionParams.setTcpNoDelay(httpParams, true);
    HttpConnectionParams.setSocketBufferSize(httpParams, DEFAULT_SOCKET_BUFFER_SIZE);

    HttpProtocolParams.setVersion(httpParams, HttpVersion.HTTP_1_1);
    HttpProtocolParams.setUserAgent(httpParams,
            String.format("uroad-android-httpclient/%s (http://www.u-road.com/)", VERSION));
    SchemeRegistry schemeRegistry = new SchemeRegistry();
    schemeRegistry.register(new Scheme("http", PlainSocketFactory.getSocketFactory(), 80));
    schemeRegistry.register(new Scheme("https", SSLSocketFactory.getSocketFactory(), 443));
    ThreadSafeClientConnManager cm = new ThreadSafeClientConnManager(httpParams, schemeRegistry);

    httpContext = new SyncBasicHttpContext(new BasicHttpContext());
    httpClient = new DefaultHttpClient(cm, httpParams);
    // Request interceptor: advertise gzip support and apply any custom headers.
    httpClient.addRequestInterceptor(new HttpRequestInterceptor() {
        @Override
        public void process(HttpRequest request, HttpContext context) {
            if (!request.containsHeader(HEADER_ACCEPT_ENCODING)) {
                request.addHeader(HEADER_ACCEPT_ENCODING, ENCODING_GZIP);
            }
            for (String header : clientHeaderMap.keySet()) {
                request.addHeader(header, clientHeaderMap.get(header));
            }
        }
    });
    // Response interceptor: transparently inflate gzip-encoded entities.
    httpClient.addResponseInterceptor(new HttpResponseInterceptor() {
        @Override
        public void process(HttpResponse response, HttpContext context) {
            final HttpEntity entity = response.getEntity();
            if (entity == null) {
                return;
            }
            final Header encoding = entity.getContentEncoding();
            if (encoding != null) {
                for (HeaderElement element : encoding.getElements()) {
                    if (element.getName().equalsIgnoreCase(ENCODING_GZIP)) {
                        response.setEntity(new InflatingEntity(response.getEntity()));
                        break;
                    }
                }
            }
        }
    });

    httpClient.setHttpRequestRetryHandler(new RetryHandler(DEFAULT_MAX_RETRIES));

    // Bounded queue (3) with CallerRunsPolicy: when saturated, the submitting
    // thread runs the request itself instead of rejecting it.
    threadPool = new ThreadPoolExecutor(DEFAULT_CORE_POOL_SIZE, DEFAULT_MAXIMUM_POOL_SIZE,
            DEFAULT_KEEP_ALIVETIME, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(3),
            new ThreadPoolExecutor.CallerRunsPolicy());

    requestMap = new WeakHashMap<Context, List<WeakReference<Future<?>>>>();
    clientHeaderMap = new HashMap<String, String>();
}

From source file:com.addthis.hydra.task.source.DataSourceStreamList.java

/**
 * Creates the source-init and peeker executor services. Both are wrapped as
 * exiting executors so outstanding work does not block JVM shutdown.
 */
@Override
public void postDecode() {
    // Fixed-size pool for initializing sources.
    final ThreadPoolExecutor initPool = new ThreadPoolExecutor(sourceInitThreads, sourceInitThreads, 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingDeque<Runnable>(),
            new ThreadFactoryBuilder().setNameFormat("SourceInitThread-%d").build());
    sourceInitService = MoreExecutors.getExitingExecutorService(initPool);

    // Fixed-size pool for peeking at task data sources.
    final ThreadPoolExecutor peekPool = new ThreadPoolExecutor(peekerThreads, peekerThreads, 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingDeque<Runnable>(),
            new ThreadFactoryBuilder().setNameFormat("TaskDataSourcePeeker-%d").build());
    peekerService = MoreExecutors.getExitingExecutorService(peekPool);
}