Example usage for the java.util.concurrent.ThreadPoolExecutor constructor

List of usage examples for the java.util.concurrent.ThreadPoolExecutor constructor

Introduction

This page collects example usages of the java.util.concurrent.ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters and the default thread factory (Executors#defaultThreadFactory).
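
For reference, a minimal sketch of calling this overload directly (the pool sizes, queue capacity, and task below are illustrative assumptions, not drawn from any of the examples that follow):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
    public static void main(String[] args) {
        // 2 core threads, at most 4 threads, idle non-core threads terminate after 30 seconds;
        // a bounded queue of 100 tasks and CallerRunsPolicy so the submitting thread provides backpressure.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 30L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100), new ThreadPoolExecutor.CallerRunsPolicy());
        pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}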

Usage

From source file:org.cloudgraph.rdb.graph.ParallelGraphAssembler.java

/**
 * Constructor.
 * 
 * @param rootType
 *          the SDO root type for the result data graph
 * @param collector
 *          selected SDO properties. Properties are mapped by selected types
 *          required in the result graph.
 * @param snapshotDate
 *          the query snapshot date which is populated into every data object
 *          in the result data graph.
 * @param config
 *          configuration properties, including the minimum (core) and maximum
 *          sizes of the underlying thread pool used for all tasks executed
 *          under this assembler
 * @param con
 *          the database connection used for all statement executions
 */
public ParallelGraphAssembler(PlasmaType rootType, SelectionCollector collector, Timestamp snapshotDate,
        ConfigProps config, Connection con) {
    super(rootType, collector, new RDBStatementFactory(), new RDBStatementExecutor(con),
            new ConcurrentHashMap<Integer, PlasmaDataObject>(), snapshotDate);
    this.executorService = new ThreadPoolExecutor(config.getMinThreadPoolSize(), config.getMaxThreadPoolSize(),
            0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
            new ThreadPoolExecutor.CallerRunsPolicy());
    this.config = config;
}

From source file:com.netflix.suro.client.async.AsyncSuroClient.java

@Inject
public AsyncSuroClient(ClientConfig config, Queue4Client messageQueue, ConnectionPool connectionPool) {
    this.config = config;
    this.messageQueue = messageQueue;

    this.connectionPool = connectionPool;
    this.builder = new MessageSetBuilder(config).withCompression(Compression.create(config.getCompression()));

    poller.execute(createPoller());

    jobQueue = new ArrayBlockingQueue<Runnable>(config.getAsyncJobQueueCapacity()) {
        @Override
        public boolean offer(Runnable runnable) {
            try {
                put(runnable); // block instead of rejecting the task, slowing the producer down
            } catch (InterruptedException e) {
                // do nothing
            }
            return true;
        }
    };

    senders = new ThreadPoolExecutor(config.getAsyncSenderThreads(), config.getAsyncSenderThreads(), 10,
            TimeUnit.SECONDS, jobQueue, new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    TMessageSet messageSet = ((AsyncSuroSender) r).getMessageSet();
                    for (Message m : new MessageSetReader(messageSet)) {
                        restore(m);
                    }
                }
            });

    rateLimiter = new RateLimiter(rateLimitMsgPerSec.get());
    tags = GraphitePublisher.createSimpleTagList(config.getApp() + "_" + config.getClientType() + "_client");
    Monitors.registerObject(this);
}

From source file:org.apache.hadoop.hbase.util.Threads.java

/**
 * Create a new cached thread pool with a bounded maximum number of
 * threads.
 * 
 * @param maxCachedThread the maximum number of threads that can be created in the pool
 * @param timeout the maximum time an idle thread waits before terminating
 * @param unit the time unit of the timeout argument
 * @param threadFactory the factory to use when creating new threads
 * @return a cached thread pool with a bounded maximum number of threads
 */
public static ThreadPoolExecutor getBoundedCachedThreadPool(int maxCachedThread, long timeout, TimeUnit unit,
        ThreadFactory threadFactory) {
    ThreadPoolExecutor boundedCachedThreadPool = new ThreadPoolExecutor(maxCachedThread, maxCachedThread,
            timeout, unit, new LinkedBlockingQueue<Runnable>(), threadFactory);
    // allow the core pool threads timeout and terminate
    boundedCachedThreadPool.allowCoreThreadTimeOut(true);
    return boundedCachedThreadPool;
}
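
A rough usage sketch for the helper above (the pool size, timeout, and task are illustrative assumptions, not part of the HBase source):

// assumes: import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit;
ThreadPoolExecutor pool = Threads.getBoundedCachedThreadPool(8, 60L, TimeUnit.SECONDS,
        Executors.defaultThreadFactory());
pool.execute(() -> System.out.println("work item")); // threads are created lazily, up to 8
pool.shutdown();                                     // idle threads also time out after 60 seconds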

From source file:com.blacklocus.qs.worker.QSAssembly.java

/**
 * @return a configured QueueReader to process tasks. The QueueReader must be started via {@link QueueReader#run()}.
 */
public QueueReader build() {
    validate();

    Runnable heartbeater = Runnables
            .newInfiniteLoggingRunnable(new QSWorkerHeartbeater(workerIdService, logService));
    new Thread(heartbeater, "WorkerHeartbeater").start();

    configuration.addConfiguration(QSConfig.DEFAULTS);

    QueueingStrategy<QSTaskModel> queueingStrategy = QueueingStrategies.newHeapQueueingStrategy(
            configuration.getDouble(QSConfig.PROP_HEAP_STRATEGY_TRIGGER),
            configuration.getLong(QSConfig.PROP_HEAP_STRATEGY_MAX_DELAY),
            configuration.getLong(QSConfig.PROP_HEAP_STRATEGY_HINT));
    QSTaskService taskService = new ThreadedFIFOQSTaskService(queueingStrategy, taskServices);
    TaskServiceIterable taskIterable = new TaskServiceIterable(taskService);

    ExecutorService workerExecutorService = StrategicExecutors.newBalancingThreadPoolExecutor(
            new ThreadPoolExecutor(configuration.getInt(QSConfig.PROP_WORKER_POOL_CORE),
                    configuration.getInt(QSConfig.PROP_WORKER_POOL_MAX), 1, TimeUnit.MINUTES,
                    new SynchronousQueue<Runnable>(), new CallerBlocksPolicy()),
            configuration.getFloat(QSConfig.PROP_WORKER_POOL_UTILIZATION), DEFAULT_SMOOTHING_WEIGHT,
            DEFAULT_BALANCE_AFTER);

    return new QueueReader<QSTaskModel, TaskKit, Object>(taskIterable,
            new WorkerQueueItemHandler(queueingStrategy, taskService, logService, workerIdService, workers),
            workerExecutorService, 0);
}
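
Since the javadoc above notes that the returned QueueReader must be started via QueueReader#run(), a minimal hedged sketch of starting it (the assembly variable is an assumption, not from the source):

// 'assembly' is a configured QSAssembly instance (hypothetical wiring).
QueueReader reader = assembly.build();
reader.run(); // blocks while processing tasks; run it on a dedicated Thread for asynchronous startup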

From source file:com.uber.stream.kafka.mirrormaker.controller.core.OffsetMonitor.java

public OffsetMonitor(final HelixMirrorMakerManager helixMirrorMakerManager, ControllerConf controllerConf) {
    this.numOffsetThread = controllerConf.getNumOffsetThread();
    this.helixMirrorMakerManager = helixMirrorMakerManager;
    this.srcBrokerList = new ArrayList<>();
    this.offsetZkString = controllerConf.getConsumerCommitZkPath().isEmpty()
            ? controllerConf.getSrcKafkaZkPath()
            : controllerConf.getConsumerCommitZkPath();
    this.srcZkString = controllerConf.getSrcKafkaZkPath();
    // disable monitor if SRC_KAFKA_ZK or GROUP_ID is not set
    if (StringUtils.isEmpty(controllerConf.getSrcKafkaZkPath()) || controllerConf.getGroupId().isEmpty()) {
        logger.info("Consumer GROUP_ID is not set. Offset manager is disabled");
        this.refreshIntervalInSec = 0;
    } else {
        this.refreshIntervalInSec = controllerConf.getOffsetRefreshIntervalInSec();
    }

    this.consumerOffsetPath = "/consumers/" + controllerConf.getGroupId() + "/offsets/";

    this.refreshExecutor = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setNameFormat("topic-list-cron-%d").setDaemon(true).build());
    this.cronExecutor = new ThreadPoolExecutor(numOffsetThread, numOffsetThread, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(controllerConf.getBlockingQueueSize()),
            new ThreadFactoryBuilder().setNameFormat("topic-offset-cron-%d").setDaemon(true).build());

    this.topicList = new ArrayList<>();
    this.brokerConsumer = new ConcurrentHashMap<>();
    this.partitionLeader = new ConcurrentHashMap<>();
    this.topicPartitionToOffsetMap = new ConcurrentHashMap<>();
    this.noProgressMap = new ConcurrentHashMap<>();
}

From source file:org.opennms.netmgt.rtc.DataSender.java

/**
 * The constructor for this object.
 *
 * @param dataMgr
 *            The availability data manager.
 * @param configFactory
 *            The RTC configuration factory, which supplies the number of sender threads.
 */
public DataSender(final AvailabilityService dataMgr, final RTCConfigFactory configFactory) {
    m_dataMgr = dataMgr;

    // NMS-7622: Limit the number of queued update tasks with a bounded queue
    m_queue = new LinkedBlockingDeque<Runnable>(Math.max(4 * configFactory.getSenders(), 32));

    m_dsrPool = new ThreadPoolExecutor(1, configFactory.getSenders(), 30, TimeUnit.SECONDS, m_queue,
            new LogPreservingThreadFactory(getClass().getSimpleName(), configFactory.getSenders()));

    // get post error limit
    POST_ERROR_LIMIT = configFactory.getErrorsBeforeUrlUnsubscribe();
}

From source file:org.cloudgraph.cassandra.graph.ParallelGraphAssembler.java

/**
 * Constructor.
 * 
 * @param rootType
 *            the SDO root type for the result data graph
 * @param collector
 *            selected SDO properties. Properties are mapped by selected
 *            types required in the result graph.
 * @param snapshotDate
 *            the query snapshot date which is populated into every data
 *            object in the result data graph.
 * @param config
 *            configuration properties, including the minimum (core) and
 *            maximum sizes of the underlying thread pool used for all
 *            tasks executed under this assembler
 * @param con
 *            the Cassandra session used for all statement executions
 */
public ParallelGraphAssembler(PlasmaType rootType, SelectionCollector collector, Timestamp snapshotDate,
        ConfigProps config, Session con) {
    super(rootType, collector, new CQLStatementFactory(), new CQLStatementExecutor(con),
            new ConcurrentHashMap<Integer, PlasmaDataObject>(), snapshotDate);
    this.executorService = new ThreadPoolExecutor(config.getMinThreadPoolSize(), config.getMaxThreadPoolSize(),
            0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
            new ThreadPoolExecutor.CallerRunsPolicy());
    this.config = config;
}

From source file:com.ery.estorm.util.Threads.java

/**
 * Create a new cached thread pool with a bounded maximum number of threads.
 * 
 * @param maxCachedThread
 *            the maximum number of threads that can be created in the pool
 * @param timeout
 *            the maximum time an idle thread waits before terminating
 * @param unit
 *            the time unit of the timeout argument
 * @param threadFactory
 *            the factory to use when creating new threads
 * @return a cached thread pool with a bounded maximum number of threads
 */
public static ThreadPoolExecutor getBoundedCachedThreadPool(int maxCachedThread, long timeout, TimeUnit unit,
        ThreadFactory threadFactory) {
    ThreadPoolExecutor boundedCachedThreadPool = new ThreadPoolExecutor(maxCachedThread, maxCachedThread,
            timeout, unit, new LinkedBlockingQueue<Runnable>(), threadFactory);
    // allow the core pool threads timeout and terminate
    boundedCachedThreadPool.allowCoreThreadTimeOut(true);
    return boundedCachedThreadPool;
}

From source file:org.batoo.jpa.benchmark.BenchmarkTest.java

private ThreadPoolExecutor createExecutor(BlockingQueue<Runnable> workQueue) {
    final AtomicInteger nextThreadNo = new AtomicInteger(0);

    final ThreadPoolExecutor executor = new ThreadPoolExecutor(//
            BenchmarkTest.THREAD_COUNT, BenchmarkTest.THREAD_COUNT, // min max threads
            0L, TimeUnit.MILLISECONDS, // the keep alive time - hold it forever
            workQueue, new ThreadFactory() {

                @Override
                public Thread newThread(Runnable r) {
                    final Thread t = new Thread(r);
                    t.setDaemon(true);
                    t.setPriority(Thread.NORM_PRIORITY);
                    t.setName("Benchmark-" + nextThreadNo.get());

                    BenchmarkTest.this.threadIds[nextThreadNo.getAndIncrement()] = t.getId();

                    return t;
                }
            });

    executor.prestartAllCoreThreads();

    return executor;
}

From source file:org.apache.kylin.metadata.cachesync.Broadcaster.java

private Broadcaster(final KylinConfig config) {
    this.config = config;

    final String[] nodes = config.getRestServers();
    if (nodes == null || nodes.length < 1) {
        logger.warn("There is no available rest server; check the 'kylin.server.cluster-servers' config");
    }
    logger.debug(nodes.length + " nodes in the cluster: " + Arrays.toString(nodes));

    Executors.newSingleThreadExecutor(new DaemonThreadFactory()).execute(new Runnable() {
        @Override
        public void run() {
            final Map<String, RestClient> restClientMap = Maps.newHashMap();
            final ExecutorService wipingCachePool = new ThreadPoolExecutor(1, 10, 60L, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory());

            while (true) {
                try {
                    final BroadcastEvent broadcastEvent = broadcastEvents.takeFirst();
                    String[] restServers = config.getRestServers();
                    logger.debug("Servers in the cluster: " + Arrays.toString(restServers));
                    for (final String node : restServers) {
                        if (restClientMap.containsKey(node) == false) {
                            restClientMap.put(node, new RestClient(node));
                        }
                    }

                    logger.debug("Announcing new broadcast event: " + broadcastEvent);
                    for (final String node : restServers) {
                        wipingCachePool.execute(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    restClientMap.get(node).wipeCache(broadcastEvent.getEntity(),
                                            broadcastEvent.getEvent(), broadcastEvent.getCacheKey());
                                } catch (IOException e) {
                                    logger.warn("Thread failed during wipe cache at " + broadcastEvent, e);
                                }
                            }
                        });
                    }
                } catch (Exception e) {
                    logger.error("error running wiping", e);
                }
            }
        }
    });
}