Example usage for the java.util.concurrent.ThreadPoolExecutor constructor

Introduction

This page collects example usages of the java.util.concurrent.ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters and the default thread factory returned by Executors.defaultThreadFactory().
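
Below is a minimal, hypothetical sketch that calls this constructor form directly; the pool sizes, keep-alive time, queue capacity, and the CallerRunsPolicy handler are illustrative values, not taken from any of the examples that follow.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorExample {
    public static void main(String[] args) {
        // Bounded work queue plus CallerRunsPolicy: when the queue is full,
        // the submitting thread runs the task itself instead of rejecting it.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 60L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100), new ThreadPoolExecutor.CallerRunsPolicy());

        executor.execute(() -> System.out.println("Running on " + Thread.currentThread().getName()));

        executor.shutdown();
    }
}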

Usage

From source file:com.sworddance.taskcontrol.TaskControl.java

@SuppressWarnings("unchecked")
public TaskControl(Comparator<PrioritizedTask> activeComparator, int maxThreads, ThreadFactory threadFactory,
        Log log) {
    this.log = log;
    ApplicationIllegalArgumentException.notNull(activeComparator, "activeComparator");
    this.eligibleTasks = new PriorityBlockingQueue<PrioritizedTask>(20, activeComparator);
    this.stateChangeNotificator = new ReentrantLock();
    this.newTasks = this.stateChangeNotificator.newCondition();
    this.runningTasks = new AtomicInteger(0);
    this.threadFactory = threadFactory;
    int keepAliveTime = 10;

    int corePoolSize = 1;
    this.executor = new ThreadPoolExecutor(corePoolSize, Math.max(corePoolSize, maxThreads), keepAliveTime,
            MICROSECONDS, (BlockingQueue) this.eligibleTasks, threadFactory);
    this.stayActive = true;
}

From source file:nuclei.task.TaskPool.java

TaskPool(Looper mainLooper, final String name, int maxThreads, List<TaskInterceptor> interceptors) {
    this.name = name;
    TASK_POOLS.put(name, this);
    this.interceptors = interceptors;
    handler = new Handler(mainLooper, this);
    BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(128);
    taskRunnablePool = new Pools.SimplePool<>(maxThreads);
    taskQueues = new Pools.SimplePool<>(10);
    maxThreads = Math.max(CORE_POOL_SIZE, maxThreads);
    poolExecutor = new ThreadPoolExecutor(CORE_POOL_SIZE, maxThreads, 1, TimeUnit.SECONDS, workQueue,
            new ThreadFactory() {
                private final AtomicInteger mCount = new AtomicInteger(1);

                @Override
                public Thread newThread(@NonNull Runnable r) {
                    return new Thread(r, name + " #" + mCount.incrementAndGet());
                }
            });

    if (LISTENER != null)
        LISTENER.onCreated(this);
}

From source file:com.opentech.camel.task.threading.DefaultThreadPool.java

@Override
public void initialize() {
    super.initialize();
    pengingReleaseThreadRequest = new ConcurrentHashMap<String, WrapedTask>();
    innerThreadPool = new ThreadPoolExecutor(coreThreadCount, maxThreadCount, keepAliveTime,
            TimeUnit.MILLISECONDS, workqueue, threadFactory) {

        @Override
        protected void beforeExecute(Thread t, Runnable r) {
            super.beforeExecute(t, r);
            /*if(r instanceof WrapedTask) {
               WrapedTask wt = ((WrapedTask)r);
               //XXX NO NEED
            }*/
        }

        @Override
        protected void afterExecute(Runnable r, Throwable t) {
            super.afterExecute(r, t);
            if (r instanceof WrapedTask) {
                WrapedTask wt = ((WrapedTask) r);
                if (null != pengingReleaseThreadRequest.putIfAbsent(Thread.currentThread().getName(), wt)) {
                    throw new IllegalStateException("OMG Bug");
                }
                // XXX Release thread resource here, not safe, because of thread not really released here
            }
        }

    };
}

From source file:com.flipkart.aesop.runtime.bootstrap.consumer.DefaultBlockingEventConsumer.java

@Override
public void afterPropertiesSet() throws Exception {
    this.numberOfPartition = Math.min(numberOfPartition, Runtime.getRuntime().availableProcessors());
    LOGGER.info("numberOfPartition used: " + numberOfPartition);
    for (int i = 0; i < numberOfPartition; i++) {
        BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(executorQueueSize);
        executors.add(new ThreadPoolExecutor(1, 1, 0, TimeUnit.MILLISECONDS, queue, rejectedExecutionHandler));
    }
}

From source file:io.cloudslang.worker.management.services.WorkerManager.java

@PostConstruct
private void init() {
    logger.info("Initialize worker with UUID: " + workerUuid);
    System.setProperty("worker.uuid", workerUuid); //do not remove!!!
    inBuffer = new LinkedBlockingQueue<>();

    executorService = new ThreadPoolExecutor(numberOfThreads, numberOfThreads, Long.MAX_VALUE,
            TimeUnit.NANOSECONDS, inBuffer,
            new WorkerThreadFactory((++threadPoolVersion) + "_WorkerExecutionThread"));

    mapOfRunningTasks = new ConcurrentHashMap<>(numberOfThreads);
}

From source file:org.apache.axis2.util.threadpool.ThreadPool.java

protected ThreadPoolExecutor createDefaultExecutor(final String name, final int priority,
        final boolean daemon) {
    ThreadPoolExecutor rc;
    if (maxPoolSize == Integer.MAX_VALUE) {
        rc = new ThreadPoolExecutor(corePoolSize, maxPoolSize, 10, TimeUnit.SECONDS, new SynchronousQueue(),
                new DefaultThreadFactory(name, daemon, priority));
    } else {
        rc = new ThreadPoolExecutor(corePoolSize, maxPoolSize, 10, TimeUnit.SECONDS, new LinkedBlockingQueue(),
                new DefaultThreadFactory(name, daemon, priority));
    }
    // FIXME: This API is only in JDK 1.6 - Use reflection?        
    //        rc.allowCoreThreadTimeOut(true);
    return rc;
}

From source file:org.pentaho.reporting.platform.plugin.async.PentahoAsyncExecutor.java

/**
 * @param capacity               thread pool capacity
 * @param autoSchedulerThreshold quantity of rows after which reports are automatically scheduled
 */
public PentahoAsyncExecutor(final int capacity, final int autoSchedulerThreshold) {
    this.autoSchedulerThreshold = autoSchedulerThreshold;
    log.info("Initialized reporting async execution fixed thread pool with capacity: " + capacity);
    executorService = new DelegatedListenableExecutor(new ThreadPoolExecutor(capacity, capacity, 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    Thread thread = Executors.defaultThreadFactory().newThread(r);
                    thread.setDaemon(true);
                    thread.setName("PentahoAsyncExecutor Thread Pool");
                    return thread;
                }
            }));
    PentahoSystem.addLogoutListener(this);
    this.writeToJcrListeners = new ConcurrentHashMap<>();
    this.schedulingLocationListener = new MemorizeSchedulingLocationListener();
}

From source file:com.packpublishing.asynchronousandroid.chapter5.Sha1HashBroadcastService.java

@Override
public void onCreate() {

    Log.i("Sha1HashService", "Starting Hashing Service");
    super.onCreate();
    mExecutor = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, 5, TimeUnit.SECONDS, sPoolWorkQueue,
            sThreadFactory);
    mExecutor.prestartAllCoreThreads();

}

From source file:com.amazonaws.mobileconnectors.pinpoint.targeting.TargetingClient.java

/**
 * Initializes a client to manage updating the endpoint profile
 *
 * @param context The {@link PinpointContext}
 */
public TargetingClient(final PinpointContext context) {
    this(context, new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(MAX_EVENT_OPERATIONS), new ThreadPoolExecutor.DiscardPolicy()));
}

From source file:org.apache.hadoop.fs.nfs.stream.NFSBufferedInputStream.java

public NFSBufferedInputStream(NFSv3FileSystemStore store, FileHandle handle, Path f, Configuration conf,
        long splitSize, Credentials credentials, FileSystem.Statistics fsStat) throws IOException {

    this.store = store;
    this.handle = handle;
    this.credentials = credentials;
    this.pathString = f.toUri().getPath();

    doPrefetch = conf.getBoolean("fs.nfs.prefetch", DEFAULT_PREFETCH_ENABLED);

    this.fileOffset = 0L;
    this.readBlockSizeBits = store.getReadSizeBits();
    this.splitSize = splitSize;
    this.closed = new AtomicBoolean(false);
    this.ongoing = new ConcurrentHashMap<>(DEFAULT_PREFETCH_POOL_SIZE);
    this.cache = new ConcurrentHashMap<>(DEFAULT_CACHE_SIZE_IN_BLOCKS);
    this.statistics = new StreamStatistics(NFSBufferedInputStream.class + pathString,
            streamId.getAndIncrement(), true);
    this.executors = new ThreadPoolExecutor(DEFAULT_PREFETCH_POOL_SIZE, MAX_PREFETCH_POOL_SIZE, 5,
            TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(1024),
            new ThreadPoolExecutor.CallerRunsPolicy());

    // Keep track of the file length at file open
    // NOTE: The file does not get modified while this stream is open
    Nfs3FileAttributes attributes = store.getFileAttributes(handle, credentials);
    if (attributes != null) {
        this.fileLength = attributes.getSize();
        this.prefetchBlockLimit = (long) (Math.min(fileLength, splitSize) >> readBlockSizeBits);
        if (this.fileLength < 0) {
            throw new IOException("File length is invalid: " + this.fileLength);
        }
    } else {
        throw new IOException("Could not get file length from NFS server");
    }

}