Example usage for the java.util.concurrent.ThreadPoolExecutor constructor

List of usage examples for the java.util.concurrent.ThreadPoolExecutor constructor

Introduction

On this page you can find example usage for the java.util.concurrent.ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters, the default thread factory and the default rejected execution handler.
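Before the usage examples, here is a minimal, self-contained sketch of the constructor in isolation; the pool sizes, queue capacity, and task body below are illustrative, not taken from any of the projects that follow.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorExample {
    public static void main(String[] args) {
        // Core pool of 2 threads, growing to at most 4 when the 10-slot queue is full;
        // idle non-core threads are reclaimed after 30 seconds.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(10));
        executor.execute(() -> System.out.println("Running on " + Thread.currentThread().getName()));
        executor.shutdown();
    }
}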

Usage

From source file:org.wso2.carbon.event.output.adapter.http.HTTPEventAdapter.java

@Override
public void init() throws OutputEventAdapterException {

    tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId();

    // ExecutorService will be assigned if it is null
    if (executorService == null) {
        int minThread;
        int maxThread;
        long defaultKeepAliveTime;
        int jobQueSize;

        // If global properties are available they are used; otherwise the default constants are assigned
        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME) != null) {
            minThread = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME));
        } else {
            minThread = HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME) != null) {
            maxThread = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME));
        } else {
            maxThread = HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME) != null) {
            defaultKeepAliveTime = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME));
        } else {
            defaultKeepAliveTime = HTTPEventAdapterConstants.DEFAULT_KEEP_ALIVE_TIME_IN_MILLIS;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME) != null) {
            jobQueSize = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME));
        } else {
            jobQueSize = HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE;
        }
        executorService = new ThreadPoolExecutor(minThread, maxThread, defaultKeepAliveTime,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(jobQueSize));

        // Configuration for the httpConnectionManager, which is shared by every HTTP adapter
        int defaultMaxConnectionsPerHost;
        int maxTotalConnections;

        if (globalProperties.get(HTTPEventAdapterConstants.DEFAULT_MAX_CONNECTIONS_PER_HOST) != null) {
            defaultMaxConnectionsPerHost = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.DEFAULT_MAX_CONNECTIONS_PER_HOST));
        } else {
            defaultMaxConnectionsPerHost = HTTPEventAdapterConstants.DEFAULT_DEFAULT_MAX_CONNECTIONS_PER_HOST;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.MAX_TOTAL_CONNECTIONS) != null) {
            maxTotalConnections = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.MAX_TOTAL_CONNECTIONS));
        } else {
            maxTotalConnections = HTTPEventAdapterConstants.DEFAULT_MAX_TOTAL_CONNECTIONS;
        }

        connectionManager = new MultiThreadedHttpConnectionManager();
        connectionManager.getParams().setDefaultMaxConnectionsPerHost(defaultMaxConnectionsPerHost);
        connectionManager.getParams().setMaxTotalConnections(maxTotalConnections);

    }
}
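Because this adapter bounds its work queue at jobQueSize, a submission that arrives when all maxThread workers are busy and the queue is full is rejected; with the default handler named in the prototype's documentation, that surfaces as a RejectedExecutionException. A minimal sketch of that behavior, with deliberately tiny sizes that are not the adapter's defaults:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RejectionDemo {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(1));
        Runnable slow = () -> {
            try { Thread.sleep(1000); } catch (InterruptedException ignored) { }
        };
        pool.execute(slow);      // occupies the single worker thread
        pool.execute(slow);      // fills the one queue slot
        try {
            pool.execute(slow);  // no free thread, no queue space: rejected
        } catch (RejectedExecutionException e) {
            System.out.println("Rejected: " + e);
        }
        pool.shutdown();
    }
}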

From source file:org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.java

@Override
public void init(Context context) throws IOException {
    super.init(context);
    this.conf = HBaseConfiguration.create(ctx.getConfiguration());
    decorateConf();
    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
    this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
            maxRetriesMultiplier);
    // TODO: This connection is replication-specific, or we should make it so, adding
    // replication-specific settings such as the compression or codec to use when
    // passing Cells.
    this.conn = (HConnection) ConnectionFactory.createConnection(this.conf);
    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
    this.metrics = context.getMetrics();
    // ReplicationQueueInfo parses the peerId out of the znode for us
    this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
    // per sink thread pool
    this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
            HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
    this.exec = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
}
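The SynchronousQueue here has no capacity, so every submission must be handed directly to a worker; the pool grows from one thread toward maxThreads on demand and rejects work once all maxThreads workers are busy. A small sketch of that hand-off behavior, with illustrative sizes:

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class HandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 2, 60, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>());
        Runnable busy = () -> {
            try { Thread.sleep(500); } catch (InterruptedException ignored) { }
        };
        pool.execute(busy);  // first worker thread created
        pool.execute(busy);  // no queue slot, so a second worker is created
        System.out.println("Pool size: " + pool.getPoolSize()); // 2
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}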

From source file:com.projectsexception.myapplist.iconloader.IconManager.java

/**
 * Constructs the work queues and thread pools used to download and decode images.
 */
private IconManager() {

    /*
     * Creates a work queue for the pool of Thread objects used for downloading, using a linked
     * list queue that blocks when the queue is empty.
     */
    mDownloadWorkQueue = new LinkedBlockingQueue<Runnable>();

    /*
     * Creates a work queue for the set of task objects that control downloading and
     * decoding, using a linked list queue that blocks when the queue is empty.
     */
    mIconTaskWorkQueue = new LinkedBlockingQueue<IconTask>();

    /*
     * Creates a new pool of Thread objects for the download work queue
     */
    mDownloadThreadPool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, mDownloadWorkQueue);

    // Instantiates a new cache based on the cache size estimate
    mIconCache = new LruCache<String, Drawable>(IMAGE_CACHE_SIZE);

    /*
     * Instantiates a new anonymous Handler object and defines its
     * handleMessage() method. The Handler *must* run on the UI thread, because it moves
     * Drawables from the IconTask object to the View object.
     * To force the Handler to run on the UI thread, it's defined as part of the IconManager
     * constructor. The constructor is invoked when the class is first referenced, and that
     * happens when the View invokes startDownload. Since the View runs on the UI Thread, so
     * does the constructor and the Handler.
     */
    mHandler = new Handler(Looper.getMainLooper()) {

        /*
         * handleMessage() defines the operations to perform when the
         * Handler receives a new Message to process.
         */
        @Override
        public void handleMessage(Message inputMessage) {

            // Gets the image task from the incoming Message object.
            IconTask iconTask = (IconTask) inputMessage.obj;

            // Sets an IconView that's a weak reference to the
            // input ImageView
            IconView localView = iconTask.getIconView();

            // If this input view isn't null
            if (localView != null) {

                /*
                 * Gets the package name of the *weak reference* to the input
                 * ImageView. The weak reference won't have changed, even if
                 * the input ImageView has.
                 */
                String packageName = localView.getPackageName();

                /*
                 * Compares the URL of the input ImageView to the URL of the
                 * weak reference. Only updates the drawable in the ImageView
                 * if this particular Thread is supposed to be serving the
                 * ImageView.
                 */
                if (iconTask.getPackageName() != null && iconTask.getPackageName().equals(packageName)) {

                    /*
                     * Chooses the action to take, based on the incoming message
                     */
                    switch (inputMessage.what) {

                    // If the load has started, shows the default placeholder icon
                    case LOAD_STARTED:
                        localView.setStatusResource(R.drawable.ic_default_launcher);
                        break;
                    /*
                     * The decoding is done, so this sets the
                     * ImageView's drawable to the one carried by
                     * the incoming task
                     */
                    case TASK_COMPLETE:
                        localView.setImageDrawable(iconTask.getDrawable());
                        recycleTask(iconTask);
                        break;
                    // The load failed, shows the default placeholder icon
                    case LOAD_FAILED:
                        localView.setStatusResource(R.drawable.ic_default_launcher);

                        // Attempts to re-use the Task object
                        recycleTask(iconTask);
                        break;
                    default:
                        // Otherwise, calls the super method
                        super.handleMessage(inputMessage);
                    }
                }
            }
        }
    };
}
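The sizing constants passed to the ThreadPoolExecutor are defined elsewhere in the class. As a hedged sketch only, Android image loaders of this shape commonly size the pool to the device's CPU count; the values below are assumptions, not the project's actual constants:

// Hypothetical constant definitions; the real class may use different values.
private static final int NUMBER_OF_CORES = Runtime.getRuntime().availableProcessors();
private static final int CORE_POOL_SIZE = NUMBER_OF_CORES;
private static final int MAXIMUM_POOL_SIZE = NUMBER_OF_CORES;
private static final long KEEP_ALIVE_TIME = 1;
private static final TimeUnit KEEP_ALIVE_TIME_UNIT = TimeUnit.SECONDS;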

From source file:org.opentripplanner.routing.algorithm.strategies.WeightTable.java

/**
 * Build the weight table, parallelized according to the number of processors 
 */
public void buildTable() {
    ArrayList<TransitStop> stopVertices;

    LOG.debug("Number of vertices: " + g.getVertices().size());
    stopVertices = new ArrayList<TransitStop>();
    for (Vertex gv : g.getVertices())
        if (gv instanceof TransitStop)
            stopVertices.add((TransitStop) gv);
    int nStops = stopVertices.size();

    stopIndices = new IdentityHashMap<Vertex, Integer>(nStops);
    for (int i = 0; i < nStops; i++)
        stopIndices.put(stopVertices.get(i), i);
    LOG.debug("Number of stops: " + nStops);

    table = new float[nStops][nStops];
    for (float[] row : table)
        Arrays.fill(row, Float.POSITIVE_INFINITY);

    LOG.debug("Performing search at each transit stop.");

    int nThreads = Runtime.getRuntime().availableProcessors();
    LOG.debug("number of threads: " + nThreads);
    ArrayBlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<Runnable>(nStops);
    ThreadPoolExecutor threadPool = new ThreadPoolExecutor(nThreads, nThreads, 10, TimeUnit.SECONDS, taskQueue);
    GenericObjectPool heapPool = new GenericObjectPool(
            new PoolableBinHeapFactory<State>(g.getVertices().size()), nThreads);

    // make one heap and recycle it
    RoutingRequest options = new RoutingRequest();
    // TODO LG Check this change:
    options.setWalkSpeed(maxWalkSpeed);
    final double MAX_WEIGHT = 60 * 60 * options.walkReluctance;
    final double OPTIMISTIC_BOARD_COST = options.getBoardCostLowerBound();

    // create a task for each transit stop in the graph
    ArrayList<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
    for (TransitStop origin : stopVertices) {
        SPTComputer task = new SPTComputer(heapPool, options, MAX_WEIGHT, OPTIMISTIC_BOARD_COST, origin);
        tasks.add(task);
    }
    try {
        // Invoke all of the tasks; invokeAll blocks until they complete.
        threadPool.invokeAll(tasks);
        threadPool.shutdown();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    floyd();
}
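invokeAll blocks until every Callable has finished, so the table is fully populated before shutdown() and floyd() run. A self-contained sketch of the same submit-and-wait pattern, with a stand-in task body:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class InvokeAllDemo {
    public static void main(String[] args) throws InterruptedException {
        int nThreads = Runtime.getRuntime().availableProcessors();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(nThreads, nThreads, 10, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100));
        List<Callable<Void>> tasks = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            final int id = i;
            tasks.add(() -> {
                // Stand-in for the per-stop shortest-path computation.
                System.out.println("task " + id + " on " + Thread.currentThread().getName());
                return null;
            });
        }
        pool.invokeAll(tasks); // blocks until all 100 tasks complete
        pool.shutdown();
    }
}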

From source file:org.apache.hadoop.hdfs.server.hightidenode.FileFixer.java

FileFixer(Configuration conf) throws IOException {
    this.conf = conf;
    blockFixInterval = conf.getInt("hightide.blockfix.interval", blockFixInterval);
    numThreads = conf.getInt("hightide.blockfix.numthreads", numThreads);

    pathToPolicy = new LinkedList<PathToPolicy>();
    executor = new ThreadPoolExecutor(numThreads, numThreads, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());

    // start a thread to purge entries from this set automatically
    filesBeingFixed = new PendingReplication(conf.getInt("dfs.hightide.pending.timeout.sec", -1) * 1000L);
}
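Since corePoolSize equals maximumPoolSize and core threads do not time out by default, the keep-alive value here is effectively inert, and the unbounded queue means no task is ever rejected; the pool behaves like a fixed-size pool. Under that assumption, the same shape is available from the convenience factory:

// Roughly equivalent: a fixed pool of numThreads workers over an unbounded queue.
ExecutorService executor = Executors.newFixedThreadPool(numThreads);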

From source file:org.wso2.carbon.device.mgt.iot.output.adapter.mqtt.MQTTEventAdapter.java

@Override
public void init() throws OutputEventAdapterException {
    tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId(true);
    // ThreadPoolExecutor will be assigned if it is null
    if (threadPoolExecutor == null) {
        int minThread;
        int maxThread;
        int jobQueSize;
        long defaultKeepAliveTime;
        // If global properties are available they are used; otherwise the default constants are assigned
        if (globalProperties.get(MQTTEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME) != null) {
            minThread = Integer.parseInt(
                    globalProperties.get(MQTTEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME));
        } else {
            minThread = MQTTEventAdapterConstants.DEFAULT_MIN_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(MQTTEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME) != null) {
            maxThread = Integer.parseInt(
                    globalProperties.get(MQTTEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME));
        } else {
            maxThread = MQTTEventAdapterConstants.DEFAULT_MAX_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(MQTTEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME) != null) {
            defaultKeepAliveTime = Integer
                    .parseInt(globalProperties.get(MQTTEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME));
        } else {
            defaultKeepAliveTime = MQTTEventAdapterConstants.DEFAULT_KEEP_ALIVE_TIME_IN_MILLIS;
        }

        if (globalProperties.get(MQTTEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME) != null) {
            jobQueSize = Integer.parseInt(
                    globalProperties.get(MQTTEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME));
        } else {
            jobQueSize = MQTTEventAdapterConstants.DEFAULT_EXECUTOR_JOB_QUEUE_SIZE;
        }

        threadPoolExecutor = new ThreadPoolExecutor(minThread, maxThread, defaultKeepAliveTime,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(jobQueSize));
    }
}
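The four look-ups above repeat the same null-check-then-default pattern. A small helper, sketched here, could collapse the repetition; the method is purely illustrative and not part of the WSO2 codebase:

// Hypothetical helper; assumes globalProperties is a Map<String, String>.
private static int getIntProperty(Map<String, String> props, String key, int defaultValue) {
    String value = props.get(key);
    return value != null ? Integer.parseInt(value) : defaultValue;
}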

From source file:edu.berkeley.sparrow.examples.BackendBenchmarkProfiler.java

/**
 * Run an experiment which launches tasks at {@code arrivalRate} for {@code durationMs}
 * milliseconds and waits for all tasks to finish. Returns a {@link DescriptiveStatistics}
 * object which contains stats about the distribution of task finish times. Tasks
 * are executed in a thread pool which contains at least {@code corePoolSize} threads
 * and grows up to {@code maxPoolSize} threads (growing whenever a new task arrives
 * and all existing threads are used).
 * 
 * Setting {@code maxPoolSize} to a very large number enacts time sharing, while
 * setting it equal to {@code corePoolSize} creates a fixed size task pool.
 * 
 * The derivative of task finishes is tracked by bucketing tasks at the granularity
 * {@code bucketSize}. If it is detected that task finish times are increasing in an
 * unbounded fashion (i.e. infinite queuing is occurring) a {@link RuntimeException}
 * is thrown.
 */
public static void runExperiment(double arrivalRate, int corePoolSize, int maxPoolSize, long bucketSize,
        long durationMs, DescriptiveStatistics runTimes, DescriptiveStatistics waitTimes) {
    long startTime = System.currentTimeMillis();
    long keepAliveTime = 10;
    Random r = new Random();
    BlockingQueue<Runnable> runQueue = new LinkedBlockingQueue<Runnable>();
    ExecutorService threadPool = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime,
            TimeUnit.MILLISECONDS, runQueue);
    if (maxPoolSize == Integer.MAX_VALUE) {
        threadPool = Executors.newCachedThreadPool();
    }

    // run times indexed by bucketing interval
    HashMap<Long, List<Long>> bucketedRunTimes = new HashMap<Long, List<Long>>();
    // wait times indexed by bucketing interval
    HashMap<Long, List<Long>> bucketedWaitTimes = new HashMap<Long, List<Long>>();

    /*
     * This is a little tricky. 
     * 
     * We want to generate inter-arrival delays according to the arrival rate specified.
     * The simplest option would be to generate an arrival delay and then sleep() for it
     * before launching each task. This has an issue, however: sleep() might wait
     * several ms longer than we ask it to. When task arrival rates get really fast, 
     * i.e. one task every 10 ms, sleeping an additional few ms will mean we launch 
     * tasks at a much lower rate than requested.
     * 
     * Instead, we keep track of task launches in a way that does not depend on how long
     * sleep() actually takes. We still might have tasks launch slightly after their
     * scheduled launch time, but we will not systematically "fall behind" due to
     * compounding time lost during sleep()s.
     */
    long currTime = startTime;
    while (true) {
        long delay = (long) (generateInterarrivalDelay(r, arrivalRate) * 1000);

        // When should the next task launch, based on when the last task was scheduled
        // to launch.
        long nextTime = currTime + delay;

        // Diff gives how long we should wait for the next scheduled task. The difference 
        // may be negative if our last sleep() lasted too long relative to the inter-arrival
        // delay based on the last scheduled launch, so we round up to 0 in that case. 
        long diff = Math.max(0, nextTime - System.currentTimeMillis());
        currTime = nextTime;
        if (diff > 0) {
            try {
                Thread.sleep(diff);
            } catch (InterruptedException e) {
                System.err.println("Unexpected interruption!");
                System.exit(1);
            }
        }
        threadPool.submit((new BenchmarkRunnable(bucketedRunTimes, bucketedWaitTimes, bucketSize)));
        if (System.currentTimeMillis() > startTime + durationMs) {
            break;
        }
    }
    threadPool.shutdown();
    try {
        threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e1) {
        System.err.println("Unexpected interruption!");
        System.exit(1);
    }
    List<Long> times = new ArrayList<Long>(bucketedRunTimes.keySet());
    Collections.sort(times);
    HashMap<Long, DescriptiveStatistics> bucketStats = new HashMap<Long, DescriptiveStatistics>();

    // Remove the first and last buckets, since they will not be completely full
    // due to discretization.
    times.remove(0);
    times.remove(times.size() - 1);

    for (Long time : times) {
        DescriptiveStatistics stats = new DescriptiveStatistics();
        List<Long> list = bucketedRunTimes.get(time);
        for (Long l : list) {
            stats.addValue(l);
            runTimes.addValue(l);
        }
        bucketStats.put(time, stats);

        List<Long> waitList = bucketedWaitTimes.get(time);
        for (Long l : waitList) {
            waitTimes.addValue(l);
        }
    }
    int size = bucketStats.size();
    if (size >= 2) {
        DescriptiveStatistics first = bucketStats.get(times.get(0));
        DescriptiveStatistics last = bucketStats.get(times.get(times.size() - 1));
        double increase = last.getPercentile(50) / first.getPercentile(50);
        // A simple heuristic: if the median runtime grew by a factor of five from the
        // first to the last complete bucket, we assume we are seeing unbounded growth
        if (increase > 5.0) {
            throw new RuntimeException(
                    "Queue not in steady state: " + last.getMean() + " vs " + first.getMean());
        }
    }
}
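One subtlety worth noting: with an unbounded LinkedBlockingQueue, a ThreadPoolExecutor never actually grows past corePoolSize, because excess tasks queue instead of triggering new threads; that is presumably why the code above swaps in Executors.newCachedThreadPool(), which is backed by a SynchronousQueue, for the Integer.MAX_VALUE case. A compact sketch of the distinction:

// Never exceeds 4 threads: with an unbounded queue, excess tasks wait in the
// queue rather than trigger thread creation, whatever the maximum pool size.
ExecutorService fixed = new ThreadPoolExecutor(4, Integer.MAX_VALUE, 10,
        TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());

// Grows on demand: a SynchronousQueue has no capacity, so each task either
// reuses an idle thread or forces a new one (the cached-thread-pool shape).
ExecutorService cached = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60,
        TimeUnit.SECONDS, new SynchronousQueue<Runnable>());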

From source file:org.hyperic.hq.escalation.server.session.EscalationRuntimeImpl.java

@Autowired
public EscalationRuntimeImpl(EscalationStateDAO escalationStateDao, AuthzSubjectManager authzSubjectManager,
        AlertDAO alertDAO, GalertLogDAO galertLogDAO, ConcurrentStatsCollector concurrentStatsCollector) {
    this.escalationStateDao = escalationStateDao;
    this.authzSubjectManager = authzSubjectManager;
    this.alertDAO = alertDAO;
    this.galertLogDAO = galertLogDAO;
    this.concurrentStatsCollector = concurrentStatsCollector;
    // Want threads to never die (XXX, scottmf, keeping current
    // functionality to get rid of
    // backport apis but don't think this is a good idea)
    // 3 threads to service requests
    _executor = new ThreadPoolExecutor(3, 3, Long.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue());
}
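A note on the Long.MAX_VALUE keep-alive: since corePoolSize equals maximumPoolSize, core threads never time out anyway unless allowCoreThreadTimeOut(true) is set, so the value is belt-and-braces. A typed, equivalent construction would be:

// Equivalent fixed pool of 3 threads with a parameterized (non-raw) queue.
ExecutorService executor = new ThreadPoolExecutor(3, 3, 0L, TimeUnit.MILLISECONDS,
        new LinkedBlockingQueue<Runnable>());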

From source file:com.facebook.presto.accumulo.tools.TimestampCheckTask.java

public int exec() throws Exception {
    // Create the instance and the connector
    Instance inst = new ZooKeeperInstance(config.getInstance(), config.getZooKeepers());
    Connector connector = inst.getConnector(config.getUsername(), new PasswordToken(config.getPassword()));

    if (auths == null) {
        auths = connector.securityOperations().getUserAuthorizations(config.getUsername());
    }

    // Fetch the table metadata
    ZooKeeperMetadataManager manager = new ZooKeeperMetadataManager(config, new TypeRegistry());

    LOG.info("Scanning Presto metadata for tables...");
    AccumuloTable table = manager.getTable(new SchemaTableName(schema, tableName));

    if (table == null) {
        LOG.error("Table is null, does it exist?");
        return 1;
    }

    AccumuloRowSerializer serializer = new LexicoderRowSerializer();

    startBytes = serializer.encode(TimestampType.TIMESTAMP, PARSER.parseDateTime(start).getMillis());
    endBytes = serializer.encode(TimestampType.TIMESTAMP, PARSER.parseDateTime(end).getMillis());

    this.range = new Range(new Text(startBytes), new Text(endBytes));

    long timestamp = System.currentTimeMillis();

    Optional<AccumuloColumnHandle> columnHandle = table.getColumns().stream()
            .filter(handle -> handle.getName().equalsIgnoreCase(column)).findAny();
    checkArgument(columnHandle.isPresent(), "no column found");

    ExecutorService service = MoreExecutors.getExitingExecutorService(
            new ThreadPoolExecutor(3, 3, 0, TimeUnit.MILLISECONDS, new SynchronousQueue<>()));

    List<Future<Void>> tasks = service.invokeAll(ImmutableList.of(() -> {
        getDataCount(connector, table, columnHandle.get(), timestamp);
        return null;
    }, () -> {
        getIndexCount(connector, table, columnHandle.get(), timestamp);
        return null;
    }, () -> {
        getMetricCount(connector, table, columnHandle.get(), timestamp);
        return null;
    }));

    for (Future<Void> task : tasks) {
        task.get();
    }

    LOG.info("Finished");
    return 0;
}
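Guava's MoreExecutors.getExitingExecutorService wraps the pool so its threads are daemons and a shutdown hook drains outstanding work, letting the JVM exit even if the pool is still alive. The variant sketched below passes the termination timeout explicitly; the single-argument form used above defaults to 120 seconds:

// Same wrapper with an explicit termination timeout.
ExecutorService service = MoreExecutors.getExitingExecutorService(
        new ThreadPoolExecutor(3, 3, 0, TimeUnit.MILLISECONDS, new SynchronousQueue<>()),
        120, TimeUnit.SECONDS);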

From source file:org.apache.hadoop.hbase.executor.HBaseExecutorService.java

protected HBaseExecutorService(String name) {
    this.name = name;
    // create the thread pool executor
    threadPoolExecutor = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, keepAliveTimeInMillis,
            TimeUnit.MILLISECONDS, workQueue);
    // name the threads for this threadpool
    threadPoolExecutor.setThreadFactory(new NamedThreadFactory(name));
}
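Setting the factory immediately after construction works because worker threads are created lazily on first execute; the same effect is available in one step through the constructor overload that also takes a ThreadFactory:

// One-step alternative using the six-argument overload.
threadPoolExecutor = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, keepAliveTimeInMillis,
        TimeUnit.MILLISECONDS, workQueue, new NamedThreadFactory(name));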