Example usage for the java.util.concurrent.ThreadPoolExecutor constructor

Introduction

On this page you can find example usage for the java.util.concurrent.ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters and the default thread factory (Executors#defaultThreadFactory).
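
A minimal, self-contained sketch of this constructor variant may help orient the examples below. The pool sizes, timeout, queue capacity, and class name are illustrative assumptions, not taken from any project quoted on this page.

import java.util.concurrent.*;

public class ConstructorSketch {
    public static void main(String[] args) {
        // Bounded queue plus CallerRunsPolicy: once 4 threads are busy and the
        // queue holds 100 tasks, the submitting thread runs the task itself.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2,                              // corePoolSize
                4,                              // maximumPoolSize
                30, TimeUnit.SECONDS,           // keepAliveTime for non-core threads
                new ArrayBlockingQueue<>(100),  // workQueue
                new ThreadPoolExecutor.CallerRunsPolicy()); // handler
        pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}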

Usage

From source file: org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

private ExecutorService createExecutorService() {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder().setNameFormat("LoadIncrementalHFiles-%1$d").build());
    pool.allowCoreThreadTimeOut(true);
    return pool;
}
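
This example pairs equal core and maximum sizes with an unbounded LinkedBlockingQueue, so the pool never grows past corePoolSize and maximumPoolSize is effectively inert; allowCoreThreadTimeOut(true) then lets an idle pool shrink to zero. A minimal sketch of the same shape, with an assumed thread count standing in for nrThreads:

import java.util.concurrent.*;

public class ShrinkablePoolSketch {
    public static void main(String[] args) {
        int nrThreads = Runtime.getRuntime().availableProcessors(); // assumed value
        ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads,
                60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        // Core threads of a fixed-size pool normally live forever; with this
        // call they exit after 60s idle, letting the pool drop to zero threads.
        pool.allowCoreThreadTimeOut(true);
        pool.execute(() -> System.out.println("hello from " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}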

From source file: org.apache.hadoop.hbase.client.TestHCM.java

/**
 * Tests that a destroyed connection does not have a live zookeeper.
 * Below is timing based.  We put up a connection to a table and then close the connection while
 * having a background thread running that is forcing close of the connection to try and
 * provoke a close catastrophe; we are hoping for a car crash so we can see if we are leaking
 * zk connections.
 * @throws Exception
 */
@Ignore("Flakey test: See HBASE-8996")
@Test
public void testDeleteForZKConnLeak() throws Exception {
    TEST_UTIL.createTable(TABLE_NAME4, FAM_NAM);
    final Configuration config = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
    config.setInt("zookeeper.recovery.retry", 1);
    config.setInt("zookeeper.recovery.retry.intervalmill", 1000);
    config.setInt("hbase.rpc.timeout", 2000);
    config.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);

    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 10, 5, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("test-hcm-delete"));

    pool.submit(new Runnable() {
        @Override
        public void run() {
            while (!Thread.interrupted()) {
                try {
                    HConnection conn = HConnectionManager.getConnection(config);
                    LOG.info("Connection " + conn);
                    HConnectionManager.deleteStaleConnection(conn);
                    LOG.info("Connection closed " + conn);
                    // TODO: This sleep time should be less than the time that it takes to open and close
                    // a table.  Ideally we would do a few runs first to measure.  For now this is
                    // timing based; hopefully we hit the bad condition.
                    Threads.sleep(10);
                } catch (Exception e) {
                    // Ignored: this thread deliberately races connection close;
                    // failures here are expected.
                }
            }
        }
    });

    // Use connection multiple times.
    for (int i = 0; i < 30; i++) {
        HConnection c1 = null;
        try {
            c1 = ConnectionManager.getConnectionInternal(config);
            LOG.info("HTable connection " + i + " " + c1);
            HTable table = new HTable(config, TABLE_NAME4, pool);
            table.close();
            LOG.info("HTable connection " + i + " closed " + c1);
        } catch (Exception e) {
            LOG.info("We actually want this to happen!!!!  So we can see if we are leaking zk", e);
        } finally {
            if (c1 != null) {
                if (c1.isClosed()) {
                    // cannot use getZooKeeper as method instantiates watcher if null
                    Field zkwField = c1.getClass().getDeclaredField("keepAliveZookeeper");
                    zkwField.setAccessible(true);
                    Object watcher = zkwField.get(c1);

                    if (watcher != null) {
                        if (((ZooKeeperWatcher) watcher).getRecoverableZooKeeper().getState().isAlive()) {
                            // non-synchronized access to watcher; sleep and check again in case zk connection
                            // hasn't been cleaned up yet.
                            Thread.sleep(1000);
                            if (((ZooKeeperWatcher) watcher).getRecoverableZooKeeper().getState().isAlive()) {
                                pool.shutdownNow();
                                fail("Live zookeeper in closed connection");
                            }
                        }
                    }
                }
                c1.close();
            }
        }
    }
    pool.shutdownNow();
}
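
The pool above uses a SynchronousQueue, which buffers nothing: each submission is either handed directly to an idle worker or spawns a new thread up to maximumPoolSize. Threads.newDaemonThreadFactory is HBase-internal; the plain daemon factory below is an assumed stand-in that produces the same pool shape:

import java.util.concurrent.*;

public class HandoffPoolSketch {
    public static void main(String[] args) {
        // Assumed stand-in for HBase's Threads.newDaemonThreadFactory("test-hcm-delete").
        ThreadFactory daemonFactory = r -> {
            Thread t = new Thread(r, "test-hcm-delete");
            t.setDaemon(true); // daemon threads don't block JVM exit
            return t;
        };
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 10, 5, TimeUnit.SECONDS,
                new SynchronousQueue<>(), daemonFactory);
        pool.execute(() -> System.out.println("handed straight to a worker"));
        pool.shutdown();
    }
}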

From source file: org.apache.jmeter.protocol.http.sampler.HTTPSamplerBaseClassifier.java

/**
 * Download the resources of an HTML page.
 *
 * @param res
 *            result of the initial request - must contain an HTML response
 * @param container
 *            for storing the results, if any
 * @param frameDepth
 *            Depth of this target in the frame structure. Used only to
 *            prevent infinite recursion.
 * @return res if no resources exist, otherwise the "Container" result with
 *         one subsample per request issued
 */
protected HTTPSampleResult downloadPageResources(HTTPSampleResult res, HTTPSampleResult container,
        int frameDepth) {
    Iterator<URL> urls = null;
    try {
        final byte[] responseData = res.getResponseData();
        if (responseData.length > 0) { // Bug 39205
            String parserName = getParserClass(res);
            if (parserName != null) {
                // If a parser name was configured, use it; otherwise fall back
                // to the default parser.
                final HTMLParser parser = parserName.length() > 0 ? HTMLParser.getParser(parserName)
                        : HTMLParser.getParser();
                urls = parser.getEmbeddedResourceURLs(responseData, res.getURL(),
                        res.getDataEncodingWithDefault());
            }
        }
    } catch (HTMLParseException e) {
        // Don't break the world just because this failed:
        res.addSubResult(errorResult(e, new HTTPSampleResult(res)));
        setParentSampleSuccess(res, false);
    }

    // Iterate through the URLs and download each resource:
    if (urls != null && urls.hasNext()) {
        if (container == null) {
            // TODO: needed here because this is currently done on sample completion
            // in JMeterThread, but that only catches top-level samples.
            res.setThreadName(Thread.currentThread().getName());
            container = new HTTPSampleResult(res);
            container.addRawSubResult(res);
        }
        res = container;

        // Get the URL matcher
        String re = getEmbeddedUrlRE();
        Perl5Matcher localMatcher = null;
        Pattern pattern = null;
        if (re.length() > 0) {
            try {
                pattern = JMeterUtils.getPattern(re);
                // Don't fetch the matcher unless the pattern compiled.
                localMatcher = JMeterUtils.getMatcher();
            } catch (MalformedCachePatternException e) {
                log.warn("Ignoring embedded URL match string: " + e.getMessage());
            }
        }

        // Tasks collected for concurrent download of embedded resources
        final List<Callable<AsynSamplerResultHolder>> liste = new ArrayList<Callable<AsynSamplerResultHolder>>();

        while (urls.hasNext()) {
            Object binURL = urls.next(); // See catch clause below
            try {
                URL url = (URL) binURL;
                if (url == null) {
                    log.warn("Null URL detected (should not happen)");
                } else {
                    String urlstr = url.toString();
                    String urlStrEnc = encodeSpaces(urlstr);
                    if (!urlstr.equals(urlStrEnc)) { // the URL contained spaces
                        try {
                            url = new URL(urlStrEnc);
                        } catch (MalformedURLException e) {
                            res.addSubResult(errorResult(new Exception(urlStrEnc + " is not a correct URI"),
                                    new HTTPSampleResult(res)));
                            setParentSampleSuccess(res, false);
                            continue;
                        }
                    }
                    // I don't think localMatcher can be null here, but
                    // check just in case
                    if (pattern != null && localMatcher != null && !localMatcher.matches(urlStrEnc, pattern)) {
                        continue; // we have a pattern and the URL does not
                                  // match, so skip it
                    }

                    if (isConcurrentDwn()) {
                        // Concurrent download: collect the resource for an async get later.
                        liste.add(new ASyncSample(url, HTTPConstants.GET, false, frameDepth + 1,
                                getCookieManager(), this));
                    } else {
                        // default: serial download embedded resources
                        HTTPSampleResult binRes = sample(url, HTTPConstants.GET, false, frameDepth + 1);
                        res.addSubResult(binRes);
                        setParentSampleSuccess(res, res.isSuccessful() && binRes.isSuccessful());
                    }

                }
            } catch (ClassCastException e) { // TODO can this happen?
                res.addSubResult(errorResult(new Exception(binURL + " is not a correct URI"),
                        new HTTPSampleResult(res)));
                setParentSampleSuccess(res, false);
                continue;
            }
        }
        // Concurrent download of the collected embedded resources
        if (isConcurrentDwn()) {
            int poolSize = CONCURRENT_POOL_SIZE; // init with default value
            try {
                poolSize = Integer.parseInt(getConcurrentPool());
            } catch (NumberFormatException nfe) {
                log.warn("Concurrent download resources selected, "// $NON-NLS-1$
                        + "but pool size value is bad. Use default value");// $NON-NLS-1$
            }
            // Thread pool executor used to fetch the resources.
            // With an unbounded LinkedBlockingQueue, maximumPoolSize has no effect.
            final ThreadPoolExecutor exec = new ThreadPoolExecutor(poolSize, poolSize, KEEPALIVETIME,
                    TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {

                        public Thread newThread(final Runnable r) {
                            Thread t = new CleanerThread(new Runnable() {

                                public void run() {
                                    try {
                                        r.run();
                                    } finally {
                                        ((CleanerThread) Thread.currentThread()).notifyThreadEnd();
                                    }
                                }
                            });
                            return t;
                        }
                    });

            boolean tasksCompleted = false;
            try {
                // sample all resources with threadpool
                final List<Future<AsynSamplerResultHolder>> retExec = exec.invokeAll(liste);
                // Orderly shutdown: stop accepting tasks, let submitted ones finish.
                exec.shutdown();
                // Bound the wait in case tasks fail to terminate.
                exec.awaitTermination(AWAIT_TERMINATION_TIMEOUT, TimeUnit.SECONDS);
                CookieManager cookieManager = getCookieManager();
                // add result to main sampleResult
                for (Future<AsynSamplerResultHolder> future : retExec) {
                    AsynSamplerResultHolder binRes;
                    try {
                        binRes = future.get(1, TimeUnit.MILLISECONDS);
                        if (cookieManager != null) {
                            CollectionProperty cookies = binRes.getCookies();
                            PropertyIterator iter = cookies.iterator();
                            while (iter.hasNext()) {
                                Cookie cookie = (Cookie) iter.next().getObjectValue();
                                cookieManager.add(cookie);
                            }
                        }
                        res.addSubResult(binRes.getResult());
                        setParentSampleSuccess(res, res.isSuccessful() && binRes.getResult().isSuccessful());
                    } catch (TimeoutException e) {
                        errorResult(e, res);
                    }
                }
                tasksCompleted = exec.awaitTermination(1, TimeUnit.MILLISECONDS); // did all the tasks finish?
            } catch (InterruptedException ie) {
                log.warn("Interruped fetching embedded resources", ie); // $NON-NLS-1$
            } catch (ExecutionException ee) {
                log.warn("Execution issue when fetching embedded resources", ee); // $NON-NLS-1$
            } finally {
                if (!tasksCompleted) {
                    exec.shutdownNow(); // kill any remaining tasks
                }
            }
        }
    }
    return res;
}
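
The concurrent branch follows a common fan-out pattern: invokeAll blocks until every task completes, shutdown begins an orderly stop, and awaitTermination bounds the wait. A sketch of that pattern in isolation, with illustrative tasks and sizes rather than JMeter's types:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.*;

public class FanOutSketch {
    public static void main(String[] args) throws Exception {
        // Fixed pool of 4 workers over an unbounded queue.
        ExecutorService exec = new ThreadPoolExecutor(4, 4,
                60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        List<Callable<String>> tasks = Arrays.asList(() -> "a", () -> "b", () -> "c");
        List<Future<String>> results = exec.invokeAll(tasks); // blocks until all complete
        exec.shutdown(); // no new tasks accepted
        if (!exec.awaitTermination(60, TimeUnit.SECONDS)) {
            exec.shutdownNow(); // bound the wait, then force-stop stragglers
        }
        for (Future<String> f : results) {
            System.out.println(f.get()); // futures are already done after invokeAll
        }
    }
}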

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

@Override
public void run() {
    while (!closed) {
        try {
            Thread.sleep(60000);
            try {
                ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                Map<String, String> md = omd.getUserMetadata();
                ObjectMetadata nmd = new ObjectMetadata();
                nmd.setUserMetadata(md);
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                md.put("hostname", InetAddress.getLocalHost().getHostName());
                md.put("port", Integer.toString(Main.sdfsCliPort));
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.put("md5sum", st);
                nmd.setContentMD5(st);
                nmd.setContentLength(sz.length);
                nmd.setUserMetadata(md);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
            } catch (Exception e) {
                try {
                    ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                    Map<String, String> md = omd.getUserMetadata();
                    ObjectMetadata nmd = new ObjectMetadata();
                    nmd.setUserMetadata(md);
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.put("hostname", InetAddress.getLocalHost().getHostName());
                    md.put("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.put("md5sum", st);
                    nmd.setContentMD5(st);
                    nmd.setContentLength(sz.length);
                    nmd.setUserMetadata(md);

                    this.updateObject(binm, nmd);
                } catch (Exception e1) {
                    SDFSLogger.getLog().error("unable to update metadata for " + binm, e);
                }
            }

            if (this.deletes.size() > 0) {
                SDFSLogger.getLog().info("running garbage collection");
                RejectedExecutionHandler executionHandler = new BlockPolicy();
                BlockingQueue<Runnable> worksQueue = new SynchronousQueue<Runnable>();
                ThreadPoolExecutor executor = new ThreadPoolExecutor(1, Main.dseIOThreads, 10, TimeUnit.SECONDS,
                        worksQueue, executionHandler);
                this.delLock.lock();
                HashMap<Long, Integer> odel = null;
                try {
                    odel = this.deletes;
                    this.deletes = new HashMap<Long, Integer>();
                    // SDFSLogger.getLog().info("delete hash table size of "
                    // + odel.size());
                } finally {
                    this.delLock.unlock();
                }
                Set<Long> iter = odel.keySet();
                for (Long k : iter) {
                    DeleteObject obj = new DeleteObject();
                    obj.k = k;
                    obj.odel = odel;
                    obj.st = this;
                    executor.execute(obj);
                }
                executor.shutdown();
                while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                    SDFSLogger.getLog().debug("Awaiting deletion task completion of threads.");
                }
                SDFSLogger.getLog().info("done running garbage collection");
            }
        } catch (InterruptedException e) {
            break;
        } catch (Exception e) {
            SDFSLogger.getLog().error("error in delete thread", e);
        }
    }

}
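
BlockPolicy here is an SDFS-specific RejectedExecutionHandler. A common implementation of the same idea, assumed below rather than taken from this source, blocks the submitting thread on the queue instead of throwing, which with a SynchronousQueue means waiting until a worker frees up:

import java.util.concurrent.*;

public class BlockingHandlerSketch {
    public static void main(String[] args) {
        RejectedExecutionHandler blockPolicy = (r, executor) -> {
            try {
                executor.getQueue().put(r); // wait for a free worker instead of rejecting
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new RejectedExecutionException("interrupted while blocking", ie);
            }
        };
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 8, 10, TimeUnit.SECONDS,
                new SynchronousQueue<>(), blockPolicy);
        for (int i = 0; i < 20; i++) {
            final int n = i;
            executor.execute(() -> System.out.println("delete task " + n));
        }
        executor.shutdown();
    }
}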

From source file: org.apache.lens.server.query.QueryExecutionServiceImpl.java

private void startEstimatePool() {
    int minPoolSize = conf.getInt(ESTIMATE_POOL_MIN_THREADS, DEFAULT_ESTIMATE_POOL_MIN_THREADS);
    int maxPoolSize = conf.getInt(ESTIMATE_POOL_MAX_THREADS, DEFAULT_ESTIMATE_POOL_MAX_THREADS);
    int keepAlive = conf.getInt(ESTIMATE_POOL_KEEP_ALIVE_MILLIS, DEFAULT_ESTIMATE_POOL_KEEP_ALIVE_MILLIS);

    final ThreadFactory defaultFactory = Executors.defaultThreadFactory();
    final AtomicInteger thId = new AtomicInteger();
    // We are creating our own thread factory, just so that we can override thread name for easy debugging
    ThreadFactory threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread th = defaultFactory.newThread(r);
            th.setName("estimate-" + thId.incrementAndGet());
            return th;
        }
    };

    log.debug("starting estimate pool");

    ThreadPoolExecutor estimatePool = new ThreadPoolExecutor(minPoolSize, maxPoolSize, keepAlive,
            TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>(), threadFactory);
    estimatePool.allowCoreThreadTimeOut(false);
    estimatePool.prestartCoreThread();
    this.estimatePool = estimatePool;
}
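
Because the queue is a SynchronousQueue, the first submission would normally pay thread-creation latency; prestartCoreThread() starts one core thread eagerly, and allowCoreThreadTimeOut(false) simply restates the default. The same shape in isolation, with assumed sizes:

import java.util.concurrent.*;

public class PrestartSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(3, 100,
                60_000, TimeUnit.MILLISECONDS, new SynchronousQueue<>(),
                Executors.defaultThreadFactory());
        pool.allowCoreThreadTimeOut(false); // the default, made explicit
        pool.prestartCoreThread();          // start one core thread up front
        pool.execute(() -> System.out.println("served by a prestarted thread"));
        pool.shutdown();
    }
}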

From source file: org.apache.lens.server.query.QueryExecutionServiceImpl.java

private void startLauncherPool() {
    int minPoolSize = conf.getInt(LAUNCHER_POOL_MIN_THREADS, DEFAULT_LAUNCHER_POOL_MIN_THREADS);
    int maxPoolSize = conf.getInt(LAUNCHER_POOL_MAX_THREADS, DEFAULT_LAUNCHER_POOL_MAX_THREADS);
    int keepAlive = conf.getInt(LAUNCHER_POOL_KEEP_ALIVE_MILLIS, DEFAULT_LAUNCHER_POOL_KEEP_ALIVE_MILLIS);

    final ThreadFactory defaultFactory = Executors.defaultThreadFactory();
    final AtomicInteger thId = new AtomicInteger();
    // We are creating our own thread factory, just so that we can override thread name for easy debugging
    ThreadFactory threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread th = defaultFactory.newThread(r);
            th.setName("launcher-" + thId.incrementAndGet());
            return th;
        }
    };

    log.debug("starting query launcher pool");

    ThreadPoolExecutor launcherPool = new ThreadPoolExecutor(minPoolSize, maxPoolSize, keepAlive,
            TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>(), threadFactory);
    launcherPool.allowCoreThreadTimeOut(false);
    launcherPool.prestartCoreThread();
    this.queryLauncherPool = launcherPool;
}

From source file: org.apache.lens.server.query.QueryExecutionServiceImpl.java

private void startQueryCancellationPool() {
    ThreadFactory factory = new BasicThreadFactory.Builder().namingPattern("query-cancellation-pool-Thread-%d")
            .priority(Thread.NORM_PRIORITY).build();
    // Fixed-size pool: corePoolSize = maximumPoolSize = 3, keepAliveTime = 60 seconds.
    queryCancellationPool = new ThreadPoolExecutor(3, 3, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), factory);
}
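
Since corePoolSize equals maximumPoolSize and core threads do not time out, the keepAliveTime is inert. Assuming only the pool shape matters, Executors.newFixedThreadPool with the same BasicThreadFactory (from Apache Commons Lang) is a close equivalent:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;

public class FixedPoolSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(3,
                new BasicThreadFactory.Builder()
                        .namingPattern("query-cancellation-pool-Thread-%d")
                        .priority(Thread.NORM_PRIORITY)
                        .build());
        pool.execute(() -> System.out.println("named, normal-priority worker"));
        pool.shutdown();
    }
}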

From source file: org.apache.geode.internal.cache.GemFireCacheImpl.java

private void createDiskStoreTaskPool() {
    int MAXT = DiskStoreImpl.MAX_CONCURRENT_COMPACTIONS;
    final ThreadGroup compactThreadGroup = LoggingThreadGroup.createThreadGroup("Oplog Compactor Thread Group",
            logger);

    final ThreadFactory compactThreadFactory = GemfireCacheHelper.CreateThreadFactory(compactThreadGroup,
            "Idle OplogCompactor");
    this.diskStoreTaskPool = new ThreadPoolExecutor(MAXT, MAXT, 1, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), compactThreadFactory);
}