List of usage examples for java.util.concurrent.ThreadFactory
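ThreadFactory is the java.util.concurrent hook for customizing how an executor creates its threads: implement the single newThread(Runnable) method and pass the factory to an Executors factory method or a ThreadPoolExecutor constructor. Before the project-specific examples below, here is a minimal self-contained sketch of the most common use, naming threads and marking them as daemons (the class and pool names are illustrative, not taken from any of the projects listed):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class NamedDaemonThreadFactory implements ThreadFactory {
        private final String prefix;
        private final AtomicInteger counter = new AtomicInteger();

        public NamedDaemonThreadFactory(String prefix) {
            this.prefix = prefix;
        }

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, prefix + "-" + counter.incrementAndGet());
            t.setDaemon(true); // daemon threads do not keep the JVM alive at shutdown
            return t;
        }

        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(2, new NamedDaemonThreadFactory("worker"));
            pool.execute(() -> System.out.println(Thread.currentThread().getName())); // prints e.g. worker-1
            pool.shutdown();
            pool.awaitTermination(5, TimeUnit.SECONDS); // give the daemon worker time to run
        }
    }

Most of the examples that follow are variations on this theme: a factory that names threads for easier debugging, sets the daemon flag, or wraps the Runnable to establish per-thread context.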
From source file:de.hybris.platform.servicelayer.tx.ItemModelTransactionTest.java
private <V> V runInOtherThread(final Callable<V> callable, final int timeoutSeconds) {
    final ExecutorService pool = Executors.newFixedThreadPool(1, new ThreadFactory() {
        final Tenant tenant = Registry.getCurrentTenantNoFallback();

        @Override
        public Thread newThread(final Runnable r) {
            return new Thread(r) {
                @Override
                public void run() {
                    try {
                        Registry.setCurrentTenant(tenant);
                        super.run();
                    } finally {
                        JaloSession.deactivate();
                        Registry.unsetCurrentTenant();
                    }
                }
            };
        }
    });
    try {
        final Future<V> future = pool.submit(callable);
        return future.get(timeoutSeconds, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        fail("interrupted while waiting");
    } catch (final ExecutionException e) {
        fail("unexpected execution exception " + e.getCause());
    } catch (final TimeoutException e) {
        fail("callable " + callable + " did not finish within maximum " + timeoutSeconds + " seconds to wait");
    } finally {
        pool.shutdownNow();
    }
    return null;
}
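The hybris test above uses the factory to capture the caller's tenant and re-establish it inside every pool thread, with cleanup in a finally block. A generic sketch of that capture-and-restore pattern, with the hybris-specific Registry/JaloSession calls replaced by a hypothetical ThreadLocal-backed ContextHolder (all names here are illustrative):

    import java.util.concurrent.ThreadFactory;

    // Hypothetical stand-in for the Registry/JaloSession context management above.
    final class ContextHolder {
        private static final ThreadLocal<String> CTX = new ThreadLocal<>();
        static String current() { return CTX.get(); }
        static void set(String value) { CTX.set(value); }
        static void clear() { CTX.remove(); }
    }

    final class ContextPropagatingThreadFactory implements ThreadFactory {
        // Captured once, on the thread that constructs the factory.
        private final String capturedContext = ContextHolder.current();

        @Override
        public Thread newThread(final Runnable r) {
            return new Thread(() -> {
                ContextHolder.set(capturedContext); // restore the caller's context
                try {
                    r.run();
                } finally {
                    ContextHolder.clear(); // mirror the finally-block cleanup above
                }
            });
        }
    }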
From source file:org.apache.hadoop.raid.RaidShell.java
/**
 * Checks the raided file system, prints a list of corrupt files to
 * this.out and returns the number of corrupt files.
 * Also prints out the total number of files with at least one missing block.
 * When called with '-retNumStrpsMissingBlks', also prints out the number of stripes
 * with a certain number of blocks missing for files using the 'RS' codec.
 */
public void fsck(String cmd, String[] args, int startIndex) throws IOException {
    final int numFsckArgs = args.length - startIndex;
    int numThreads = 16;
    String path = "/";
    boolean argsOk = false;
    boolean countOnly = false;
    boolean cntMissingBlksPerStrp = false;
    boolean listRecoverableFile = false;
    if (numFsckArgs >= 1) {
        argsOk = true;
        path = args[startIndex];
    }
    for (int i = startIndex + 1; i < args.length; i++) {
        if (args[i].equals("-threads")) {
            numThreads = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-count")) {
            countOnly = true;
        } else if (args[i].equals("-retNumStrpsMissingBlks")) {
            cntMissingBlksPerStrp = true;
        } else if (args[i].equals("-listrecoverablefiles")) {
            listRecoverableFile = true;
        }
    }
    if (!argsOk) {
        printUsage(cmd);
        return;
    }
    final String dateString = dateFormat.format(new Date());
    System.err.println("Running RAID FSCK with " + numThreads + " threads on " + path + " at time " + dateString);

    FileSystem fs = (new Path(path)).getFileSystem(conf);
    // if we got a raid fs, get the underlying fs
    if (fs instanceof DistributedRaidFileSystem) {
        fs = ((DistributedRaidFileSystem) fs).getFileSystem();
    }
    // check that we have a distributed fs
    if (!(fs instanceof DistributedFileSystem)) {
        throw new IOException("expected DistributedFileSystem but got " + fs.getClass().getName());
    }
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // get a list of corrupted files (not considering parity blocks just yet)
    // from the name node
    // these are the only files we need to consider:
    // if a file has no corrupted data blocks, it is OK even if some
    // of its parity blocks are corrupted, so no further checking is necessary
    System.err.println("Querying NameNode for list of corrupt files under " + path);
    final String[] files = DFSUtil.getCorruptFiles(dfs, path);
    final List<String> corruptFileCandidates = new LinkedList<String>();
    for (final String f : files) {
        // if this file is a parity file
        // or if it does not start with the specified path,
        // ignore it
        boolean matched = false;
        for (Codec c : Codec.getCodecs()) {
            if (f.startsWith(c.getParityPrefix())) {
                matched = true;
            }
        }
        if (!matched) {
            corruptFileCandidates.add(f);
        }
    }
    // filter files marked for deletion
    RaidUtils.filterTrash(conf, corruptFileCandidates);

    // clear numStrpMissingBlks if missing blocks per stripe is to be counted
    if (cntMissingBlksPerStrp) {
        for (AtomicLongArray numStrpMissingBlks : numStrpMissingBlksMap.values()) {
            for (int i = 0; i < numStrpMissingBlks.length(); i++) {
                numStrpMissingBlks.set(i, 0);
            }
        }
    }
    System.err.println("Processing " + corruptFileCandidates.size() + " possibly corrupt files using "
            + numThreads + " threads");

    ExecutorService executor = null;
    ThreadFactory factory = new ThreadFactory() {
        final AtomicInteger tnum = new AtomicInteger();

        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("Raidfsck-" + dateString + "-" + tnum.incrementAndGet());
            return t;
        }
    };
    if (numThreads > 1) {
        executor = Executors.newFixedThreadPool(numThreads, factory);
    } else {
        numThreads = 1;
    }
    final List<String> unRecoverableFiles = Collections.synchronizedList(new LinkedList<String>());
    final List<String> recoverableFiles = Collections.synchronizedList(new LinkedList<String>());
    final boolean finalCountOnly = countOnly;
    final boolean finalMissingBlksPerStrpCnt = cntMissingBlksPerStrp;
    final boolean finalListRecoverableFile = listRecoverableFile;
    final int step = numThreads;
    final AtomicInteger finishNum = new AtomicInteger(0);
    for (int i = 0; i < numThreads; i++) {
        if (!dfs.getClient().isOpen()) {
            throw new IOException("Filesystem closed.");
        }
        final int startIdx = i;
        Runnable work = new Runnable() {
            public void run() {
                try {
                    for (int idx = startIdx; idx < corruptFileCandidates.size(); idx += step) {
                        String corruptFileCandidate = corruptFileCandidates.get(idx);
                        boolean corrupt = false;
                        try {
                            FileStatus corruptStat;
                            try {
                                corruptStat = dfs.getFileStatus(new Path(corruptFileCandidate));
                            } catch (FileNotFoundException fnfe) {
                                continue;
                            }
                            if (!dfs.getClient().isOpen()) {
                                LOG.warn("Filesystem closed.");
                                return;
                            }
                            corrupt = isFileCorrupt(dfs, corruptStat, finalMissingBlksPerStrpCnt);
                            if (corrupt) {
                                incrCorruptCount();
                                if (!finalCountOnly && !finalListRecoverableFile) {
                                    unRecoverableFiles.add(corruptFileCandidate);
                                }
                            } else {
                                if (!finalCountOnly && finalListRecoverableFile) {
                                    recoverableFiles.add(corruptFileCandidate);
                                }
                            }
                        } catch (Throwable e) {
                            LOG.error("Error in processing " + corruptFileCandidate, e);
                        }
                    }
                } finally {
                    finishNum.incrementAndGet();
                }
            }
        };
        if (executor != null) {
            executor.execute(work);
        } else {
            work.run();
        }
    }
    if (executor != null) {
        try {
            while (finishNum.get() < numThreads) {
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException ie) {
                    LOG.warn("Raidfsck get exception ", ie);
                    throw new IOException(ie);
                }
            }
        } finally {
            executor.shutdown(); // Waits for submitted tasks to finish.
        }
    }
    // If client is closed, fail the fsck check.
    if (!dfs.getClient().isOpen()) {
        throw new IOException("Filesystem closed.");
    }
    if (countOnly) {
        // Number of corrupt files (which cannot be fixed by Raid)
        out.println(getCorruptCount());
        LOG.info("Number of corrupt files:" + getCorruptCount());
        // Number of files with at least one missing block
        out.println(corruptFileCandidates.size());
        LOG.info("Number of files with at least one block missing/corrupt: " + corruptFileCandidates.size());
    } else {
        if (listRecoverableFile) {
            for (String file : recoverableFiles) {
                out.println(file);
            }
        } else {
            for (String file : unRecoverableFiles) {
                out.println(file);
            }
        }
    }
    /* Number of stripes with missing blocks array, separated by each code id:
     * Number of missing blocks found from non-raided files.
     * codeId1
     *   index 0: Number of stripes found with one block missing in this fsck
     *   index 1: Number of stripes found with two blocks missing in this fsck
     *   and so on
     * codeId2
     *   index 0: Number of stripes found with one block missing in this fsck
     *   index 1: Number of stripes found with two blocks missing in this fsck
     *   and so on
     */
    if (cntMissingBlksPerStrp) {
        out.println(this.numNonRaidedMissingBlks);
        for (String codecId : numStrpMissingBlksMap.keySet()) {
            out.println(codecId);
            AtomicLongArray numStrpMissingBlks = numStrpMissingBlksMap.get(codecId);
            for (int j = 0; j < numStrpMissingBlks.length(); j++) {
                long temp = numStrpMissingBlks.get(j);
                out.println(temp);
                LOG.info("Number of stripes with missing blocks at index " + j + " is " + temp);
            }
        }
    }
}
From source file:com.alibaba.cobar.client.CobarSqlMapClientTemplate.java
private ExecutorService createCustomExecutorService(int poolSize, final String method) {
    int coreSize = Runtime.getRuntime().availableProcessors();
    if (poolSize < coreSize) {
        coreSize = poolSize;
    }
    ThreadFactory tf = new ThreadFactory() {
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "thread created at CobarSqlMapClientTemplate method [" + method + "]");
            t.setDaemon(true);
            return t;
        }
    };
    BlockingQueue<Runnable> queueToUse = new LinkedBlockingQueue<Runnable>(coreSize);
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(coreSize, poolSize, 60, TimeUnit.SECONDS,
            queueToUse, tf, new ThreadPoolExecutor.CallerRunsPolicy());
    return executor;
}
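Note the saturation behavior this combination produces: the work queue is bounded at coreSize entries, and CallerRunsPolicy makes the submitting thread execute a task itself whenever both the pool and the queue are full, so a saturated pool throttles its callers instead of rejecting work or queueing without bound.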
From source file:org.apache.jmeter.protocol.http.sampler.HTTPSamplerBaseClassifier.java
/**
 * Download the resources of an HTML page.
 *
 * @param res
 *            result of the initial request - must contain an HTML response
 * @param container
 *            for storing the results, if any
 * @param frameDepth
 *            Depth of this target in the frame structure. Used only to
 *            prevent infinite recursion.
 * @return res if no resources exist, otherwise the "Container" result with
 *         one subsample per request issued
 */
protected HTTPSampleResult downloadPageResources(HTTPSampleResult res, HTTPSampleResult container,
        int frameDepth) {
    Iterator<URL> urls = null;
    try {
        final byte[] responseData = res.getResponseData();
        if (responseData.length > 0) { // Bug 39205
            String parserName = getParserClass(res);
            if (parserName != null) {
                final HTMLParser parser = parserName.length() > 0
                        ? HTMLParser.getParser(parserName) // we have a name
                        : HTMLParser.getParser(); // we don't; use the default parser
                urls = parser.getEmbeddedResourceURLs(responseData, res.getURL(),
                        res.getDataEncodingWithDefault());
            }
        }
    } catch (HTMLParseException e) {
        // Don't break the world just because this failed:
        res.addSubResult(errorResult(e, new HTTPSampleResult(res)));
        setParentSampleSuccess(res, false);
    }
    // Iterate through the URLs and download each image:
    if (urls != null && urls.hasNext()) {
        if (container == null) {
            // TODO needed here because currently done on sample completion in JMeterThread,
            // but that only catches top-level samples.
            res.setThreadName(Thread.currentThread().getName());
            container = new HTTPSampleResult(res);
            container.addRawSubResult(res);
        }
        res = container;

        // Get the URL matcher
        String re = getEmbeddedUrlRE();
        Perl5Matcher localMatcher = null;
        Pattern pattern = null;
        if (re.length() > 0) {
            try {
                pattern = JMeterUtils.getPattern(re);
                localMatcher = JMeterUtils.getMatcher(); // don't fetch unless pattern compiles
            } catch (MalformedCachePatternException e) {
                log.warn("Ignoring embedded URL match string: " + e.getMessage());
            }
        }
        // For concurrent get resources
        final List<Callable<AsynSamplerResultHolder>> liste = new ArrayList<Callable<AsynSamplerResultHolder>>();
        while (urls.hasNext()) {
            Object binURL = urls.next(); // See catch clause below
            try {
                URL url = (URL) binURL;
                if (url == null) {
                    log.warn("Null URL detected (should not happen)");
                } else {
                    String urlstr = url.toString();
                    String urlStrEnc = encodeSpaces(urlstr);
                    if (!urlstr.equals(urlStrEnc)) { // There were some spaces in the URL
                        try {
                            url = new URL(urlStrEnc);
                        } catch (MalformedURLException e) {
                            res.addSubResult(errorResult(new Exception(urlStrEnc + " is not a correct URI"),
                                    new HTTPSampleResult(res)));
                            setParentSampleSuccess(res, false);
                            continue;
                        }
                    }
                    // I don't think localMatcher can be null here, but check just in case
                    if (pattern != null && localMatcher != null && !localMatcher.matches(urlStrEnc, pattern)) {
                        continue; // we have a pattern and the URL does not match, so skip it
                    }
                    if (isConcurrentDwn()) {
                        // if concurrently downloading embedded resources, add to a list for async gets later
                        liste.add(new ASyncSample(url, HTTPConstants.GET, false, frameDepth + 1,
                                getCookieManager(), this));
                    } else {
                        // default: serial download of embedded resources
                        HTTPSampleResult binRes = sample(url, HTTPConstants.GET, false, frameDepth + 1);
                        res.addSubResult(binRes);
                        setParentSampleSuccess(res, res.isSuccessful() && binRes.isSuccessful());
                    }
                }
            } catch (ClassCastException e) { // TODO can this happen?
                res.addSubResult(errorResult(new Exception(binURL + " is not a correct URI"),
                        new HTTPSampleResult(res)));
                setParentSampleSuccess(res, false);
                continue;
            }
        }
        // IF for download concurrent embedded resources
        if (isConcurrentDwn()) {
            int poolSize = CONCURRENT_POOL_SIZE; // init with default value
            try {
                poolSize = Integer.parseInt(getConcurrentPool());
            } catch (NumberFormatException nfe) {
                log.warn("Concurrent download resources selected, " // $NON-NLS-1$
                        + "but pool size value is bad. Use default value"); // $NON-NLS-1$
            }
            // Thread pool Executor to get resources
            // use a LinkedBlockingQueue; note: max pool size doesn't take effect
            final ThreadPoolExecutor exec = new ThreadPoolExecutor(poolSize, poolSize, KEEPALIVETIME,
                    TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {
                        public Thread newThread(final Runnable r) {
                            Thread t = new CleanerThread(new Runnable() {
                                public void run() {
                                    try {
                                        r.run();
                                    } finally {
                                        ((CleanerThread) Thread.currentThread()).notifyThreadEnd();
                                    }
                                }
                            });
                            return t;
                        }
                    });

            boolean tasksCompleted = false;
            try {
                // sample all resources with the threadpool
                final List<Future<AsynSamplerResultHolder>> retExec = exec.invokeAll(liste);
                // call normal shutdown (wait for all tasks to end)
                exec.shutdown();
                // put a timeout in case tasks couldn't terminate
                exec.awaitTermination(AWAIT_TERMINATION_TIMEOUT, TimeUnit.SECONDS);
                CookieManager cookieManager = getCookieManager();
                // add results to the main sampleResult
                for (Future<AsynSamplerResultHolder> future : retExec) {
                    AsynSamplerResultHolder binRes;
                    try {
                        binRes = future.get(1, TimeUnit.MILLISECONDS);
                        if (cookieManager != null) {
                            CollectionProperty cookies = binRes.getCookies();
                            PropertyIterator iter = cookies.iterator();
                            while (iter.hasNext()) {
                                Cookie cookie = (Cookie) iter.next().getObjectValue();
                                cookieManager.add(cookie);
                            }
                        }
                        res.addSubResult(binRes.getResult());
                        setParentSampleSuccess(res, res.isSuccessful() && binRes.getResult().isSuccessful());
                    } catch (TimeoutException e) {
                        errorResult(e, res);
                    }
                }
                tasksCompleted = exec.awaitTermination(1, TimeUnit.MILLISECONDS); // did all the tasks finish?
            } catch (InterruptedException ie) {
                log.warn("Interrupted fetching embedded resources", ie); // $NON-NLS-1$
            } catch (ExecutionException ee) {
                log.warn("Execution issue when fetching embedded resources", ee); // $NON-NLS-1$
            } finally {
                if (!tasksCompleted) {
                    exec.shutdownNow(); // kill any remaining tasks
                }
            }
        }
    }
    return res;
}
From source file:com.emc.storageos.systemservices.impl.upgrade.CoordinatorClientExt.java
/**
 * Initialization method.
 * On a standby site, start a thread to monitor local coordinatorsvc status.
 * On the active site, start a thread to monitor db quorum of each standby site.
 */
public void start() {
    if (drUtil.isStandby()) {
        _log.info("Start monitoring local coordinatorsvc status on standby site");
        ScheduledExecutorService exe = Executors.newScheduledThreadPool(1, new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                return new Thread(r, "CoordinatorsvcMonitor");
            }
        });
        // delay for a period of time before starting the monitor. For DR switchover, we stop the
        // original active site, then start the new active one, so the original active may not see
        // the new active immediately after reboot
        exe.scheduleAtFixedRate(coordinatorSvcMonitor, 3 * COODINATOR_MONITORING_INTERVAL,
                COODINATOR_MONITORING_INTERVAL, TimeUnit.SECONDS);
    } else {
        _log.info("Start monitoring db quorum on all standby sites");
        ScheduledExecutorService exe = Executors.newScheduledThreadPool(1, new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                return new Thread(r, "DbsvcQuorumMonitor");
            }
        });
        exe.scheduleAtFixedRate(new DbsvcQuorumMonitor(getMyNodeId(), _coordinator, dbCommonInfo), 0,
                DB_MONITORING_INTERVAL, TimeUnit.SECONDS);
    }
}
From source file:org.apache.hadoop.hbase.regionserver.HRegion.java
static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads, final String threadNamePrefix) {
    return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactory() {
        private int count = 1;

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, threadNamePrefix + "-" + count++);
        }
    });
}
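One caution about the factory above: count is a plain int field, and a ThreadPoolExecutor may call newThread from more than one thread, so count++ could in principle produce duplicate names. A minimal variant of the same factory using AtomicInteger (a sketch for illustration, not the HBase code):

    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.atomic.AtomicInteger;

    final class NamedFactories {
        static ThreadFactory prefixedThreadFactory(final String threadNamePrefix) {
            final AtomicInteger count = new AtomicInteger(1);
            return new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    // getAndIncrement is atomic, so concurrent newThread calls still get unique suffixes
                    return new Thread(r, threadNamePrefix + "-" + count.getAndIncrement());
                }
            };
        }
    }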
From source file:org.apache.lens.server.query.QueryExecutionServiceImpl.java
private void startEstimatePool() {
    int minPoolSize = conf.getInt(ESTIMATE_POOL_MIN_THREADS, DEFAULT_ESTIMATE_POOL_MIN_THREADS);
    int maxPoolSize = conf.getInt(ESTIMATE_POOL_MAX_THREADS, DEFAULT_ESTIMATE_POOL_MAX_THREADS);
    int keepAlive = conf.getInt(ESTIMATE_POOL_KEEP_ALIVE_MILLIS, DEFAULT_ESTIMATE_POOL_KEEP_ALIVE_MILLIS);

    final ThreadFactory defaultFactory = Executors.defaultThreadFactory();
    final AtomicInteger thId = new AtomicInteger();
    // We are creating our own thread factory, just so that we can override the thread name for easy debugging
    ThreadFactory threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread th = defaultFactory.newThread(r);
            th.setName("estimate-" + thId.incrementAndGet());
            return th;
        }
    };

    log.debug("starting estimate pool");
    ThreadPoolExecutor estimatePool = new ThreadPoolExecutor(minPoolSize, maxPoolSize, keepAlive,
            TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>(), threadFactory);
    estimatePool.allowCoreThreadTimeOut(false);
    estimatePool.prestartCoreThread();
    this.estimatePool = estimatePool;
}
From source file:org.apache.lens.server.query.QueryExecutionServiceImpl.java
private void startLauncherPool() {
    int minPoolSize = conf.getInt(LAUNCHER_POOL_MIN_THREADS, DEFAULT_LAUNCHER_POOL_MIN_THREADS);
    int maxPoolSize = conf.getInt(LAUNCHER_POOL_MAX_THREADS, DEFAULT_LAUNCHER_POOL_MAX_THREADS);
    int keepAlive = conf.getInt(LAUNCHER_POOL_KEEP_ALIVE_MILLIS, DEFAULT_LAUNCHER_POOL_KEEP_ALIVE_MILLIS);

    final ThreadFactory defaultFactory = Executors.defaultThreadFactory();
    final AtomicInteger thId = new AtomicInteger();
    // We are creating our own thread factory, just so that we can override the thread name for easy debugging
    ThreadFactory threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread th = defaultFactory.newThread(r);
            th.setName("launcher-" + thId.incrementAndGet());
            return th;
        }
    };

    log.debug("starting query launcher pool");
    ThreadPoolExecutor launcherPool = new ThreadPoolExecutor(minPoolSize, maxPoolSize, keepAlive,
            TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>(), threadFactory);
    launcherPool.allowCoreThreadTimeOut(false);
    launcherPool.prestartCoreThread();
    this.queryLauncherPool = launcherPool;
}
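startEstimatePool and startLauncherPool differ only in their configuration keys and name prefix, so the wrapping factory is a natural candidate for extraction. A hypothetical helper capturing the shared pattern (not part of the Lens codebase; the class and method names are chosen for illustration):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.atomic.AtomicInteger;

    final class PoolThreadFactories {
        // Wraps the default factory and renames each created thread with a numbered prefix.
        static ThreadFactory named(final String prefix) {
            final ThreadFactory defaultFactory = Executors.defaultThreadFactory();
            final AtomicInteger thId = new AtomicInteger();
            return new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    Thread th = defaultFactory.newThread(r);
                    th.setName(prefix + "-" + thId.incrementAndGet());
                    return th;
                }
            };
        }
    }

Each pool starter could then pass named("estimate") or named("launcher"), reproducing the names the two methods assign by hand.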
From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java
private ExecutorService getShutdownAllExecutorService(int size) {
    final ThreadGroup thrGrp = LoggingThreadGroup.createThreadGroup("ShutdownAllGroup", logger);
    ThreadFactory thrFactory = new ThreadFactory() {
        private final AtomicInteger threadCount = new AtomicInteger(1);

        public Thread newThread(Runnable r) {
            Thread t = new Thread(thrGrp, r, "ShutdownAll-" + threadCount.getAndIncrement());
            t.setDaemon(true);
            return t;
        }
    };
    ExecutorService es = Executors.newFixedThreadPool(shutdownAllPoolSize == -1 ? size : shutdownAllPoolSize,
            thrFactory);
    return es;
}
From source file:org.apache.geode.internal.cache.wan.WANTestBase.java
public static void doMultiThreadedPuts(String regionName, int numPuts) {
    final AtomicInteger ai = new AtomicInteger(-1);
    final ExecutorService execService = Executors.newFixedThreadPool(5, new ThreadFactory() {
        AtomicInteger threadNum = new AtomicInteger();

        public Thread newThread(final Runnable r) {
            Thread result = new Thread(r, "Client Put Thread-" + threadNum.incrementAndGet());
            result.setDaemon(true);
            return result;
        }
    });

    final Region r = cache.getRegion(Region.SEPARATOR + regionName);
    assertNotNull(r);

    List<Callable<Object>> tasks = new ArrayList<Callable<Object>>();
    for (long i = 0; i < 5; i++) {
        tasks.add(new PutTask(r, ai, numPuts));
    }
    try {
        List<Future<Object>> l = execService.invokeAll(tasks);
        for (Future<Object> f : l) {
            f.get();
        }
    } catch (InterruptedException e1) { // TODO: eats exception
        e1.printStackTrace();
    } catch (ExecutionException e) { // TODO: eats exceptions
        e.printStackTrace();
    }
    execService.shutdown();
}
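The test above calls execService.shutdown() without waiting for termination, which is acceptable here because invokeAll has already blocked until every task finished. Where tasks are submitted asynchronously, a common shutdown idiom (a generic sketch, not the Geode code; the 30-second bound is an arbitrary choice) waits a bounded time and then forces termination:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.TimeUnit;

    final class PoolShutdown {
        static void shutdownAndAwait(ExecutorService pool) {
            pool.shutdown(); // stop accepting new tasks, let queued ones finish
            try {
                if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                    pool.shutdownNow(); // interrupt anything still running
                }
            } catch (InterruptedException ie) {
                pool.shutdownNow();
                Thread.currentThread().interrupt(); // preserve the interrupt status
            }
        }
    }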