List of usage examples for java.util.concurrent.ThreadFactory
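A ThreadFactory is the hook that java.util.concurrent executors use to create their worker threads; implementing newThread(Runnable) lets callers control thread names, daemon status, priority, thread groups, and uncaught-exception handlers, as the examples below illustrate. A minimal sketch of a reusable factory that produces named daemon threads (the class name NamedDaemonThreadFactory and the prefix parameter are illustrative, not taken from the examples below):

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Minimal sketch: a ThreadFactory that names its threads and marks them as daemons.
public class NamedDaemonThreadFactory implements ThreadFactory {
    private final String prefix;
    private final AtomicInteger count = new AtomicInteger();

    public NamedDaemonThreadFactory(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, prefix + "-" + count.incrementAndGet());
        t.setDaemon(true);
        return t;
    }
}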
From source file:com.clustercontrol.winsyslog.SyslogReceiver.java
private ExecutorService createExecutorService(final String name) {
    ExecutorService executor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, name);
        }
    });
    return executor;
}
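Because ThreadFactory has a single abstract method, the anonymous class above can be written as a lambda on Java 8 and later; a sketch of the equivalent method (same behavior, just more compact):

private ExecutorService createExecutorService(final String name) {
    // Same behavior as the anonymous ThreadFactory above, expressed as a lambda.
    return Executors.newSingleThreadExecutor(r -> new Thread(r, name));
}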
From source file:com.hellblazer.jackal.configuration.ThreadConfig.java
@Bean(name = "communicationsDispatchers")
@Primary
@Autowired
public ExecutorService communicationsDispatchers(Identity partitionIdentity) {
    final int id = partitionIdentity.id;
    return Executors.newCachedThreadPool(new ThreadFactory() {
        int count = 0;

        @Override
        public Thread newThread(Runnable target) {
            Thread t = new Thread(target,
                    String.format("Communications Dispatcher[%s] for node[%s]", count++, id));
            t.setDaemon(true);
            t.setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
                @Override
                public void uncaughtException(Thread t, Throwable e) {
                    log.error(String.format("Exception on %s", t), e);
                }
            });
            return t;
        }
    });
}
From source file:org.apache.asterix.experiment.client.SocketTweetGenerator.java
public SocketTweetGenerator(SocketTweetGeneratorConfig config) {
    threadPool = Executors.newCachedThreadPool(new ThreadFactory() {
        private final AtomicInteger count = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            int tid = count.getAndIncrement();
            Thread t = new Thread(r, "DataGeneratorThread: " + tid);
            t.setDaemon(true);
            return t;
        }
    });

    partitionRangeStart = config.getPartitionRangeStart();
    dataGenDuration = config.getDataGenDuration();
    queryGenDuration = config.getQueryGenDuration();
    startDataInterval = config.getDataInterval();
    nDataIntervals = config.getNIntervals();
    orchHost = config.getOrchestratorHost();
    orchPort = config.getOrchestratorPort();
    receiverAddresses = config.getAddresses();
    mode = startDataInterval > 0 ? Mode.DATA : Mode.TIME;
    openStreetMapFilePath = config.getOpenStreetMapFilePath();
    locationSampleInterval = config.getLocationSampleInterval();
    recordCountPerBatchDuringIngestionOnly = config.getRecordCountPerBatchDuringIngestionOnly();
    recordCountPerBatchDuringQuery = config.getRecordCountPerBatchDuringQuery();
    dataGenSleepTimeDuringIngestionOnly = config.getDataGenSleepTimeDuringIngestionOnly();
    dataGenSleepTimeDuringQuery = config.getDataGenSleepTimeDuringQuery();
}
From source file:eu.interedition.collatex.tools.CollationServer.java
public CollationServer(int maxParallelCollations, int maxCollationSize, String dotPath) {
    this.collationThreads = Executors.newFixedThreadPool(maxParallelCollations, new ThreadFactory() {
        private final AtomicLong counter = new AtomicLong();

        @Override
        public Thread newThread(Runnable r) {
            final Thread t = new Thread(r, "collator-" + counter.incrementAndGet());
            t.setDaemon(true);
            t.setPriority(Thread.MIN_PRIORITY);
            return t;
        }
    });
    this.maxCollationSize = maxCollationSize;
    this.dotPath = dotPath;
}
From source file:com.aol.advertising.qiao.util.CommonUtils.java
public static ScheduledThreadPoolExecutor createScheduledThreadPoolExecutor(final int poolSz,
        final String threadName) {
    return new ScheduledThreadPoolExecutor(poolSz, new ThreadFactory() {
        private AtomicInteger threadNum = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable r) {
            if (poolSz == 1)
                return new Thread(r, threadName);
            else
                return new Thread(r, threadName + threadNum.incrementAndGet());
        }
    });
}
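A hypothetical call site for the utility above (the pool size, thread-name prefix, and scheduled task are illustrative, not taken from the qiao sources):

ScheduledThreadPoolExecutor scheduler =
        CommonUtils.createScheduledThreadPoolExecutor(2, "qiao-scheduler-");
// With a pool size greater than 1, threads are named "qiao-scheduler-1", "qiao-scheduler-2", ...
scheduler.scheduleAtFixedRate(() -> System.out.println("heartbeat"), 0, 5, TimeUnit.SECONDS);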
From source file:org.apache.hadoop.util.AsyncDiskService.java
/**
 * Create an AsyncDiskService with a set of volumes (specified by their
 * root directories).
 *
 * The AsyncDiskService uses one ThreadPool per volume to do the async
 * disk operations.
 *
 * @param volumes The roots of the file system volumes.
 */
public AsyncDiskService(String[] volumes) {
    threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(threadGroup, r);
        }
    };

    // Create one ThreadPool per volume
    for (int v = 0; v < volumes.length; v++) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(CORE_THREADS_PER_VOLUME,
                MAXIMUM_THREADS_PER_VOLUME, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory);

        // This can reduce the number of running threads
        executor.allowCoreThreadTimeOut(true);
        executors.put(volumes[v], executor);
    }
}
From source file:org.springframework.boot.devtools.livereload.LiveReloadServer.java
/**
 * Create a new {@link LiveReloadServer} listening on the specified port.
 * @param port the listen port
 */
public LiveReloadServer(int port) {
    this(port, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable runnable) {
            return new Thread(runnable);
        }
    });
}
From source file:org.apache.synapse.commons.throttle.core.ThrottleDistributedInstancesCleanupTask.java
public ThrottleDistributedInstancesCleanupTask() {
    throttleProperties = ThrottleServiceDataHolder.getInstance().getThrottleProperties();
    cleanUpPoolSize = Integer.parseInt(throttleProperties.getThrottleDistributedCleanupPoolSize());
    noOfTimestampObjectToBeCleared = Long.parseLong(throttleProperties.getThrottleDistributedCleanupAmount());
    distributedCleanupEnabled = Boolean
            .parseBoolean(throttleProperties.getThrottleDistributedCleanupTaskEnable());
    maxNonAssociatedCounterCountToClear = Integer
            .parseInt(throttleProperties.getMaxNonAssociatedCounterCleanupAmount());
    if (log.isDebugEnabled()) {
        log.debug("Throttle window replicator pool size set to " + cleanUpPoolSize);
    }

    if (distributedCleanupEnabled) {
        ScheduledExecutorService executor = Executors.newScheduledThreadPool(cleanUpPoolSize,
                new ThreadFactory() {
                    public Thread newThread(Runnable r) {
                        Thread t = new Thread(r);
                        t.setName("Throttle Distributed Cleanup Task");
                        return t;
                    }
                });
        String throttleFrequency = throttleProperties.getThrottleContextDistributedCleanupTaskFrequency();
        String distributedInstanceExpiry = throttleProperties
                .getThrottleContextDistributedExpiredInstanceTime();
        if (log.isDebugEnabled()) {
            log.debug("Throttling Cleanup Task Frequency set to " + throttleFrequency);
        }
        executor.scheduleAtFixedRate(new CleanupTask(), Integer.parseInt(throttleFrequency),
                Integer.parseInt(throttleFrequency), TimeUnit.MILLISECONDS);
        distributedInstanceExpiryMillis = Long.parseLong(distributedInstanceExpiry);
    }
}
From source file:com.netflix.zeno.diff.TypeDiffOperation.java
@SuppressWarnings("unchecked")
public TypeDiff<T> performDiff(DiffSerializationFramework framework, Iterable<T> fromState,
        Iterable<T> toState, int numThreads) {

    Map<Object, T> fromStateObjects = new HashMap<Object, T>();
    for (T obj : fromState) {
        fromStateObjects.put(instruction.getKey(obj), obj);
    }

    // each entry is a job
    ArrayList<List<T>> perProcessorWorkList = new ArrayList<List<T>>(numThreads);
    for (int i = 0; i < numThreads; ++i) {
        perProcessorWorkList.add(new ArrayList<T>());
    }

    Map<Object, Object> toStateKeys = new ConcurrentHashMap<Object, Object>();

    int toIncrCount = 0;
    for (T toObject : toState) {
        perProcessorWorkList.get(toIncrCount % numThreads).add(toObject);
        toIncrCount++;
    }

    ExecutorService executor = Executors.newFixedThreadPool(numThreads, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            final Thread thread = new Thread(r, "TypeDiff_" + instruction.getTypeIdentifier());
            thread.setDaemon(true);
            return thread;
        }
    });

    try {
        ArrayList<Future<TypeDiff<T>>> workResultList = new ArrayList<Future<TypeDiff<T>>>(
                perProcessorWorkList.size());

        for (final List<T> workList : perProcessorWorkList) {
            if (workList != null && !workList.isEmpty()) {
                workResultList.add(executor.submit(new TypeDiffCallable<T>(framework, instruction,
                        fromStateObjects, toStateKeys, workList)));
            }
        }

        TypeDiff<T> mergedDiff = new TypeDiff<T>(instruction.getTypeIdentifier());

        for (final Future<TypeDiff<T>> future : workResultList) {
            try {
                TypeDiff<T> typeDiff = future.get();
                mergeTypeDiff(mergedDiff, typeDiff);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        for (Map.Entry<Object, T> entry : fromStateObjects.entrySet()) {
            mergedDiff.incrementFrom();
            if (!toStateKeys.containsKey(entry.getKey()))
                mergedDiff.addExtraInFrom(entry.getValue());
        }

        return mergedDiff;
    } finally {
        executor.shutdownNow();
    }
}
From source file:com.netflix.iep.http.RxHttpTest.java
@BeforeClass
public static void startServer() throws Exception {
    rxHttp.start();

    server = HttpServer.create(new InetSocketAddress(0), 100);
    server.setExecutor(Executors.newFixedThreadPool(10, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "HttpServer");
        }
    }));
    port = server.getAddress().getPort();

    server.createContext("/empty", new HttpHandler() {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            ignore(exchange.getRequestBody());
            int port = exchange.getRemoteAddress().getPort();
            exchange.getResponseHeaders().add("X-Test-Port", "" + port);
            statusCounts.incrementAndGet(statusCode.get());
            exchange.sendResponseHeaders(statusCode.get(), -1L);
            exchange.close();
        }
    });

    server.createContext("/echo", new HttpHandler() {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            Headers headers = exchange.getRequestHeaders();
            int contentLength = Integer.parseInt(headers.getFirst("Content-Length"));
            String contentEnc = headers.getFirst("Content-Encoding");
            if (contentEnc != null) {
                exchange.getResponseHeaders().add("Content-Encoding", contentEnc);
            }

            int code = statusCode.get();
            if (contentLength > 512 && !"gzip".equals(contentEnc)) {
                code = 400;
            }

            statusCounts.incrementAndGet(code);
            exchange.sendResponseHeaders(code, contentLength);
            try (InputStream input = exchange.getRequestBody();
                    OutputStream output = exchange.getResponseBody()) {
                byte[] buf = new byte[1024];
                int length;
                while ((length = input.read(buf)) > 0) {
                    output.write(buf, 0, length);
                }
            }
            exchange.close();
        }
    });

    server.createContext("/relativeRedirect", new HttpHandler() {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            ignore(exchange.getRequestBody());
            if (redirects.get() <= 0) {
                statusCounts.incrementAndGet(statusCode.get());
                exchange.getResponseHeaders().add("Location", "/empty");
                exchange.sendResponseHeaders(statusCode.get(), -1L);
                exchange.close();
            } else {
                redirects.decrementAndGet();
                statusCounts.incrementAndGet(302);
                exchange.getResponseHeaders().add("Location", "/relativeRedirect");
                exchange.sendResponseHeaders(302, -1L);
                exchange.close();
            }
        }
    });

    server.createContext("/absoluteRedirect", new HttpHandler() {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            String host = "http://" + exchange.getRequestHeaders().getFirst("Host");
            ignore(exchange.getRequestBody());
            if (redirects.get() <= 0) {
                statusCounts.incrementAndGet(302);
                exchange.getResponseHeaders().add("Location", host + "/empty");
                exchange.sendResponseHeaders(302, -1L);
                exchange.close();
            } else {
                redirects.decrementAndGet();
                statusCounts.incrementAndGet(302);
                exchange.getResponseHeaders().add("Location", host + "/absoluteRedirect");
                exchange.sendResponseHeaders(302, -1L);
                exchange.close();
            }
        }
    });

    server.createContext("/notModified", new HttpHandler() {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            ignore(exchange.getRequestBody());
            statusCounts.incrementAndGet(304);
            exchange.sendResponseHeaders(304, -1L);
            exchange.close();
        }
    });

    server.createContext("/redirectNoLocation", new HttpHandler() {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            ignore(exchange.getRequestBody());
            statusCounts.incrementAndGet(302);
            exchange.sendResponseHeaders(302, -1L);
            exchange.close();
        }
    });

    server.createContext("/readTimeout", new HttpHandler() {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            ignore(exchange.getRequestBody());
            statusCounts.incrementAndGet(statusCode.get()); // So we can track retries
            Object lock = new Object();
            try {
                synchronized (lock) {
                    lock.wait();
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    });

    server.start();

    set(client + ".niws.client.MaxAutoRetriesNextServer", "" + retries);
    set(client + ".niws.client.RetryDelay", "100");
    set(client + ".niws.client.ReadTimeout", "1000");
}