Example usage for java.util.concurrent.ThreadFactory

Introduction

On this page you can find example usages of java.util.concurrent.ThreadFactory, collected from open source projects.

Prototype

public interface ThreadFactory {
    Thread newThread(Runnable r);
}

Usage
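
All of the examples below follow the same basic pattern: obtain threads (often from Executors.defaultThreadFactory()), adjust their name, daemon flag, or priority, and hand the factory to an Executors method or a ThreadPoolExecutor constructor. Here is a minimal, self-contained sketch of that pattern; the thread name prefix and pool size are arbitrary:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedDaemonThreadFactoryDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);
        // Wrap the default factory, then customize each created thread.
        ThreadFactory factory = r -> {
            Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setName("demo-worker-" + counter.incrementAndGet());
            t.setDaemon(true); // daemon threads do not keep the JVM alive
            return t;
        };
        ExecutorService pool = Executors.newFixedThreadPool(2, factory);
        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}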

From source file:org.scale7.cassandra.pelops.pool.CommonsBackedPool.java

private void configureScheduledTasks() {
    if (policy.getTimeBetweenScheduledMaintenanceTaskRunsMillis() > 0) {
        if (policy.isRunMaintenanceTaskDuringInit()) {
            logger.info("Running maintenance tasks during initialization...");
            runMaintenanceTasks();
        }

        if (Policy.MIN_TIME_BETWEEN_SCHEDULED_TASKS >= policy
                .getTimeBetweenScheduledMaintenanceTaskRunsMillis()) {
            logger.warn(
                    "Setting the scheduled tasks to run less than every {} milliseconds is not a good idea...",
                    Policy.MIN_TIME_BETWEEN_SCHEDULED_TASKS);
        }

        logger.info("Configuring scheduled tasks to run every {} milliseconds",
                policy.getTimeBetweenScheduledMaintenanceTaskRunsMillis());
        executorService = Executors.newScheduledThreadPool(1, new ThreadFactory() {
            @Override
            public Thread newThread(Runnable runnable) {
                Thread thread = new Thread(runnable, "pelops-pool-worker-" + getKeyspace());
                thread.setDaemon(true); // don't make the JVM wait for this thread to exit
                thread.setPriority(Thread.MIN_PRIORITY + 1); // try not to disrupt other threads
                return thread;
            }
        });

        executorService.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                logger.debug("Background thread running maintenance tasks");
                try {
                    runMaintenanceTasks();
                } catch (Exception e) {
                    logger.warn("An exception was thrown while running the maintenance tasks", e);
                }
            }
        }, policy.getTimeBetweenScheduledMaintenanceTaskRunsMillis(),
                policy.getTimeBetweenScheduledMaintenanceTaskRunsMillis(), TimeUnit.MILLISECONDS);
    } else {
        logger.warn("Disabling maintenance tasks; dynamic node discovery, node suspension, idle connection "
                + "termination and some running statistics will not be available to this pool.");
    }
}

From source file:org.apache.nifi.minifi.bootstrap.configuration.ingestors.FileChangeIngestor.java

@Override
public void start() {
    executorService = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setName("File Change Notifier Thread");
            t.setDaemon(true);
            return t;
        }
    });
    this.executorService.scheduleWithFixedDelay(this, 0, pollingSeconds, DEFAULT_POLLING_PERIOD_UNIT);
}

From source file:com.clustercontrol.agent.custom.CommandCollector.java

@Override
public void start() {
    // determine startup delay (using monitorId for random seed)
    int delay = new Random(config.getMonitorId().hashCode()).nextInt(config.getInterval());

    // determine startup time
    //
    // e.g.
    //   delay : 15 [sec]
    //   interval : 300 [sec]
    //   now : 2000-1-1 00:00:10
    //   best startup : 2000-1-1 00:00:15
    long now = HinemosTime.currentTimeMillis();
    long startup = config.getInterval() * (now / config.getInterval()) + delay;
    startup = startup > now ? startup : startup + config.getInterval();

    log.info("scheduling command collector. (" + this + ", startup = "
            + String.format("%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS", new Date(startup)) + ", interval = "
            + config.getInterval() + " [msec])");

    // initialize scheduler thread
    _scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        private volatile int _count = 0;

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "CommandCollectorScheduler-" + _count++);
        }
    });

    // start scheduler
    // On agent startup this can be called twice; the first execution is deferred by one interval to avoid a double run.
    _scheduler.scheduleWithFixedDelay(this, startup - now, config.getInterval(), TimeUnit.MILLISECONDS);
}

From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java

/**
 * Creates an instance.
 *
 * @param corePoolSize core size of the thread pool
 * @param maxPoolSize the max number of threads in the thread pool
 * @param maxConcurrentRequests maximum number of concurrent requests
 * @param client a client for making requests
 * @param clusterCoordinator the cluster coordinator to use for interacting with node statuses
 * @param connectionTimeout the connection timeout specified in milliseconds
 * @param readTimeout the read timeout specified in milliseconds
 * @param callback a callback that will be called whenever all of the responses have been gathered for a request. May be null.
 * @param eventReporter an EventReporter that can be used to notify users of interesting events. May be null.
 * @param nifiProperties properties
 */
public ThreadPoolRequestReplicator(final int corePoolSize, final int maxPoolSize,
        final int maxConcurrentRequests, final Client client, final ClusterCoordinator clusterCoordinator,
        final String connectionTimeout, final String readTimeout, final RequestCompletionCallback callback,
        final EventReporter eventReporter, final NiFiProperties nifiProperties) {
    if (corePoolSize <= 0) {
        throw new IllegalArgumentException("The Core Pool Size must be greater than zero.");
    } else if (maxPoolSize < corePoolSize) {
        throw new IllegalArgumentException("Max Pool Size must be >= Core Pool Size.");
    } else if (client == null) {
        throw new IllegalArgumentException("Client may not be null.");
    }

    this.client = client;
    this.clusterCoordinator = clusterCoordinator;
    this.connectionTimeoutMs = (int) FormatUtils.getTimeDuration(connectionTimeout, TimeUnit.MILLISECONDS);
    this.readTimeoutMs = (int) FormatUtils.getTimeDuration(readTimeout, TimeUnit.MILLISECONDS);
    this.maxConcurrentRequests = maxConcurrentRequests;
    this.responseMapper = new StandardHttpResponseMapper(nifiProperties);
    this.eventReporter = eventReporter;
    this.callback = callback;
    this.nifiProperties = nifiProperties;

    client.property(ClientProperties.CONNECT_TIMEOUT, connectionTimeoutMs);
    client.property(ClientProperties.READ_TIMEOUT, readTimeoutMs);
    client.property(ClientProperties.FOLLOW_REDIRECTS, Boolean.TRUE);

    final AtomicInteger threadId = new AtomicInteger(0);
    final ThreadFactory threadFactory = r -> {
        final Thread t = Executors.defaultThreadFactory().newThread(r);
        t.setDaemon(true);
        t.setName("Replicate Request Thread-" + threadId.incrementAndGet());
        return t;
    };

    // Note: with an unbounded LinkedBlockingQueue, the pool never grows beyond
    // corePoolSize, so maxPoolSize is effectively unused in this configuration.
    executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, 5, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), threadFactory);

    maintenanceExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            t.setName(ThreadPoolRequestReplicator.class.getSimpleName() + " Maintenance Thread");
            return t;
        }
    });

    maintenanceExecutor.scheduleWithFixedDelay(() -> purgeExpiredRequests(), 1, 1, TimeUnit.SECONDS);
}

From source file:org.apache.nifi.remote.util.SiteToSiteRestApiClient.java

public SiteToSiteRestApiClient(final SSLContext sslContext, final HttpProxy proxy,
        final EventReporter eventReporter) {
    this.sslContext = sslContext;
    this.proxy = proxy;
    this.eventReporter = eventReporter;

    ttlExtendTaskExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        private final ThreadFactory defaultFactory = Executors.defaultThreadFactory();

        @Override
        public Thread newThread(final Runnable r) {
            final Thread thread = defaultFactory.newThread(r);
            thread.setName(Thread.currentThread().getName() + " TTLExtend");
            thread.setDaemon(true);
            return thread;
        }
    });
}

From source file:mondrian.olap.Util.java

/**
 * Creates an {@link ExecutorService} object backed by a thread pool.
 *
 * @param maximumPoolSize Maximum number of concurrent threads.
 * @param corePoolSize Minimum number of concurrent threads to maintain in
 *            the pool, even if they are idle.
 * @param keepAliveTime Time, in seconds, for which to keep unused threads
 *            alive.
 * @param name The name of the threads.
 * @param rejectionPolicy The rejection policy to enforce.
 * @return A preconfigured executor service.
 */
public static ExecutorService getExecutorService(int maximumPoolSize, int corePoolSize, long keepAliveTime,
        final String name, RejectedExecutionHandler rejectionPolicy) {
    if (Util.PreJdk16) {
        // On JDK1.5, if you specify corePoolSize=0, nothing gets executed.
        // Bummer.
        corePoolSize = Math.max(corePoolSize, 1);
    }

    // We must create a factory where the threads
    // have the right name and are marked as daemon threads.
    final ThreadFactory factory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger(0);

        public Thread newThread(Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            t.setName(name + '_' + counter.incrementAndGet());
            return t;
        }
    };

    // Ok, create the executor
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(corePoolSize,
            maximumPoolSize > 0 ? maximumPoolSize : Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS,
            // we use a sync queue. any other type of queue
            // will prevent the tasks from running concurrently
            // because the executors API requires blocking queues.
            // Important to pass true here. This makes the
            // order of tasks deterministic.
            // TODO Write a non-blocking queue which implements
            // the blocking queue API so we can pass that to the
            // executor.
            new SynchronousQueue<Runnable>(true), factory);

    // Set the rejection policy if required.
    if (rejectionPolicy != null) {
        executor.setRejectedExecutionHandler(rejectionPolicy);
    }

    // Done
    return executor;
}
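
A hypothetical invocation, for illustration only; the pool sizes, thread name, and rejection policy below are assumptions, not values taken from Mondrian:

ExecutorService executor = Util.getExecutorService(
        10,               // maximumPoolSize
        1,                // corePoolSize
        30,               // keepAliveTime, in seconds
        "mondrian.demo",  // thread name prefix (assumed)
        new ThreadPoolExecutor.CallerRunsPolicy()); // or null to keep the default handler

Because the pool is backed by a SynchronousQueue, submissions are rejected once all threads are busy (when maximumPoolSize is bounded), so the choice of rejection handler matters more here than with a queued pool.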

From source file:org.jumpmind.symmetric.service.impl.NodeCommunicationService.java

protected ThreadPoolExecutor getExecutor(final CommunicationType communicationType) {
    ThreadPoolExecutor service = executors.get(communicationType);

    String threadCountParameter = "";
    switch (communicationType) {
    case PULL:
        threadCountParameter = ParameterConstants.PULL_THREAD_COUNT_PER_SERVER;
        break;
    case PUSH:
        threadCountParameter = ParameterConstants.PUSH_THREAD_COUNT_PER_SERVER;
        break;
    case FILE_PULL:
        // The original used FILE_PUSH_THREAD_COUNT_PER_SERVER here, which looks
        // like a copy-paste slip; FILE_PULL presumably has its own parameter
        // (constant name assumed).
        threadCountParameter = ParameterConstants.FILE_PULL_THREAD_COUNT_PER_SERVER;
        break;
    case FILE_PUSH:
        threadCountParameter = ParameterConstants.FILE_PUSH_THREAD_COUNT_PER_SERVER;
        break;
    case EXTRACT:
        threadCountParameter = ParameterConstants.INITIAL_LOAD_EXTRACT_THREAD_COUNT_PER_SERVER;
        break;
    default:
        break;
    }
    int threadCount = parameterService.getInt(threadCountParameter, 1);

    if (service != null && service.getCorePoolSize() != threadCount) {
        log.info("{} has changed from {} to {}.  Restarting thread pool",
                new Object[] { threadCountParameter, service.getCorePoolSize(), threadCount });
        stop();
        service = null;
    }

    if (service == null) {
        synchronized (this) {
            service = executors.get(communicationType);
            if (service == null) {
                if (threadCount <= 0) {
                    log.warn("{}={} is not a valid value. Defaulting to 1", threadCountParameter, threadCount);
                    threadCount = 1;
                } else if (threadCount > 1) {
                    log.info("{} will use {} threads", communicationType.name().toLowerCase(), threadCount);
                }
                service = (ThreadPoolExecutor) Executors.newFixedThreadPool(threadCount, new ThreadFactory() {
                    final AtomicInteger threadNumber = new AtomicInteger(1);
                    final String namePrefix = parameterService.getEngineName().toLowerCase() + "-"
                            + communicationType.name().toLowerCase() + "-";

                    public Thread newThread(Runnable r) {
                        Thread t = new Thread(r);
                        t.setName(namePrefix + threadNumber.getAndIncrement());
                        if (t.isDaemon()) {
                            t.setDaemon(false);
                        }
                        if (t.getPriority() != Thread.NORM_PRIORITY) {
                            t.setPriority(Thread.NORM_PRIORITY);
                        }
                        return t;
                    }
                });
                executors.put(communicationType, service);
            }
        }
    }
    return service;
}

From source file:ws.wamp.jawampa.WampRouter.java

WampRouter(Map<String, RealmConfig> realms) {

    // Populate the realms from the configuration
    this.realms = new HashMap<String, Realm>();
    for (Map.Entry<String, RealmConfig> e : realms.entrySet()) {
        Realm info = new Realm(e.getValue());
        this.realms.put(e.getKey(), info);
    }

    // Create an eventloop and the RX scheduler on top of it
    this.eventLoop = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "WampRouterEventLoop");
            t.setDaemon(true);
            return t;
        }
    });
    this.scheduler = Schedulers.from(eventLoop);

    idleChannels = new HashSet<IConnectionController>();
}

From source file:com.alibaba.wasp.master.AssignmentManager.java

/**
 * Get a named {@link java.util.concurrent.ThreadFactory} that just builds daemon threads
 *
 * @param prefix
 *          name prefix for all threads created from the factory
 * @return a thread factory that creates named, daemon threads
 */
private static ThreadFactory newDaemonThreadFactory(final String prefix) {
    final ThreadFactory namedFactory = Threads.getNamedThreadFactory(prefix);
    return new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = namedFactory.newThread(r);
            if (!t.isDaemon()) {
                t.setDaemon(true);
            }
            if (t.getPriority() != Thread.NORM_PRIORITY) {
                t.setPriority(Thread.NORM_PRIORITY);
            }
            return t;
        }
    };
}
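
A hypothetical use of this helper, for illustration only (the prefix and pool type are assumptions; since the method is private, the call would live inside AssignmentManager):

ExecutorService pool = Executors.newCachedThreadPool(
        newDaemonThreadFactory("AssignmentManager-"));

Every thread the pool creates keeps the name assigned by Threads.getNamedThreadFactory(prefix) but is forced to be a daemon running at normal priority.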

From source file:com.ibm.jaggr.core.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 *
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    final String sourceMethod = "<ctor>"; //$NON-NLS-1$
    boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(DepTree.class.getName(), sourceMethod,
                new Object[] { paths, aggregator, stamp, clean, validateDeps });
    }
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();
    cacheBust = AggregatorUtil.getCacheBust(aggregator);

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                if (isTraceLogging) {
                    log.finer("Attempting to read cached dependencies from " + cacheFile.toString()); //$NON-NLS-1$
                }
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (isTraceLogging) {
                log.finer("Current cacheBust = " + cacheBust + ", cached cacheBust = " + cached.cacheBust); //$NON-NLS-1$//$NON-NLS-2$
            }
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
        if (cached != null && !StringUtils.equals(rawConfig, cached.rawConfig)) {
            if (isTraceLogging) {
                log.finer("Current config = " + rawConfig); //$NON-NLS-1$
                log.finer("Cached config = " + cached.rawConfig); //$NON-NLS-1$
            }
            validateDeps = true;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && !validateDeps && !clean) {
        depMap = cached.depMap;
        fromCache = true;
        return;
    } else if (isTraceLogging) {
        log.finer("Building/validating deps: cached = " + cached + ", validateDeps = " + validateDeps //$NON-NLS-1$//$NON-NLS-2$
                + ", clean = " + clean); //$NON-NLS-1$
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    Set<String> nonJSExtensions = Collections.unmodifiableSet(getNonJSExtensions(aggregator));
    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode("", path); //$NON-NLS-1$
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode, nonJSExtensions));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (folder nodes with no children)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists() && !cacheDir.mkdirs()) {
        throw new IOException(
                MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
    }

    // Serialize the map to the cache directory
    ObjectOutputStream os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        if (isTraceLogging) {
            log.finer("Writing cached dependencies to " + cacheFile.toString()); //$NON-NLS-1$
        }
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    if (isTraceLogging) {
        log.exiting(DepTree.class.getName(), sourceMethod);
    }
}