Example usage for java.util.concurrent ScheduledThreadPoolExecutor ScheduledThreadPoolExecutor

Introduction

This page shows example usage of the java.util.concurrent ScheduledThreadPoolExecutor(int corePoolSize, ThreadFactory threadFactory) constructor, drawn from open-source projects.

Prototype

public ScheduledThreadPoolExecutor(int corePoolSize, ThreadFactory threadFactory) 

Document

Creates a new ScheduledThreadPoolExecutor with the given initial parameters.

Usage
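
A minimal, self-contained sketch of the constructor in isolation, before the project examples below (the class name, thread name, and timing values here are illustrative, not taken from any of the projects):

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class ScheduledExecutorDemo {
    public static void main(String[] args) throws InterruptedException {
        // Give the pool's threads a recognizable name and mark them as daemons,
        // the same pattern most of the examples below apply via a builder.
        ThreadFactory factory = r -> {
            Thread t = new Thread(r, "demo-scheduler");
            t.setDaemon(true);
            return t;
        };
        ScheduledThreadPoolExecutor scheduler = new ScheduledThreadPoolExecutor(1, factory);

        // Run a task every 500 ms after an initial 100 ms delay.
        scheduler.scheduleAtFixedRate(() -> System.out.println("tick"), 100, 500, TimeUnit.MILLISECONDS);

        Thread.sleep(2000); // let a few ticks fire before exiting
        scheduler.shutdown();
    }
}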

From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.java

public FsDatasetCache(FsDatasetImpl dataset) {
    this.dataset = dataset;
    this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory();
    ThreadFactory workerFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("FsDatasetCache-%d-" + dataset.toString()).build();
    this.usedBytesCount = new UsedBytesCount();
    this.uncachingExecutor = new ThreadPoolExecutor(0, 1, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), workerFactory);
    this.uncachingExecutor.allowCoreThreadTimeOut(true);
    this.deferredUncachingExecutor = new ScheduledThreadPoolExecutor(1, workerFactory);
    this.revocationMs = dataset.datanode.getConf().getLong(DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS,
            DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS_DEFAULT);
    long confRevocationPollingMs = dataset.datanode.getConf().getLong(DFS_DATANODE_CACHE_REVOCATION_POLLING_MS,
            DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT);
    long minRevocationPollingMs = revocationMs / 2;
    if (minRevocationPollingMs < confRevocationPollingMs) {
        throw new RuntimeException("configured value " + confRevocationPollingMs + "for "
                + DFS_DATANODE_CACHE_REVOCATION_POLLING_MS
                + " is too high.  It must not be more than half of the " + "value of "
                + DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS + ".  Reconfigure this to "
                + minRevocationPollingMs);
    }
    this.revocationPollingMs = confRevocationPollingMs;
}

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler.java

ScheduledThreadPoolExecutor createScheduledThreadPoolExecutor(Configuration conf) {
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("LogDeleter #%d").build();
    sched = new ScheduledThreadPoolExecutor(conf.getInt(YarnConfiguration.NM_LOG_DELETION_THREADS_COUNT,
            YarnConfiguration.DEFAULT_NM_LOG_DELETE_THREAD_COUNT), tf);
    return sched;
}

From source file:com.laudandjolynn.mytv.Main.java

/**
 * Creates the recurring scheduled tasks: weekly program-table crawling,
 * daily proxy refresh, and daily cache refresh.
 *
 * @param data
 * @param tvService
 */
private static void createEverydayCron(final MyTvData data, final TvService tvService) {
    ScheduledExecutorService scheduled = new ScheduledThreadPoolExecutor(3,
            new BasicThreadFactory.Builder().namingPattern("Mytv_Scheduled_Task").build());
    Date today = new Date();
    String nextWeek = DateUtils.date2String(DateUtils.nextWeek(today), "yyyy-MM-dd 00:01:00");
    long crawlTaskInitDelay = (DateUtils.string2Date(nextWeek).getTime() - today.getTime()) / 1000;
    logger.info("cron crawler task will be automatic start after " + crawlTaskInitDelay + " seconds at "
            + nextWeek);
    scheduled.scheduleWithFixedDelay(new Runnable() {

        @Override
        public void run() {
            Date[] weeks = DateUtils.getWeek(new Date());
            logger.info("begin to crawl program table of " + Arrays.deepToString(weeks));
            ExecutorService executorService = Executors.newFixedThreadPool(Constant.CPU_PROCESSOR_NUM,
                    new BasicThreadFactory.Builder().namingPattern("Mytv_Schedule_Crawl_Program_Table_%d")
                            .build());
            List<TvStation> stationList = tvService.getDisplayedTvStation();
            for (Date date : weeks) {
                crawlAllProgramTable(stationList, executorService, DateUtils.date2String(date, "yyyy-MM-dd"),
                        tvService);
            }
            executorService.shutdown();
        }
    }, crawlTaskInitDelay, 604860, TimeUnit.SECONDS); // 604860 s = 7 days + 60 s

    // refresh the proxy list once a day
    String nextDate = DateUtils.tommorow() + " 23:00:00";
    long commonInitDelay = (DateUtils.string2Date(nextDate).getTime() - today.getTime()) / 1000;
    logger.info("cron refresh proxy task will be automatic start after " + commonInitDelay + " seconds at "
            + nextDate);
    scheduled.scheduleWithFixedDelay(new Runnable() {

        @Override
        public void run() {
            logger.info("begin to refresh proxies.");
            MyTvProxyManager.getInstance().refresh();
        }
    }, commonInitDelay, 86400, TimeUnit.SECONDS); // 86400 s = 24 h

    // refresh the cache once a day
    logger.info("cron refresh cache task will be automatic start after " + commonInitDelay + " seconds at "
            + nextDate);
    scheduled.scheduleWithFixedDelay(new Runnable() {

        @Override
        public void run() {
            logger.info("begin to refresh caches.");
            makeCache(tvService);
        }
    }, commonInitDelay, 86400, TimeUnit.SECONDS);

    // the scheduler must keep running for the recurring tasks above,
    // so it is deliberately not shut down here
    // scheduled.shutdown();
}

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.java

public ResourceLocalizationService(Dispatcher dispatcher, ContainerExecutor exec, DeletionService delService,
        LocalDirsHandlerService dirsHandler, Context context) {

    super(ResourceLocalizationService.class.getName());
    this.exec = exec;
    this.dispatcher = dispatcher;
    this.delService = delService;
    this.dirsHandler = dirsHandler;

    this.cacheCleanup = new ScheduledThreadPoolExecutor(1,
            new ThreadFactoryBuilder().setNameFormat("ResourceLocalizationService Cache Cleanup").build());
    this.stateStore = context.getNMStateStore();
    this.nmContext = context;
}

From source file:org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.java

DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration conf) {
    this.datanode = datanode;
    this.dataset = dataset;
    int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
            DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
    scanPeriodMsecs = interval * 1000L; //msec
    int threads = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
            DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);

    reportCompileThreadPool = Executors.newFixedThreadPool(threads, new Daemon.DaemonFactory());
    masterThread = new ScheduledThreadPoolExecutor(1, new Daemon.DaemonFactory());
}

From source file:org.rhq.core.pc.inventory.InventoryManager.java

/**
 * @see ContainerService#initialize()
 */
public void initialize() {
    inventoryLock.writeLock().lock();

    try {
        log.info("Initializing Inventory Manager...");

        this.discoveryComponentProxyFactory = new DiscoveryComponentProxyFactory();
        this.discoveryComponentProxyFactory.initialize();

        this.agent = new Agent(this.configuration.getContainerName(), null, 0, null, null);

        //make sure the avail collectors are available before we instantiate any 
        //resource context - either from disk or from anywhere else.
        availabilityCollectors = new AvailabilityCollectorThreadPool();
        availabilityCollectors.initialize();

        if (configuration.isInsideAgent()) {
            loadFromDisk();
        }

        // Discover the platform first thing.
        executePlatformScan();

        //try the resource upgrade before we have any schedulers set up
        //so that we don't get any interventions from concurrently running
        //discoveries.
        activateAndUpgradeResources();

        // Never run more than one avail check at a time.
        availabilityThreadPoolExecutor = new ScheduledThreadPoolExecutor(AVAIL_THREAD_POOL_CORE_POOL_SIZE,
                new LoggingThreadFactory(AVAIL_THREAD_POOL_NAME, true));
        availabilityExecutor = new AvailabilityExecutor(this);

        // Never run more than one discovery scan at a time (service and service scans share the same pool).
        inventoryThreadPoolExecutor = new ScheduledThreadPoolExecutor(1,
                new LoggingThreadFactory(INVENTORY_THREAD_POOL_NAME, true));
        serverScanExecutor = new AutoDiscoveryExecutor(null, this, configuration);
        serviceScanExecutor = new RuntimeDiscoveryExecutor(this, configuration);

        // Only schedule periodic discovery scans and avail checks if we are running inside the RHQ Agent (versus
        // inside EmbJopr).
        if (configuration.isInsideAgent()) {
            // After an initial delay (5s by default), periodically run an availability check (every 1m by default).
            availabilityThreadPoolExecutor.scheduleWithFixedDelay(availabilityExecutor,
                    configuration.getAvailabilityScanInitialDelay(), configuration.getAvailabilityScanPeriod(),
                    TimeUnit.SECONDS);

            // After an initial delay (10s by default), periodically run a server discovery scan (every 15m by default).
            inventoryThreadPoolExecutor.scheduleWithFixedDelay(serverScanExecutor,
                    configuration.getServerDiscoveryInitialDelay(), configuration.getServerDiscoveryPeriod(),
                    TimeUnit.SECONDS);

            // After an initial delay (20s by default), periodically run a service discovery scan (every 1d by default).
            inventoryThreadPoolExecutor.scheduleWithFixedDelay(serviceScanExecutor,
                    configuration.getServiceDiscoveryInitialDelay(), configuration.getServiceDiscoveryPeriod(),
                    TimeUnit.SECONDS);
        }
    } finally {
        inventoryLock.writeLock().unlock();
    }

    log.info("Inventory Manager initialized.");
}

From source file:org.languagetool.gui.LanguageToolSupport.java

private void init() {
    try {
        config = new Configuration(new File(System.getProperty("user.home")), CONFIG_FILE, null);
    } catch (IOException ex) {
        throw new RuntimeException("Could not load configuration", ex);
    }

    Language defaultLanguage = config.getLanguage();
    if (defaultLanguage == null) {
        defaultLanguage = Languages.getLanguageForLocale(Locale.getDefault());
    }

    /**
     * Warm-up: we have a lot of lazy init in LT, which causes the first check to
     * be very slow (several seconds) for languages with a lot of data and a lot of
     * rules. We just assume that the default language is the language that the user
     * often uses and init the LT object for that now, not just when it's first used.
     * This makes the first check feel much faster:
     */
    reloadLanguageTool(defaultLanguage);

    checkExecutor = new ScheduledThreadPoolExecutor(1, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setDaemon(true);
            t.setPriority(Thread.MIN_PRIORITY);
            t.setName(t.getName() + "-lt-background");
            return t;
        }
    });

    check = new AtomicInteger(0);

    this.textComponent.getDocument().addDocumentListener(new DocumentListener() {
        @Override
        public void insertUpdate(DocumentEvent e) {
            mustDetectLanguage = config.getAutoDetect();
            recalculateSpans(e.getOffset(), e.getLength(), false);
            if (backgroundCheckEnabled) {
                checkDelayed(null);
            }
        }

        @Override
        public void removeUpdate(DocumentEvent e) {
            mustDetectLanguage = config.getAutoDetect();
            recalculateSpans(e.getOffset(), e.getLength(), true);
            if (backgroundCheckEnabled) {
                checkDelayed(null);
            }
        }

        @Override
        public void changedUpdate(DocumentEvent e) {
            mustDetectLanguage = config.getAutoDetect();
            if (backgroundCheckEnabled) {
                checkDelayed(null);
            }
        }
    });

    mouseListener = new MouseListener() {
        @Override
        public void mouseClicked(MouseEvent me) {
        }

        @Override
        public void mousePressed(MouseEvent me) {
            if (me.isPopupTrigger()) {
                showPopup(me);
            }
        }

        @Override
        public void mouseReleased(MouseEvent me) {
            if (me.isPopupTrigger()) {
                showPopup(me);
            }
        }

        @Override
        public void mouseEntered(MouseEvent me) {
        }

        @Override
        public void mouseExited(MouseEvent me) {
        }
    };
    this.textComponent.addMouseListener(mouseListener);

    actionListener = e -> _actionPerformed(e);

    mustDetectLanguage = config.getAutoDetect();
    if (!this.textComponent.getText().isEmpty() && backgroundCheckEnabled) {
        checkImmediately(null);
    }
}

From source file:org.sleuthkit.autopsy.experimental.autoingest.AutoIngestManager.java

/**
 * Constructs an auto ingest manager responsible for processing auto ingest
 * jobs defined by manifest files that can be added to any level of a
 * designated input directory tree.
 */
private AutoIngestManager() {
    SYS_LOGGER.log(Level.INFO, "Initializing auto ingest");
    state = State.IDLE;
    eventPublisher = new AutopsyEventPublisher();
    scanMonitor = new Object();
    inputScanSchedulingExecutor = new ScheduledThreadPoolExecutor(NUM_INPUT_SCAN_SCHEDULING_THREADS,
            new ThreadFactoryBuilder().setNameFormat(INPUT_SCAN_SCHEDULER_THREAD_NAME).build());
    inputScanExecutor = Executors
            .newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat(INPUT_SCAN_THREAD_NAME).build());
    jobProcessingExecutor = Executors
            .newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat(AUTO_INGEST_THREAD_NAME).build());
    jobStatusPublishingExecutor = new ScheduledThreadPoolExecutor(1,
            new ThreadFactoryBuilder().setNameFormat(JOB_STATUS_PUBLISHING_THREAD_NAME).build());
    hostNamesToRunningJobs = new ConcurrentHashMap<>();
    hostNamesToLastMsgTime = new ConcurrentHashMap<>();
    jobsLock = new Object();
    casesToManifests = new HashMap<>();
    pendingJobs = new ArrayList<>();
    completedJobs = new ArrayList<>();
}

From source file:org.apache.jackrabbit.oak.jcr.repository.RepositoryImpl.java

private static ScheduledExecutorService createListeningScheduledExecutorService() {
    ThreadFactory tf = new ThreadFactory() {
        private final AtomicLong counter = new AtomicLong();

        @Override
        public Thread newThread(@Nonnull Runnable r) {
            Thread t = new Thread(r, newName());
            t.setDaemon(true);
            return t;
        }

        private String newName() {
            return "oak-repository-executor-" + counter.incrementAndGet();
        }
    };
    return new ScheduledThreadPoolExecutor(1, tf) {
        // purge the list of schedule tasks before scheduling a new task in order
        // to reduce memory consumption in the face of many cancelled tasks. See OAK-1890.

        @Override
        public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
            purge();
            return super.schedule(callable, delay, unit);
        }

        @Override
        public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
            purge();
            return super.schedule(command, delay, unit);
        }

        @Override
        public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period,
                TimeUnit unit) {
            purge();
            return super.scheduleAtFixedRate(command, initialDelay, period, unit);
        }

        @Override
        public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay,
                TimeUnit unit) {
            purge();
            return super.scheduleWithFixedDelay(command, initialDelay, delay, unit);
        }
    };
}

From source file:org.apache.accumulo.core.clientImpl.ConditionalWriterImpl.java

ConditionalWriterImpl(ClientContext context, TableId tableId, ConditionalWriterConfig config) {
    this.context = context;
    this.auths = config.getAuthorizations();
    this.ve = new VisibilityEvaluator(config.getAuthorizations());
    this.threadPool = new ScheduledThreadPoolExecutor(config.getMaxWriteThreads(),
            new NamingThreadFactory(this.getClass().getSimpleName()));
    this.locator = new SyncingTabletLocator(context, tableId);
    this.serverQueues = new HashMap<>();
    this.tableId = tableId;
    this.timeout = config.getTimeout(TimeUnit.MILLISECONDS);
    this.durability = config.getDurability();
    this.classLoaderContext = config.getClassLoaderContext();

    Runnable failureHandler = () -> {
        List<QCMutation> mutations = new ArrayList<>();
        failedMutations.drainTo(mutations);
        if (mutations.size() > 0)
            queue(mutations);
    };

    failureHandler = new LoggingRunnable(log, failureHandler);

    threadPool.scheduleAtFixedRate(failureHandler, 250, 250, TimeUnit.MILLISECONDS);
}