Example usage for java.util.concurrent Executors newFixedThreadPool

Introduction

This page collects usage examples for java.util.concurrent.Executors.newFixedThreadPool.

Prototype

public static ExecutorService newFixedThreadPool(int nThreads) 

Document

Creates a thread pool that reuses a fixed number of threads operating off a shared unbounded queue.
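
The snippet below is a minimal, self-contained sketch of the typical create/submit/shutdown lifecycle. The pool size, task body, and class name are illustrative choices, not taken from any of the sources listed under Usage.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FixedPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        // A fixed pool keeps exactly four worker threads alive.
        ExecutorService pool = Executors.newFixedThreadPool(4);

        // Submitting more tasks than threads is fine: the extras wait
        // on the pool's shared unbounded queue until a worker is free.
        for (int i = 0; i < 10; i++) {
            final int id = i;
            pool.execute(() -> System.out.println("task " + id + " on "
                    + Thread.currentThread().getName()));
        }

        // Stop accepting new tasks, then wait for the queued ones to drain.
        pool.shutdown();
        pool.awaitTermination(30, TimeUnit.SECONDS);
    }
}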

Usage

From source file:com.referencelogic.xmlormupload.main.XmlormuploadMain.java
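
This example reads the pool size from an XML configuration file, falling back to 5 threads when the configured value is less than 1, submits one upload task per matched source file, and then shuts the pool down and awaits termination before closing the Hibernate SessionFactory.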

public void run() {
    try {
        XMLConfiguration config = new XMLConfiguration(configFileName);
        String sourceDir = config.getString("source.path");
        List allowedExtensions = config.getList("source.extensions");
        int poolSize = config.getInt("threadpool.size");

        String groovySourceDir = config.getString("domainclasses.path");
        List groovyAllowedExtensions = config.getList("domainclasses.extensions");

        if (isDebugging) {
            log.debug("Loaded configuration successfully. Reading groovy class list from: " + groovySourceDir
                    + " with allowed extensions " + groovyAllowedExtensions);
        }

        if (isDebugging) {
            log.debug("Loaded configuration successfully. Reading file list from: " + sourceDir
                    + " with allowed extensions " + allowedExtensions);
        }
        Iterator iter = FileUtils.iterateFiles(new File(sourceDir),
                (String[]) allowedExtensions.toArray(new String[allowedExtensions.size()]), true);
        if (poolSize < 1) {
            poolSize = 5;
        }

        exec = Executors.newFixedThreadPool(poolSize);

        GroovyClassLoader gcl = new GroovyClassLoader();

        ClassLoader ojcl = Thread.currentThread().getContextClassLoader();

        boolean allFilesResolved = false;
        while (!allFilesResolved) {
            Iterator groovyIter = FileUtils.iterateFiles(new File(groovySourceDir),
                    (String[]) groovyAllowedExtensions.toArray(new String[groovyAllowedExtensions.size()]),
                    true);

            allFilesResolved = true;

            while (groovyIter.hasNext()) {
                File groovyFile = (File) groovyIter.next();
                log.info("Trying to parse file " + groovyFile);
                try {
                    Class clazz = gcl.parseClass(groovyFile);
                } catch (IOException ioe) {
                    log.error("Unable to read file " + groovyFile + " to parse class ", ioe);
                } catch (Exception e) {
                    log.error("Unable to parse file " + groovyFile + " ex:" + e);
                    allFilesResolved = false;
                }
            }

        }

        Thread.currentThread().setContextClassLoader(gcl);

        Configuration hibernateConfig = new Configuration();

        SessionFactory sf;
        if (!matchRegex) {
            sf = hibernateConfig.configure(new File("hibernate.config.xml")).buildSessionFactory();
        } else {
            sf = hibernateConfig.configure(new File("hibernate.update.config.xml")).buildSessionFactory();
        }

        log.info("Opened session");

        while (iter.hasNext()) {
            File file = (File) iter.next();
            String filePath = "";
            try {
                filePath = file.getCanonicalPath();
                log.debug("Canonical path being processed is: " + filePath);
            } catch (IOException ioe) {
                log.warn("Unable to get canonical path from file", ioe);
            }
            log.debug("Is matchRegex true? " + matchRegex);
            log.debug("Does filePath match regexStr?" + filePath.matches(matchRegexStr));
            if ((!matchRegex) || (matchRegex && filePath.matches(matchRegexStr))) {
                exec.execute(new Xmlormuploader(file, config, gcl, sf));
            }
        }

        exec.shutdown();
        try {
            while (!exec.isTerminated()) {
                exec.awaitTermination(30, TimeUnit.SECONDS);
            }
        } catch (InterruptedException ie) {
            // Restore the interrupt flag; the session is closed below either way.
            Thread.currentThread().interrupt();
        }

        sf.close();

    } catch (ConfigurationException cex) {
        log.fatal("Unable to load config file " + configFileName + " to determine configuration.", cex);
    }
}

From source file:backup.datanode.DataNodeBackupProcessorBase.java
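
This backup processor sizes its pool from the Hadoop Configuration plus one extra thread, and registers the ExecutorService with a Guava Closer so the pool is torn down when the processor is closed.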

public DataNodeBackupProcessorBase(Configuration conf) throws Exception {
    int backupThreads = conf.getInt(DFS_BACKUP_DATANODE_BACKUP_THREAD_COUNT_KEY,
            DFS_BACKUP_DATANODE_BACKUP_THREAD_COUNT_DEFAULT);
    int queueDepth = conf.getInt(DFS_BACKUP_DATANODE_BACKUP_QUEUE_DEPTH_KEY,
            DFS_BACKUP_DATANODE_BACKUP_QUEUE_DEPTH_DEFAULT);
    _defaultAge = conf.getLong(DFS_BACKUP_DATANODE_BACKUP_AGE_KEY, DFS_BACKUP_DATANODE_BACKUP_AGE_DEFAULT);

    _closer = Closer.create();
    _service = _closer.register(Executors.newFixedThreadPool(backupThreads + 1));
    _backupQueue = new PriorityBlockingQueue<>(queueDepth);

    _backupQueueDepth = Metrics.METRICS.counter(QUEUE_BACKUP);
    _enqueueBackupDropMetric = Metrics.METRICS.histogram(ENQUEUE_BACKUP_DROP);
    _enqueueBackupRetryMetric = Metrics.METRICS.histogram(ENQUEUE_BACKUP_RETRY);
    _backupThroughput = Metrics.METRICS.meter(BACKUP_THROUGHPUT);

    startBackupThreads(backupThreads);
}

From source file:com.mozilla.bagheera.consumer.KafkaConsumer.java
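
This Kafka consumer allocates one pool thread per message stream and keeps the workers' Futures in a list for later bookkeeping.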

public KafkaConsumer(String topic, Properties props, int numThreads) {
    LOG.info("# of threads: " + numThreads);
    executor = Executors.newFixedThreadPool(numThreads);
    workers = new ArrayList<Future<Void>>(numThreads);

    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    consumerConnector = kafka.consumer.Consumer.createJavaConsumerConnector(consumerConfig);
    streams = consumerConnector.createMessageStreamsByFilter(new Whitelist(topic), numThreads);

    consumed = Metrics.newMeter(new MetricName("bagheera", "consumer", topic + ".consumed"), "messages",
            TimeUnit.SECONDS);
    invalidMessageMeter = Metrics.newMeter(new MetricName("bagheera", "consumer", topic + ".invalid"),
            "messages", TimeUnit.SECONDS);
}

From source file:com.amour.imagecrawler.ImagesManager.java
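
This image crawler sizes its pool from a properties file and submits one download task per image URL before shutting down and waiting for the work to drain.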

/**
 * Runs the crawler operation across multiple threads.
 * @param propertiesManager the Properties-Manager
 * @throws IOException
 * @throws java.security.NoSuchAlgorithmException
 */
public void run(Properties propertiesManager) throws IOException, NoSuchAlgorithmException, Exception {

    ExecutorService executor = Executors.newFixedThreadPool(
            Integer.parseInt(propertiesManager.getProperty(Crawler.NUMBER_OF_WORKER_THREADS_KEY)));
    for (String imageUrl : this.imagesList) {

        Runnable worker = new ImageRunable(imageUrl, propertiesManager);
        executor.execute(worker);
    }
    executor.shutdown();
    // Wait for all submitted downloads to finish (avoids busy-spinning on isTerminated()).
    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
}

From source file:com.linkedin.pinot.core.query.scheduler.QueryScheduler.java
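
This Pinot query scheduler creates two fixed pools, one for query runners and one for query workers, and wraps each in Guava's MoreExecutors.listeningDecorator to obtain ListeningExecutorService instances.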

public QueryScheduler(@Nonnull Configuration schedulerConfig, QueryExecutor queryExecutor) {
    Preconditions.checkNotNull(schedulerConfig);
    numQueryRunnerThreads = schedulerConfig.getInt(QUERY_RUNNER_CONFIG_KEY, DEFAULT_QUERY_RUNNER_THREADS);
    numQueryWorkerThreads = schedulerConfig.getInt(QUERY_WORKER_CONFIG_KEY, DEFAULT_QUERY_WORKER_THREADS);
    queryRunners = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numQueryRunnerThreads));
    queryWorkers = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numQueryWorkerThreads));
    this.queryExecutor = queryExecutor;
}

From source file:io.rhiot.spec.Driver.java
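
This driver sizes the pool to the number of features, runs them all with invokeAll, and then inspects each returned Future for execution errors.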

@Override
public Void call() throws Exception {
    LOG.info(this + " started");
    if (transport != null) {
        try {
            transport.connect();
        } catch (Exception e) {
            LOG.warn("Error connecting driver " + name, e);
            return null;
        }
    }

    executorService = Executors.newFixedThreadPool(features.size());
    List<Future<Void>> results = executorService.invokeAll(features);
    executorService.shutdown();
    executorService.awaitTermination(5, TimeUnit.SECONDS);
    results.forEach(result -> {
        try {
            result.get();
        } catch (ExecutionException execution) {
            LOG.warn("Exception running driver", execution);
        } catch (Exception interrupted) {
        }
    });

    if (transport != null) {
        try {
            transport.disconnect();
        } catch (Exception e) {
            LOG.warn("Error disconnecting driver " + name, e);
        }
    }
    LOG.info(this + " stopped");
    return null;
}

From source file:com.blacklocus.qs.worker.util.log.SamplingQSLogServiceTest.java
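
This test fires 64 pool threads at a sampling log service and asserts an upper bound on how many task IDs the sampler lets through.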

@Test
public void testSampledLifeCycle() throws InterruptedException {

    // With these parameters, by far most logger interactions should be filtered out, very few sampled in.
    final int numThreads = 64, iterations = 200, processingJitterMaxMs = 16, noSoonerThanMs = 100;

    final Set<String> sampledTaskIds = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

    final QSLogService logService = Mockito.mock(QSLogService.class);
    // track which logging interactions were allowed through (sampled in)
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSTaskModel) invocation.getArguments()[0]).taskId);
            return null;
        }
    }).when(logService).startedTask(Matchers.any(QSTaskModel.class));
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSLogModel) invocation.getArguments()[0]).taskId);
            return null; //TODO jason
        }
    }).when(logService).log(Matchers.any(QSLogModel.class));
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSTaskModel) invocation.getArguments()[0]).taskId);
            return null; //TODO jason
        }
    }).when(logService).completedTask(Matchers.any(QSTaskModel.class));

    Predicate<QSTaskModel> taskPredicate = SamplingPredicates.noSoonerThan(noSoonerThanMs,
            TimeUnit.MILLISECONDS);
    final QSLogService sampledLogService = new SamplingQSLogService(logService, taskPredicate);

    long startNs = System.nanoTime();
    ExecutorService threads = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        threads.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                LOG.debug("Thread start {}", Thread.currentThread().getName());
                for (int i = 0; i < iterations; i++) {

                    String taskId = UUID.randomUUID().toString();

                    // simulate task processing, some have logs, some don't, processing time varies between each step
                    QSTaskModel task = new QSTaskModel();
                    task.taskId = taskId;
                    Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));

                    sampledLogService.startedTask(task);
                    Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));

                    // random number of associated logs [0, 2]
                    for (int j = RandomUtils.nextInt(2); j > 0; j--) {
                        QSLogModel log = new QSLogModel();
                        log.taskId = taskId;
                        sampledLogService.log(log);
                        Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));
                    }

                    sampledLogService.completedTask(task);
                }
                LOG.debug("Thread end {}", Thread.currentThread().getName());
                return null;

            }
        });
    }
    threads.shutdown();
    threads.awaitTermination(1, TimeUnit.MINUTES);
    long endNs = System.nanoTime();

    // Theoretical maximum number of sampled in task logging
    long durationMs = TimeUnit.NANOSECONDS.toMillis(endNs - startNs);
    long expectedMax = durationMs / noSoonerThanMs + 1; // +1 for time@0: sampled in
    LOG.debug("Run duration: {}ms  no sooner than: {}ms", durationMs, noSoonerThanMs);
    LOG.debug("Expected max sampled in: {}  Actually sampled: {}", expectedMax, sampledTaskIds.size());
    Assert.assertTrue(expectedMax >= sampledTaskIds.size());
}

From source file:com.netflix.curator.framework.recipes.atomic.TestDistributedAtomicLong.java
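
This Curator test runs two competing tasks on a two-thread pool: one repeatedly increments a distributed counter while the other repeatedly forces it back to zero.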

@Test
public void testForceSet() throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        final DistributedAtomicLong dal = new DistributedAtomicLong(client, "/counter", new RetryOneTime(1));

        ExecutorService executorService = Executors.newFixedThreadPool(2);
        executorService.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                for (int i = 0; i < 1000; ++i) {
                    dal.increment();
                    Thread.sleep(10);
                }
                return null;
            }
        });
        executorService.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                for (int i = 0; i < 1000; ++i) {
                    dal.forceSet(0L);
                    Thread.sleep(10);
                }
                return null;
            }
        });

        Assert.assertTrue(dal.get().preValue() < 10);
    } finally {
        client.close();
    }
}

From source file:com.flipkart.foxtrot.core.querystore.impl.ElasticsearchQueryStoreTest.java
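
This test setup backs the Foxtrot QueryExecutor with a single-threaded pool.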

@Before
public void setUp() throws Exception {
    mapper = new ObjectMapper();
    ElasticsearchUtils.setMapper(mapper);
    dataStore = TestUtils.getDataStore();

    elasticsearchServer = new MockElasticsearchServer(UUID.randomUUID().toString());
    ElasticsearchConnection elasticsearchConnection = Mockito.mock(ElasticsearchConnection.class);
    when(elasticsearchConnection.getClient()).thenReturn(elasticsearchServer.getClient());
    ElasticsearchUtils.initializeMappings(elasticsearchConnection.getClient());
    tableMetadataManager = Mockito.mock(TableMetadataManager.class);
    when(tableMetadataManager.exists(TestUtils.TEST_TABLE_NAME)).thenReturn(true);
    when(tableMetadataManager.get(anyString())).thenReturn(TestUtils.TEST_TABLE);
    AnalyticsLoader analyticsLoader = new AnalyticsLoader(tableMetadataManager, dataStore, queryStore,
            elasticsearchConnection);
    ExecutorService executorService = Executors.newFixedThreadPool(1);
    QueryExecutor queryExecutor = new QueryExecutor(analyticsLoader, executorService);
    queryStore = new ElasticsearchQueryStore(tableMetadataManager, elasticsearchConnection, dataStore);
}

From source file:grakn.core.deduplicator.AttributeDeduplicatorE2E.java
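
This end-to-end test inserts duplicate attributes in parallel on an eight-thread pool, then waits for the deduplicator to reduce them to the unique names.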

@Test
public void shouldDeduplicateAttributes() throws InterruptedException, ExecutionException {
    int numOfUniqueNames = 10;
    int numOfDuplicatesPerName = 673;
    ExecutorService executorServiceForParallelInsertion = Executors.newFixedThreadPool(8);

    LOG.info("initiating the shouldDeduplicate10AttributesWithDuplicates test...");
    try (GraknClient.Session session = localhostGrakn.session("attribute_deduplicator_e2e")) {
        // insert numOfUniqueNames unique attributes, each with numOfDuplicatesPerName duplicates
        LOG.info("defining the schema...");
        defineParentChildSchema(session);
        LOG.info("inserting " + numOfUniqueNames + " unique attributes with " + numOfDuplicatesPerName
                + " duplicates per attribute....");
        insertNameShuffled(session, numOfUniqueNames, numOfDuplicatesPerName,
                executorServiceForParallelInsertion);

        // wait until queue is empty
        LOG.info("names and duplicates have been inserted. waiting for the deduplication to finish...");
        long timeoutMs = 10000;
        waitUntilAllAttributesDeduplicated(timeoutMs);
        LOG.info("deduplication has finished.");

        // verify deduplicated attributes
        LOG.info("verifying the number of attributes");
        int countAfterDeduplication = countTotalNames(session);
        assertThat(countAfterDeduplication, equalTo(numOfUniqueNames));
        LOG.info("test completed successfully. there are " + countAfterDeduplication + " unique names found");
    }
}