Example usage for java.util.concurrent Executors newCachedThreadPool

Introduction

On this page you can find example usages of java.util.concurrent.Executors.newCachedThreadPool() collected from open source projects.

Prototype

public static ExecutorService newCachedThreadPool() 

Document

Creates a thread pool that creates new threads as needed, but will reuse previously constructed threads when they are available.
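
Before the project examples below, here is a minimal, self-contained sketch of the typical lifecycle (the class and task names are illustrative, not taken from any of the projects): submit a burst of tasks, then shut the pool down so the JVM does not stay alive while idle threads wait out their 60-second keep-alive.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CachedThreadPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newCachedThreadPool();
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            // Bursts of short tasks reuse idle threads instead of spawning new ones.
            pool.execute(() -> System.out.println(
                    "task " + taskId + " ran on " + Thread.currentThread().getName()));
        }
        pool.shutdown(); // stop accepting tasks; lets the JVM exit once submitted tasks finish
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}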

Usage

From source file:net.oneandone.stool.overview.config.OverviewConfiguration.java
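
A Spring @Bean factory method that exposes a cached thread pool as the application's shared ExecutorService.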

@Bean
public ExecutorService executorService() {
    return Executors.newCachedThreadPool();
}

From source file:com.netflix.curator.framework.recipes.queue.TestBoundedDistributedQueue.java
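
A Curator test that shares one cached thread pool between a ZooKeeper watcher loop and several producer clients driven through an ExecutorCompletionService, then verifies the bounded queue never grows far beyond its maximum.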

@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Test
public void testMulti() throws Exception {
    final String PATH = "/queue";
    final int CLIENT_QTY = 4;
    final int MAX_ITEMS = 10;
    final int ADD_ITEMS = MAX_ITEMS * 100;
    final int SLOP_FACTOR = 2;

    final QueueConsumer<String> consumer = new QueueConsumer<String>() {
        @Override
        public void consumeMessage(String message) throws Exception {
            Thread.sleep(10);
        }

        @Override
        public void stateChanged(CuratorFramework client, ConnectionState newState) {
        }
    };

    final Timing timing = new Timing();
    final ExecutorService executor = Executors.newCachedThreadPool();
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executor);

    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    try {
        client.start();
        client.create().forPath(PATH);

        final CountDownLatch isWaitingLatch = new CountDownLatch(1);
        final AtomicBoolean isDone = new AtomicBoolean(false);
        final List<Integer> counts = new CopyOnWriteArrayList<Integer>();
        final Object lock = new Object();
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Watcher watcher = new Watcher() {
                    @Override
                    public void process(WatchedEvent event) {
                        synchronized (lock) {
                            lock.notifyAll();
                        }
                    }
                };

                while (!Thread.currentThread().isInterrupted() && client.isStarted() && !isDone.get()) {
                    synchronized (lock) {
                        int size = client.getChildren().usingWatcher(watcher).forPath(PATH).size();
                        counts.add(size);
                        isWaitingLatch.countDown();
                        lock.wait();
                    }
                }
                return null;
            }
        });
        isWaitingLatch.await();

        for (int i = 0; i < CLIENT_QTY; ++i) {
            final int index = i;
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = null;
                    DistributedQueue<String> queue = null;

                    try {
                        client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
                                timing.connection(), new RetryOneTime(1));
                        client.start();
                        queue = QueueBuilder.builder(client, consumer, serializer, PATH).executor(executor)
                                .maxItems(MAX_ITEMS).putInBackground(false).lockPath("/locks").buildQueue();
                        queue.start();

                        for (int i = 0; i < ADD_ITEMS; ++i) {
                            queue.put("" + index + "-" + i);
                        }
                    } finally {
                        IOUtils.closeQuietly(queue);
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.take().get();
        }

        isDone.set(true);
        synchronized (lock) {
            lock.notifyAll();
        }

        for (int count : counts) {
            Assert.assertTrue(counts.toString(), count <= (MAX_ITEMS * SLOP_FACTOR));
        }
    } finally {
        executor.shutdownNow();
        IOUtils.closeQuietly(client);
    }
}

From source file:org.apache.ignite.console.agent.handlers.RestListener.java
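
An override of a template method that supplies the REST listener's worker pool.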

/** {@inheritDoc} */
@Override
protected ExecutorService newThreadPool() {
    return Executors.newCachedThreadPool();
}

From source file:co.paralleluniverse.galaxy.netty.AbstractTcpServer.java
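
Netty server initialization that creates cached thread pools for the boss and worker executors. Note that the casts to ThreadPoolExecutor rely on the JDK's current implementation; newCachedThreadPool() is only declared to return an ExecutorService.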

@Override
protected void init() throws Exception {
    super.init();
    if (bossExecutor == null)
        bossExecutor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    if (workerExecutor == null)
        workerExecutor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    final short currentNodeId = getCluster().getMyNodeId();
    configureThreadPool(currentNodeId + "-" + getName() + "-tcpServerBoss", bossExecutor);
    configureThreadPool(currentNodeId + "-" + getName() + "-tcpServerWorker", workerExecutor);
    if (receiveExecutor != null)
        configureThreadPool(currentNodeId + "-" + getName() + "-tcpServerReceive", receiveExecutor);

    channelFactory = new NioServerSocketChannelFactory(
            new NioServerBossPool(bossExecutor, NettyUtils.DEFAULT_BOSS_COUNT, KEEP_UNCHANGED_DETERMINER),
            new NioWorkerPool(workerExecutor, NettyUtils.getWorkerCount(workerExecutor),
                    KEEP_UNCHANGED_DETERMINER));
    bootstrap = new ServerBootstrap(channelFactory);

    origChannelFacotry = new TcpMessagePipelineFactory(LOG, channels, receiveExecutor) {
        @Override
        public ChannelPipeline getPipeline() throws Exception {
            final ChannelPipeline pipeline = super.getPipeline();
            pipeline.addBefore("messageCodec", "nodeNameReader", new ChannelNodeNameReader(getCluster()));
            pipeline.addLast("router", channelHandler);
            if (testHandler != null)
                pipeline.addLast("test", testHandler);
            return pipeline;
        }
    };

    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
        @Override
        public ChannelPipeline getPipeline() throws Exception {
            return AbstractTcpServer.this.getPipeline();
        }
    });
    //bootstrap.setParentHandler(new LoggingHandler(LOG));

    bootstrap.setOption("reuseAddress", true);
    bootstrap.setOption("child.tcpNoDelay", true);
    bootstrap.setOption("child.keepAlive", true);
}

From source file:com.aretha.content.image.AsyncImageLoader.java
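
An Android image loader that runs downloads on a cached thread pool and posts results back to the UI through a Handler bound to the main looper.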

private AsyncImageLoader(Context context) {
    mContext = context.getApplicationContext();
    mFileCacheManager = new FileCacheManager(context);
    mExecutor = Executors.newCachedThreadPool();
    mTaskList = new LinkedList<ImageLoadingTask>();
    DisplayMetrics displayMetrics = context.getResources().getDisplayMetrics();
    mScreenWidth = displayMetrics.widthPixels;
    mScreenHeight = displayMetrics.heightPixels;
    // will notify the main thread
    mImageLoadedHandler = new ImageLoadHandler(context.getMainLooper());
}

From source file:byps.test.TestRemoteServerR.java
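
A stress test that starts five clients on a cached thread pool and blocks on a CountDownLatch until all of them finish.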

public void testSimultan() throws InterruptedException {
    Executor tpool = Executors.newCachedThreadPool();
    int nbOfThreads = 5;
    final CountDownLatch cdl = new CountDownLatch(nbOfThreads);
    for (int t = 0; t < nbOfThreads; t++) {
        Runnable run = new Runnable() {
            public void run() {
                TestRemoteServerR testObj = new TestRemoteServerR();
                try {
                    testObj.testLoop10();
                } catch (Throwable e) {
                    log.error("exception", e);
                } finally {
                    cdl.countDown();
                }
            }
        };
        tpool.execute(run);
    }
    cdl.await();
}

From source file:com.olacabs.fabric.compute.sources.kafka.impl.KafkaReaderLeaderElector.java
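
A constructor that wires in a cached thread pool; the commented-out alternative shows it replaced a fixed pool sized to the number of readers.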

public KafkaReaderLeaderElector(String topology, String topic, Map<Integer, KafkaMessageReader> readers,
        CuratorFramework curatorFramework, ObjectMapper mapper) {
    this.topology = topology;
    this.topic = topic;
    this.readers = readers;
    this.curatorFramework = curatorFramework;
    this.mapper = mapper;

    this.readerId = UUID.randomUUID().toString();
    this.executorService = Executors.newCachedThreadPool(); //= Executors.newFixedThreadPool(readers.size());
}

From source file:org.oesf.eque.services.impl.QueueDispatcherImpl.java
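
A @PostConstruct hook that creates the dispatcher's thread pool once the bean is ready.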

@PostConstruct
public void setup() {
    threadPool = Executors.newCachedThreadPool();
    LOG.debug("QDS-C53016E36B8B: Is up and running.");
}

From source file:com.linkedin.pinot.integration.tests.RealtimeClusterIntegrationTest.java
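
Integration test setup that uses one cached thread pool to parallelize the H2 load, query-generator initialization, and Kafka ingestion, then waits for all three via shutdown() and awaitTermination().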

@BeforeClass
public void setUp() throws Exception {
    // Start ZK and Kafka
    startZk();
    kafkaStarters = KafkaStarterUtils.startServers(getKafkaBrokerCount(), KafkaStarterUtils.DEFAULT_KAFKA_PORT,
            KafkaStarterUtils.DEFAULT_ZK_STR, KafkaStarterUtils.getDefaultKafkaConfiguration());

    // Create Kafka topic
    createKafkaTopic(KAFKA_TOPIC, KafkaStarterUtils.DEFAULT_ZK_STR);

    // Start the Pinot cluster
    startController();
    startBroker();
    startServer();

    // Unpack data
    final List<File> avroFiles = unpackAvroData(_tmpDir, SEGMENT_COUNT);

    File schemaFile = getSchemaFile();

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    // Push data into the Kafka topic
    pushAvroIntoKafka(avroFiles, executor, KAFKA_TOPIC);

    // Wait for data push, query generator initialization and H2 load to complete
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Create Pinot table
    setUpTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC,
            schemaFile, avroFiles.get(0));

    // Wait until the Pinot event count matches with the number of events in the Avro files
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;
    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    int h2RecordCount = rs.getInt(1);
    rs.close();

    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}

From source file:com.adaptris.http.HttpListener.java
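
Because newCachedThreadPool() returns a pool with a core size of zero, this constructor raises the core pool size so that poolSize threads stay resident to service requests.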

/**
 * HttpListener Constructor.
 * 
 * @param listenPort the port to listen on
 * @param poolSize the initial size of the threadpool that will service
 *          requests.
 */
public HttpListener(int listenPort, int poolSize) {
    this();
    this.listenPort = listenPort;
    this.poolSize = poolSize;
    requestProcessors = new Hashtable();
    dispatcherPool = Executors.newCachedThreadPool();
    if (dispatcherPool instanceof ThreadPoolExecutor) {
        ((ThreadPoolExecutor) dispatcherPool).setCorePoolSize(poolSize);
    }
}