Example usage for java.util.concurrent Executors newCachedThreadPool

List of usage examples for java.util.concurrent Executors newCachedThreadPool

Introduction

On this page you can find example usage for java.util.concurrent Executors.newCachedThreadPool.

Prototype

public static ExecutorService newCachedThreadPool() 

Document

Creates a thread pool that creates new threads as needed, but will reuse previously constructed threads when they are available.
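
Before the project examples below, here is a minimal, self-contained sketch of the typical lifecycle: create the pool, submit a few tasks, then shut it down and wait for the tasks to finish. The task count and messages are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CachedThreadPoolExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newCachedThreadPool();
        for (int i = 0; i < 5; i++) {
            final int taskId = i; // illustrative task id
            pool.submit(() -> System.out.println(
                    "task " + taskId + " ran on " + Thread.currentThread().getName()));
        }
        pool.shutdown(); // stop accepting new tasks
        pool.awaitTermination(10, TimeUnit.SECONDS); // wait for submitted tasks to complete
    }
}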

Usage

From source file:com.bigdata.rdf.sail.remote.BigdataSailRemoteRepository.java

/**
 * Ctor that simply specifies an endpoint and lets this class manage the
 * ClientConnectionManager for the HTTP client and the ExecutorService.
 * More convenient.
 */
public BigdataSailRemoteRepository(final String sparqlEndpointURL) {

    this.executor = Executors.newCachedThreadPool();

    this.ccm = DefaultClientConnectionManagerFactory.getInstance().newInstance();

    final DefaultHttpClient httpClient = new DefaultHttpClient(ccm);

    /*
     * Enable a standard http redirect policy. This allows references to
     * http://localhost:8080 to be redirected to
     * http://localhost:8080/bigdata.
     */
    httpClient.setRedirectStrategy(new DefaultRedirectStrategy());

    this.nss = new RemoteRepository(sparqlEndpointURL, httpClient, executor);

}

From source file:com.nhn.android.archetype.base.AABaseApplication.java

protected void initM2base() {

    // m2base code
    workExecutor = Executors.newCachedThreadPool();
    handler = new Handler(Looper.getMainLooper());
    backgroundHandlerThread = new HandlerThread("BandBackgroundHandlerThread");
    backgroundHandlerThread.start();
    backgroundHandler = new Handler(backgroundHandlerThread.getLooper());

    JsonWorker.init();

    logger.d("Application init completed.....");
}

From source file:be.vlaanderen.sesam.proxy.internal.http.HttpCentral.java

public HttpCentral() {
    // TODO move to BundleActivator; (might do this as spring bean as well)
    executorService = Executors.newCachedThreadPool();
}

From source file:com.alibaba.otter.shared.arbitrate.zookeeper.DistributedReentrantLockTest.java

@Test
public void test_try_lock() {
    ExecutorService executor = Executors.newCachedThreadPool();
    final int count = 50;
    final CountDownLatch latch = new CountDownLatch(count);

    final DistributedReentrantLock lock = new DistributedReentrantLock(dir);
    for (int i = 0; i < count; i++) {
        executor.submit(new Runnable() {

            public void run() {
                try {
                    while (!lock.tryLock()) {
                        Thread.sleep(100 + RandomUtils.nextInt(100));
                    }

                    System.out.println("id: " + lock.getId() + " is leader: " + lock.isOwner());
                } catch (InterruptedException e) {
                    want.fail();
                } catch (KeeperException e) {
                    want.fail();
                } finally {
                    latch.countDown();
                    try {
                        lock.unlock();
                    } catch (KeeperException e) {
                        want.fail();
                    }
                }

            }
        });
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        want.fail();
    }

    executor.shutdown();
}

From source file:ninja.eivind.hotsreplayuploader.services.UploaderService.java

public UploaderService() throws IOException {
    logger.info("Instantiating " + getClass().getSimpleName());
    uploadQueue = new LinkedBlockingQueue<>();
    files = FXCollections.observableArrayList();
    setExecutor(Executors.newCachedThreadPool());
    logger.info("Instantiated " + getClass().getSimpleName());
}

From source file:com.alibaba.otter.shared.arbitrate.zookeeper.DistributedLockTest.java

@Test
public void test_try_lock() {
    ExecutorService executor = Executors.newCachedThreadPool();
    final int count = 50;
    final CountDownLatch latch = new CountDownLatch(count);

    final DistributedLock[] nodes = new DistributedLock[count];
    for (int i = 0; i < count; i++) {
        final DistributedLock node = new DistributedLock(dir);
        nodes[i] = node;
        executor.submit(new Runnable() {

            public void run() {
                try {
                    while (!node.tryLock()) {
                        Thread.sleep(100 + RandomUtils.nextInt(100));
                    }

                    System.out.println("id: " + node.getId() + " is leader: " + node.isOwner());
                } catch (InterruptedException e) {
                    want.fail();
                } catch (KeeperException e) {
                    want.fail();
                } finally {
                    latch.countDown();
                    try {
                        node.unlock();
                    } catch (KeeperException e) {
                        want.fail();
                    }
                }

            }
        });
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        want.fail();
    }

    executor.shutdown();
}

From source file:com.fly1tkg.streamfileupload.FileUploadFacade.java

public void post(final String url, final String fileKey, final File file, final String contentType,
        final Map<String, String> params, final FileUploadCallback callback) {

    if (null == callback) {
        throw new RuntimeException("FileUploadCallback should not be null.");
    }

    ExecutorService executorService = Executors.newCachedThreadPool();
    executorService.execute(new Runnable() {
        public void run() {
            try {
                HttpPost httpPost = new HttpPost(url);

                FileBody fileBody;
                if (null == contentType) {
                    fileBody = new FileBody(file);
                } else {
                    fileBody = new FileBody(file, contentType);
                }

                MultipartEntity entity = new MultipartEntity(HttpMultipartMode.BROWSER_COMPATIBLE);
                if (null == fileKey) {
                    entity.addPart(DEFAULT_FILE_KEY, fileBody);
                } else {
                    entity.addPart(fileKey, fileBody);
                }

                if (null != params) {
                    for (Map.Entry<String, String> e : params.entrySet()) {
                        entity.addPart(e.getKey(), new StringBody(e.getValue()));
                    }
                }

                httpPost.setEntity(entity);

                upload(httpPost, callback);
            } catch (UnsupportedEncodingException e) {
                callback.onFailure(-1, null, e);
            }
        }
    });
}

From source file:reactor.ipc.netty.tcp.TcpServerTests.java

@Before
public void loadEnv() {
    latch = new CountDownLatch(msgs * threads);
    threadPool = Executors.newCachedThreadPool();
}

From source file:com.easarrive.aws.plugins.common.service.impl.SQSNotificationService.java

/**
 * {@inheritDoc}
 */
@Override
public List<NotificationHandleResult<Message, Boolean>> handleNotification(Message message)
        throws AWSPluginException {
    try {
        if (message == null) {
            throw new AWSPluginException("The message is null");
        }
        if (notificationHandlerList == null) {
            throw new AWSPluginException("The notificationHandlerList is null for message (ID : %s)",
                    message.getMessageId());
        }
        if (notificationHandlerList.isEmpty()) {
            throw new AWSPluginException("The notificationHandlerList is empty for message (ID : %s)",
                    message.getMessageId());
        }
        ExecutorService executorService = Executors.newCachedThreadPool();
        List<Future<List<NotificationHandleResult<Message, Boolean>>>> resultList = new ArrayList<Future<List<NotificationHandleResult<Message, Boolean>>>>();
        for (INotificationHandler<Message, Boolean> notificationHandler : notificationHandlerList) {
            Future<List<NotificationHandleResult<Message, Boolean>>> future = executorService
                    .submit(new NotificationHandlerCallable(notificationHandler, message));
            resultList.add(future);
        }

        List<NotificationHandleResult<Message, Boolean>> returnList = new ArrayList<NotificationHandleResult<Message, Boolean>>();
        // Wait for each handler future and collect its results
        for (Future<List<NotificationHandleResult<Message, Boolean>>> fs : resultList) {
            try {
                List<NotificationHandleResult<Message, Boolean>> result = fs.get();
                for (NotificationHandleResult<Message, Boolean> notificationHandleResult : result) {
                    returnList.add(notificationHandleResult);
                }
            } catch (Exception e) {
                if (logger.isErrorEnabled()) {
                    logger.error(e.getMessage(), e);
                }
                returnList.add(
                        new NotificationHandleResult<Message, Boolean>(message.getMessageId(), message, false));
            } finally {
                // Shut down the executor; already-submitted handlers still run to completion
                executorService.shutdown();
            }
        }
        return returnList;
    } catch (Exception e) {
        throw new AWSPluginException(e.getMessage(), e);
    }
}

From source file:com.codefollower.lealone.omid.tso.TSOServer.java

@Override
public void run() {
    // *** Start the Netty configuration ***
    // Start the server with at most 2 * CPU count + 1 active threads.
    ChannelFactory factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);

    ServerBootstrap bootstrap = new ServerBootstrap(factory);
    // Create the global ChannelGroup
    ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());
    // threads max
    // int maxThreads = Runtime.getRuntime().availableProcessors() *2 + 1;
    int maxThreads = 5;
    // Memory limits: 1 MB per channel, 1 GB global, 100 ms timeout
    ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(maxThreads, 1048576,
            1073741824, 100, TimeUnit.MILLISECONDS, new ObjectSizeEstimator() {
                @Override
                public int estimateSize(Object o) {
                    return 1000;
                }
            }, Executors.defaultThreadFactory());

    state = BookKeeperStateBuilder.getState(config);
    if (state == null) {
        LOG.error("Couldn't build state");
        return;
    }

    state.addRecord(new byte[] { LoggerProtocol.LOG_START }, new AddRecordCallback() {
        @Override
        public void addRecordComplete(int rc, Object ctx) {
        }
    }, null);

    LOG.info("PARAM MAX_ITEMS: " + TSOState.MAX_ITEMS);
    LOG.info("PARAM BATCH_SIZE: " + config.getBatchSize());
    LOG.info("PARAM MAX_THREADS: " + maxThreads);

    final TSOHandler handler = new TSOHandler(channelGroup, state, config.getBatchSize());
    handler.start();

    bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
    bootstrap.setOption("tcpNoDelay", false);
    // Setting the buffer size can improve I/O
    bootstrap.setOption("child.sendBufferSize", 1048576);
    bootstrap.setOption("child.receiveBufferSize", 1048576);
    // better to have a receive buffer predictor
    bootstrap.setOption("receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory());
    // If the server is sending 1000 messages per second, optimal write buffer
    // watermarks will prevent unnecessary throttling; check the NioSocketChannelConfig doc.
    bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
    bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);

    bootstrap.setOption("child.tcpNoDelay", false);
    bootstrap.setOption("child.keepAlive", true);
    bootstrap.setOption("child.reuseAddress", true);
    bootstrap.setOption("child.connectTimeoutMillis", 60000);

    // *** Start the Netty running ***

    // Create the monitor
    ThroughputMonitor monitor = new ThroughputMonitor(state);
    // Add the parent channel to the group
    Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
    channelGroup.add(channel);

    // Compacter handler
    ChannelFactory comFactory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
    ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
    ChannelGroup comGroup = new DefaultChannelGroup("compacter");
    final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
    comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {

        @Override
        public ChannelPipeline getPipeline() throws Exception {
            ChannelPipeline pipeline = Channels.pipeline();
            pipeline.addLast("decoder", new ObjectDecoder());
            pipeline.addLast("encoder", new ObjectEncoder());
            pipeline.addLast("handler", comHandler);
            return pipeline;
        }
    });
    comBootstrap.setOption("tcpNoDelay", false);
    comBootstrap.setOption("child.tcpNoDelay", false);
    comBootstrap.setOption("child.keepAlive", true);
    comBootstrap.setOption("child.reuseAddress", true);
    comBootstrap.setOption("child.connectTimeoutMillis", 100);
    comBootstrap.setOption("readWriteFair", true);
    channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));

    // Starts the monitor
    monitor.start();
    synchronized (lock) {
        while (!finish) {
            try {
                lock.wait();
            } catch (InterruptedException e) {
                break;
            }
        }
    }

    handler.stop();
    comHandler.stop();
    state.stop();

    // *** Start the Netty shutdown ***

    // End the monitor
    LOG.info("End of monitor");
    monitor.interrupt();
    // Now close all channels
    LOG.info("End of channel group");
    channelGroup.close().awaitUninterruptibly();
    comGroup.close().awaitUninterruptibly();
    // Close the executor for Pipeline
    LOG.info("End of pipeline executor");
    pipelineExecutor.shutdownNow();
    // Now release resources
    LOG.info("End of resources");
    factory.releaseExternalResources();
    comFactory.releaseExternalResources();
}