Example usage for io.netty.channel AdaptiveRecvByteBufAllocator AdaptiveRecvByteBufAllocator

Introduction

On this page you can find example usages of the io.netty.channel.AdaptiveRecvByteBufAllocator constructor, collected from open-source projects.

Prototype

public AdaptiveRecvByteBufAllocator(int minimum, int initial, int maximum) 

Document

Creates a new predictor with the specified parameters: minimum is the inclusive lower bound of the expected buffer size, initial is the buffer size used before any feedback is received, and maximum is the inclusive upper bound of the expected buffer size.
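
Before the project examples, here is a minimal, self-contained sketch of the typical pattern: construct the allocator with minimum/initial/maximum bounds and register it through ChannelOption.RCVBUF_ALLOCATOR. The bounds, port, and handler below are illustrative placeholders, not values taken from any of the projects.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.AdaptiveRecvByteBufAllocator;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LoggingHandler;

public class AdaptiveAllocatorSketch {
    public static void main(String[] args) throws InterruptedException {
        NioEventLoopGroup bossGroup = new NioEventLoopGroup(1);
        NioEventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                    // Per-connection receive buffers adapt between 512 B and 64 KiB,
                    // starting from an 8 KiB guess.
                    .childOption(ChannelOption.RCVBUF_ALLOCATOR,
                            new AdaptiveRecvByteBufAllocator(512, 8 * 1024, 64 * 1024))
                    .childHandler(new LoggingHandler());
            b.bind(8080).sync().channel().closeFuture().sync(); // 8080 is an arbitrary port
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}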

Usage

From source file:com.caocao.nio.server.NettyServer.java

License:Apache License

public void initAndStart() {
    System.out.println("port:" + port);
    // Configure the server.
    bossGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors() * 2);
    workerGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors() * 2);
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 128).option(ChannelOption.SO_KEEPALIVE, true)
                .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_SNDBUF, 5 * 1024)
                .option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(40, 64, 1024))
                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .handler(new LoggingHandler(LogLevel.INFO)).childHandler(new CustomerInitializer());

        // Start the server.
        ChannelFuture f = b.bind(port).sync();

        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        logger.error("netty??", e);
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.yahoo.pulsar.broker.service.BrokerService.java

License:Apache License

public void start() throws Exception {
    this.producerNameGenerator = new DistributedIdGenerator(pulsar.getZkClient(), producerNameGeneratorPath,
            pulsar.getConfiguration().getClusterName());

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    bootstrap.group(acceptorGroup, workerGroup);
    bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
            new AdaptiveRecvByteBufAllocator(1024, 16 * 1024, 1 * 1024 * 1024));

    if (workerGroup instanceof EpollEventLoopGroup) {
        bootstrap.channel(EpollServerSocketChannel.class);
        bootstrap.childOption(EpollChannelOption.EPOLL_MODE, EpollMode.LEVEL_TRIGGERED);
    } else {
        bootstrap.channel(NioServerSocketChannel.class);
    }

    ServiceConfiguration serviceConfig = pulsar.getConfiguration();

    bootstrap.childHandler(new PulsarChannelInitializer(this, serviceConfig, false));
    // Bind and start to accept incoming connections.
    bootstrap.bind(new InetSocketAddress(pulsar.getBindAddress(), port)).sync();
    log.info("Started Pulsar Broker service on port {}", port);

    if (serviceConfig.isTlsEnabled()) {
        ServerBootstrap tlsBootstrap = bootstrap.clone();
        tlsBootstrap.childHandler(new PulsarChannelInitializer(this, serviceConfig, true));
        tlsBootstrap.bind(new InetSocketAddress(pulsar.getBindAddress(), tlsPort)).sync();
        log.info("Started Pulsar Broker TLS service on port {}", tlsPort);
    }

    // start other housekeeping functions
    this.startStatsUpdater();
    this.startInactivityMonitor();
    this.startMessageExpiryMonitor();
    this.startBacklogQuotaChecker();
}

From source file:com.yahoo.pulsar.discovery.service.DiscoveryService.java

License:Apache License

/**
 * Starts the server to handle discovery requests from client channels.
 *
 * @throws Exception
 */
public void startServer() throws Exception {

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    bootstrap.group(acceptorGroup, workerGroup);
    bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
            new AdaptiveRecvByteBufAllocator(1024, 16 * 1024, 1 * 1024 * 1024));

    if (workerGroup instanceof EpollEventLoopGroup) {
        bootstrap.channel(EpollServerSocketChannel.class);
        bootstrap.childOption(EpollChannelOption.EPOLL_MODE, EpollMode.LEVEL_TRIGGERED);
    } else {
        bootstrap.channel(NioServerSocketChannel.class);
    }

    bootstrap.childHandler(new ServiceChannelInitializer(this, config, false));
    // Bind and start to accept incoming connections.
    bootstrap.bind(config.getServicePort()).sync();
    LOG.info("Started Pulsar Broker service on port {}", config.getWebServicePort());

    if (config.isTlsEnabled()) {
        ServerBootstrap tlsBootstrap = bootstrap.clone();
        tlsBootstrap.childHandler(new ServiceChannelInitializer(this, config, true));
        tlsBootstrap.bind(config.getServicePortTls()).sync();
        LOG.info("Started Pulsar Broker TLS service on port {}", config.getWebServicePortTls());
    }
}

From source file:lunarion.cluster.coordinator.server.CoordinatorServer.java

License:Open Source License

public void bind(int port) throws InterruptedException {

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.RCVBUF_ALLOCATOR,
                    new AdaptiveRecvByteBufAllocator(64, 1024, 65536 * 512))
            //.childHandler(new ChildChannelHandler())
            .childHandler(new CoordinatorServerChannelInitializer(co, logger))
            .option(ChannelOption.SO_BACKLOG, 1024).childOption(ChannelOption.SO_KEEPALIVE, true);

    Channel ch = bootstrap.bind(port).sync().channel();
    System.err
            .println(Timer.currentTime() + "[INFO]: coordinator server channel bound at port: " + port + '.');
    logger.info(Timer.currentTime() + " [COORDINATOR INFO]:  coordinator server channel bound at port: " + port
            + '.');
    ch.closeFuture().sync();
    /*
     *  Bind and start to accept incoming connections.
     */
    // ChannelFuture future = bootstrap.bind(port).sync();
    /*
     *  Wait until the server socket is closed.
     *  In this example, this does not happen, but you can do that to gracefully 
     *  shut down your server.
     */

    // future.channel().closeFuture().sync();

}

From source file:lunarion.node.LunarDBServerStandAlone.java

License:Open Source License

public void bind(int port) throws InterruptedException {

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.RCVBUF_ALLOCATOR,
                    new AdaptiveRecvByteBufAllocator(64, 1024, 65536 * 512))
            .option(ChannelOption.SO_BACKLOG, 1024).option(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childHandler(new LunarDBServerChannelInitializer(node_tc));

    Channel ch = bootstrap.bind(port).sync().channel();
    System.err.println(Timer.currentTime() + " DB node server channel bound at port: " + port + '.');
    logger.info(Timer.currentTime() + " [NODE INFO]:  DB node server channel bound at port: " + port + '.');
    ch.closeFuture().sync();
    /*
     *  Bind and start to accept incoming connections.
     */
    // ChannelFuture future = bootstrap.bind(port).sync();
    /*
     *  Wait until the server socket is closed.
     *  In this example, this does not happen, but you can do that to gracefully 
     *  shut down your server.
     */

    // future.channel().closeFuture().sync();

}

From source file:lunarion.node.requester.LunarDBClient.java

License:Open Source License

public ChannelFuture connect(String host, int port) throws Exception {
    group = new NioEventLoopGroup();
    client_handler = new ClientHandler(internal);

    try {
        Bootstrap client_bootstrap = new Bootstrap();
        client_bootstrap.group(group).channel(NioSocketChannel.class)
                .option(ChannelOption.SO_SNDBUF, 1024 * 1024)
                .option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(64, 1024, 65536 * 512))
                //.option(ChannelOption.TCP_NODELAY, true)
                .option(ChannelOption.SO_KEEPALIVE, true).handler(new ClientChannelInitializer(client_handler));

        //ChannelFuture cf = client_bootstrap.connect(host, port).sync();
        ChannelFuture cf = client_bootstrap.connect(host, port);
        channel_list.add(cf);

        cf.addListener(new ChannelFutureListener() {
            public void operationComplete(final ChannelFuture channelFuture) throws Exception {
                if (channelFuture.isSuccess()) {
                    ClientHandler handler = channelFuture.channel().pipeline().get(ClientHandler.class);
                    client_handler = handler;

                }
            }
        });

        connected.set(true);
        connected_host_ip = host;
        connected_port = port;

        return cf;
    } finally {

        // group.shutdownGracefully();
    }
}

From source file:org.apache.bookkeeper.proto.BookieNettyServer.java

License:Apache License

private void listenOn(InetSocketAddress address, BookieSocketAddress bookieAddress)
        throws InterruptedException {
    if (!conf.isDisableServerSocketBind()) {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.option(ChannelOption.ALLOCATOR, allocator);
        bootstrap.childOption(ChannelOption.ALLOCATOR, allocator);
        bootstrap.group(eventLoopGroup, eventLoopGroup);
        bootstrap.childOption(ChannelOption.TCP_NODELAY, conf.getServerTcpNoDelay());
        bootstrap.childOption(ChannelOption.SO_LINGER, conf.getServerSockLinger());
        bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new AdaptiveRecvByteBufAllocator(conf.getRecvByteBufAllocatorSizeMin(),
                        conf.getRecvByteBufAllocatorSizeInitial(), conf.getRecvByteBufAllocatorSizeMax()));
        bootstrap.option(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(
                conf.getServerWriteBufferLowWaterMark(), conf.getServerWriteBufferHighWaterMark()));

        if (eventLoopGroup instanceof EpollEventLoopGroup) {
            bootstrap.channel(EpollServerSocketChannel.class);
        } else {
            bootstrap.channel(NioServerSocketChannel.class);
        }

        bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
                synchronized (suspensionLock) {
                    while (suspended) {
                        suspensionLock.wait();
                    }
                }

                BookieSideConnectionPeerContextHandler contextHandler = new BookieSideConnectionPeerContextHandler();
                ChannelPipeline pipeline = ch.pipeline();

                // For ByteBufList, skip the usual LengthFieldPrepender and let the encoder itself add it
                pipeline.addLast("bytebufList", ByteBufList.ENCODER_WITH_SIZE);

                pipeline.addLast("lengthbaseddecoder",
                        new LengthFieldBasedFrameDecoder(maxFrameSize, 0, 4, 0, 4));
                pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));

                pipeline.addLast("bookieProtoDecoder", new BookieProtoEncoding.RequestDecoder(registry));
                pipeline.addLast("bookieProtoEncoder", new BookieProtoEncoding.ResponseEncoder(registry));
                pipeline.addLast("bookieAuthHandler", new AuthHandler.ServerSideHandler(
                        contextHandler.getConnectionPeer(), authProviderFactory));

                ChannelInboundHandler requestHandler = isRunning.get()
                        ? new BookieRequestHandler(conf, requestProcessor, allChannels)
                        : new RejectRequestHandler();
                pipeline.addLast("bookieRequestHandler", requestHandler);

                pipeline.addLast("contextHandler", contextHandler);
            }
        });

        // Bind and start to accept incoming connections
        Channel listen = bootstrap.bind(address.getAddress(), address.getPort()).sync().channel();
        if (listen.localAddress() instanceof InetSocketAddress) {
            if (conf.getBookiePort() == 0) {
                conf.setBookiePort(((InetSocketAddress) listen.localAddress()).getPort());
            }
        }
    }

    if (conf.isEnableLocalTransport()) {
        ServerBootstrap jvmBootstrap = new ServerBootstrap();
        jvmBootstrap.childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
        jvmBootstrap.group(jvmEventLoopGroup, jvmEventLoopGroup);
        jvmBootstrap.childOption(ChannelOption.TCP_NODELAY, conf.getServerTcpNoDelay());
        jvmBootstrap.childOption(ChannelOption.SO_KEEPALIVE, conf.getServerSockKeepalive());
        jvmBootstrap.childOption(ChannelOption.SO_LINGER, conf.getServerSockLinger());
        jvmBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new AdaptiveRecvByteBufAllocator(conf.getRecvByteBufAllocatorSizeMin(),
                        conf.getRecvByteBufAllocatorSizeInitial(), conf.getRecvByteBufAllocatorSizeMax()));
        jvmBootstrap.option(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(
                conf.getServerWriteBufferLowWaterMark(), conf.getServerWriteBufferHighWaterMark()));

        if (jvmEventLoopGroup instanceof DefaultEventLoopGroup) {
            jvmBootstrap.channel(LocalServerChannel.class);
        } else if (jvmEventLoopGroup instanceof EpollEventLoopGroup) {
            jvmBootstrap.channel(EpollServerSocketChannel.class);
        } else {
            jvmBootstrap.channel(NioServerSocketChannel.class);
        }

        jvmBootstrap.childHandler(new ChannelInitializer<LocalChannel>() {
            @Override
            protected void initChannel(LocalChannel ch) throws Exception {
                synchronized (suspensionLock) {
                    while (suspended) {
                        suspensionLock.wait();
                    }
                }

                BookieSideConnectionPeerContextHandler contextHandler = new BookieSideConnectionPeerContextHandler();
                ChannelPipeline pipeline = ch.pipeline();

                pipeline.addLast("lengthbaseddecoder",
                        new LengthFieldBasedFrameDecoder(maxFrameSize, 0, 4, 0, 4));
                pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));

                pipeline.addLast("bookieProtoDecoder", new BookieProtoEncoding.RequestDecoder(registry));
                pipeline.addLast("bookieProtoEncoder", new BookieProtoEncoding.ResponseEncoder(registry));
                pipeline.addLast("bookieAuthHandler", new AuthHandler.ServerSideHandler(
                        contextHandler.getConnectionPeer(), authProviderFactory));

                ChannelInboundHandler requestHandler = isRunning.get()
                        ? new BookieRequestHandler(conf, requestProcessor, allChannels)
                        : new RejectRequestHandler();
                pipeline.addLast("bookieRequestHandler", requestHandler);

                pipeline.addLast("contextHandler", contextHandler);
            }
        });

        // use the same address 'name' so clients that discovered the bookie through ZK can still find the local bookie
        jvmBootstrap.bind(bookieAddress.getLocalAddress()).sync();
        LocalBookiesRegistry.registerLocalBookieAddress(bookieAddress);
    }
}

From source file:org.apache.giraph.comm.netty.NettyServer.java

License:Apache License

/**
 * Start the server with the appropriate port
 */
public void start() {
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, tcpBacklog)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, sendBufferSize)
            .childOption(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .childOption(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(receiveBufferSize / 4,
                    receiveBufferSize, receiveBufferSize));

    /**
     * Pipeline setup: depends on whether configured to use authentication
     * or not.
     */
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            /*if_not[HADOOP_NON_SECURE]*/
            if (conf.authenticate()) {
                LOG.info("start: Will use Netty pipeline with "
                        + "authentication and authorization of clients.");
                // After a client authenticates, the two authentication-specific
                // pipeline components SaslServerHandler and ResponseEncoder are
                // removed, leaving the pipeline the same as in the non-authenticated
                // configuration except for the presence of the Authorize component.
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("saslServerHandler",
                        saslServerHandlerFactory.newHandler(conf), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck("authorizeServerHandler", new AuthorizeServerHandler(),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("responseEncoder", new ResponseEncoder(),
                        handlerToUseExecutionGroup, executionGroup, ch);
            } else {
                LOG.info("start: Using Netty without authentication.");
                /*end[HADOOP_NON_SECURE]*/
                // Store all connected channels in order to ensure that we can close
                // them on stop(), or else stop() may hang waiting for the
                // connections to close on their own
                ch.pipeline().addLast("connectedChannels", new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) throws Exception {
                        accepted.add(ctx.channel());
                        ctx.fireChannelActive();
                    }
                });
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                /*if_not[HADOOP_NON_SECURE]*/
            }
            /*end[HADOOP_NON_SECURE]*/
        }
    });

    int taskId = conf.getTaskPartition();
    int numTasks = conf.getInt("mapred.map.tasks", 1);
    // Number of workers + 1 for master
    int numServers = conf.getInt(GiraphConstants.MAX_WORKERS, numTasks) + 1;
    int portIncrementConstant = (int) Math.pow(10, Math.ceil(Math.log10(numServers)));
    int bindPort = GiraphConstants.IPC_INITIAL_PORT.get(conf) + taskId;
    int bindAttempts = 0;
    final int maxIpcPortBindAttempts = MAX_IPC_PORT_BIND_ATTEMPTS.get(conf);
    final boolean failFirstPortBindingAttempt = GiraphConstants.FAIL_FIRST_IPC_PORT_BIND_ATTEMPT.get(conf);

    // Simple handling of port collisions on the same machine while
    // preserving debuggability from the port number alone.
    // Round the max number of workers up to the next power of 10 and use
    // it as the constant by which to increase the port number.
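    // Worked example (illustrative, not from the original source): with
    // numServers = 37, portIncrementConstant = 10^ceil(log10(37)) = 100, so
    // successive bind attempts try bindPort, bindPort + 100, bindPort + 200, ...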
    while (bindAttempts < maxIpcPortBindAttempts) {
        this.myAddress = new InetSocketAddress(localHostname, bindPort);
        if (failFirstPortBindingAttempt && bindAttempts == 0) {
            if (LOG.isInfoEnabled()) {
                LOG.info("start: Intentionally fail first "
                        + "binding attempt as giraph.failFirstIpcPortBindAttempt " + "is true, port "
                        + bindPort);
            }
            ++bindAttempts;
            bindPort += portIncrementConstant;
            continue;
        }

        try {
            ChannelFuture f = bootstrap.bind(myAddress).sync();
            accepted.add(f.channel());
            break;
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
            // CHECKSTYLE: stop IllegalCatchCheck
        } catch (Exception e) {
            // CHECKSTYLE: resume IllegalCatchCheck
            LOG.warn("start: Likely failed to bind on attempt " + bindAttempts + " to port " + bindPort,
                    e.getCause());
            ++bindAttempts;
            bindPort += portIncrementConstant;
        }
    }
    if (bindAttempts == maxIpcPortBindAttempts || myAddress == null) {
        throw new IllegalStateException(
                "start: Failed to start NettyServer with " + bindAttempts + " attempts");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("start: Started server " + "communication server: " + myAddress + " with up to " + maxPoolSize
                + " threads on bind attempt " + bindAttempts + " with sendBufferSize = " + sendBufferSize
                + " receiveBufferSize = " + receiveBufferSize);
    }
}

From source file:org.apache.pulsar.proxy.server.ProxyService.java

License:Apache License

public void start() throws Exception {
    if (!isBlank(proxyConfig.getZookeeperServers()) && !isBlank(proxyConfig.getConfigurationStoreServers())) {
        discoveryProvider = new BrokerDiscoveryProvider(this.proxyConfig, getZooKeeperClientFactory());
        this.configurationCacheService = new ConfigurationCacheService(discoveryProvider.globalZkCache);
        authorizationService = new AuthorizationService(PulsarConfigurationLoader.convertFrom(proxyConfig),
                configurationCacheService);
    }

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    bootstrap.group(acceptorGroup, workerGroup);
    bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
            new AdaptiveRecvByteBufAllocator(1024, 16 * 1024, 1 * 1024 * 1024));

    bootstrap.channel(EventLoopUtil.getServerSocketChannelClass(workerGroup));
    EventLoopUtil.enableTriggeredMode(bootstrap);

    bootstrap.childHandler(new ServiceChannelInitializer(this, proxyConfig, false));
    // Bind and start to accept incoming connections.
    if (proxyConfig.getServicePort().isPresent()) {
        try {
            bootstrap.bind(proxyConfig.getServicePort().get()).sync();
            LOG.info("Started Pulsar Proxy at {}", serviceUrl);
        } catch (Exception e) {
            throw new IOException("Failed to bind Pulsar Proxy on port " + proxyConfig.getServicePort().get(),
                    e);
        }
    }
    LOG.info("Started Pulsar Proxy at {}", serviceUrl);

    if (proxyConfig.getServicePortTls().isPresent()) {
        ServerBootstrap tlsBootstrap = bootstrap.clone();
        tlsBootstrap.childHandler(new ServiceChannelInitializer(this, proxyConfig, true));
        tlsBootstrap.bind(proxyConfig.getServicePortTls().get()).sync();
        LOG.info("Started Pulsar TLS Proxy on port {}", proxyConfig.getServicePortTls().get());
    }
}

From source file:org.elasticsearch.hadoop.http.netty4.Netty4HttpServerTransport.java

License:Apache License

@Inject
public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays,
        ThreadPool threadPool) {
    super(settings);
    this.networkService = networkService;
    this.bigArrays = bigArrays;
    this.threadPool = threadPool;

    ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
    this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
    this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
    this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
    this.resetCookies = SETTING_HTTP_RESET_COOKIES.get(settings);
    this.maxCumulationBufferCapacity = SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY.get(settings);
    this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
    this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings);
    this.blockingServer = SETTING_HTTP_TCP_BLOCKING_SERVER.get(settings);
    this.port = SETTING_HTTP_PORT.get(settings);
    this.bindHosts = SETTING_HTTP_BIND_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
    this.publishHosts = SETTING_HTTP_PUBLISH_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
    this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings);
    this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings);
    this.reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings);
    this.tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
    this.tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
    this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);

    // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for Netty's default values; we can use higher ones here, or even a fixed size
    ByteSizeValue receivePredictorMin = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
    ByteSizeValue receivePredictorMax = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
    if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
        recvByteBufAllocator = new FixedRecvByteBufAllocator(Math.toIntExact(receivePredictorMax.bytes()));
    } else {
        recvByteBufAllocator = new AdaptiveRecvByteBufAllocator(Math.toIntExact(receivePredictorMin.bytes()),
                Math.toIntExact(receivePredictorMin.bytes()), Math.toIntExact(receivePredictorMax.bytes()));
    }

    this.compression = SETTING_HTTP_COMPRESSION.get(settings);
    this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
    this.pipelining = SETTING_PIPELINING.get(settings);
    this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
    this.corsConfig = buildCorsConfig(settings);

    // validate max content length
    if (maxContentLength.bytes() > Integer.MAX_VALUE) {
        logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength);
        maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
    }
    this.maxContentLength = maxContentLength;

    logger.debug(
            "using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], "
                    + "receive_predictor[{}->{}], pipelining[{}], pipelining_max_events[{}]",
            maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictorMin,
            receivePredictorMax, pipelining, pipeliningMaxEvents);
}