List of usage examples for io.netty.handler.logging.LogLevel.INFO
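Every example below follows the same basic pattern: a LoggingHandler constructed with LogLevel.INFO is attached to a ServerBootstrap, usually as the parent channel handler, so that server-channel events (bind, accept, close, and so on) are logged at INFO level. The following minimal sketch shows that pattern in isolation; the class name and port number are placeholders and are not taken from any of the source files listed below.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;

public final class LoggingBootstrapExample {
    public static void main(String[] args) throws Exception {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    // Log server-channel events (bind, accept, close) at INFO level.
                    .handler(new LoggingHandler(LogLevel.INFO))
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Application handlers would be added here.
                        }
                    });
            ChannelFuture f = b.bind(8080).sync(); // placeholder port
            f.channel().closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}

The same handler can also be added to a child pipeline (as in the HexDumpProxyInitializer example below) to log per-connection events instead of server-channel events. Each example is listed with the source file and license it comes from.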
From source file:com.github.nettybook.ch8.HttpSnoopServer.java
License:Apache License
@SuppressWarnings("deprecation") public static void main(String[] args) throws Exception { SslContext sslCtx = null;//from w w w .j av a 2 s. c o m try { File certChainFile = new File("netty.crt"); File keyFile = new File("privatekey.pem"); keyFile.exists(); sslCtx = SslContext.newServerContext(certChainFile, keyFile, "1234"); } catch (SSLException e) { e.printStackTrace(); System.out.println("Can not create SSL context! \n Server will be stop!"); } // Configure the server. EventLoopGroup bossGroup = new NioEventLoopGroup(1); EventLoopGroup workerGroup = new NioEventLoopGroup(); try { ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) .handler(new LoggingHandler(LogLevel.INFO)) .childHandler(new HttpSnoopServerInitializer(sslCtx)); Channel ch = b.bind(PORT).sync().channel(); ch.closeFuture().sync(); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } }
From source file:com.github.sinsinpub.pero.frontend.NettySocksServer.java
License:Apache License
public void run() throws InterruptedException {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1, ThreadFactoryRepository.BOSS_GORUP);
    EventLoopGroup workerGroup = new NioEventLoopGroup(getMaxWorkerThreads(), ThreadFactoryRepository.WORKER_GROUP);
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(getSocksServerInitializer());
        ChannelFuture cf = b.bind(getPort()).sync();
        logger.info(String.format("Proxy server %s %s started.",
                ApplicationVersion.DEFAULT.getApplicationName(),
                ApplicationVersion.DEFAULT.getApplicationVersion()));
        cf.channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
From source file:com.github.sinsinpub.pero.manual.proxyhandler.ProxyServer.java
License:Apache License
/**
 * Starts a new proxy server with disabled authentication for testing purposes.
 *
 * @param useSsl {@code true} if and only if implicit SSL is enabled
 * @param testMode the test mode
 * @param username the expected username. If the client tries to authenticate with a different
 *        username, this server will fail the authentication request.
 * @param password the expected password. If the client tries to authenticate with a different
 *        password, this server will fail the authentication request.
 * @param destination the expected destination. If the client requests proxying to a different
 *        destination, this server will reject the connection request.
 */
protected ProxyServer(final boolean useSsl, TestMode testMode, InetSocketAddress destination,
        String username, String password, int bindPort, boolean logging) {
    this.testMode = testMode;
    this.destination = destination;
    this.username = username;
    this.password = password;
    ServerBootstrap b = new ServerBootstrap();
    b.channel(NioServerSocketChannel.class);
    if (logging) {
        b.handler(new LoggingHandler(LogLevel.INFO));
    }
    b.group(StaticContextProvider.group);
    b.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            ChannelPipeline p = ch.pipeline();
            if (useSsl) {
                p.addLast(StaticContextProvider.serverSslCtx.newHandler(ch.alloc()));
            }
            configure(ch);
        }
    });
    ch = (ServerSocketChannel) b.bind(NetUtil.LOCALHOST, bindPort).syncUninterruptibly().channel();
}
From source file:com.github.unafraid.signer.server.ServerManager.java
License:Apache License
private void init() {
    SslContext sslCtx = null;
    if (SSL) {
        try {
            final SelfSignedCertificate ssc = new SelfSignedCertificate("localhost");
            sslCtx = SslContext.newServerContext(ssc.certificate(), ssc.privateKey());
        } catch (Exception e) {
            LOGGER.warn(e.getMessage(), e);
        }
    }
    InetAddress listenAddress;
    try {
        listenAddress = Inet4Address.getByName(HOSTNAME);
    } catch (Exception e) {
        LOGGER.warn("Incorrect listen ip specified: {} using localhost instead!", HOSTNAME);
        listenAddress = Inet4Address.getLoopbackAddress();
    }
    final EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    final EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        final ServerBootstrap b = new ServerBootstrap();
        b.option(ChannelOption.SO_BACKLOG, 1024);
        b.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ServerInitializer(sslCtx));
        // Start listening
        final Channel ch = b.bind(listenAddress, PORT).sync().channel();
        LOGGER.info("Open your web browser and navigate to {}://{}{}/", (SSL ? "https" : "http"),
                listenAddress.getHostAddress(), (PORT != 443 && PORT != 80 ? ":" + PORT : ""));
        // Block until closed
        ch.closeFuture().sync();
    } catch (Exception e) {
        LOGGER.warn("Failed to initialize server: ", e);
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
From source file:com.github.wangshuwei5.server.NettyServer.java
License:Apache License
public void bind() throws Exception {
    // Configure the NIO event loop groups.
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new NettyServerInitializer(SSLMODE.CSA.toString()));
        // Bind and start accepting incoming connections.
        ChannelFuture f = b.bind(NettyConstant.REMOTEIP, NettyConstant.PORT).sync();
        System.out.println("Netty server start ok : " + (NettyConstant.REMOTEIP + " : " + NettyConstant.PORT));
        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
From source file:com.gxkj.demo.netty.echo.EchoServer.java
License:Apache License
public void run() throws Exception {
    // Configure the server.
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(
                                //new LoggingHandler(LogLevel.INFO),
                                new EchoServerHandler());
                    }
                });
        // Start the server.
        ChannelFuture f = b.bind(port).sync();
        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
From source file:com.gxkj.demo.netty.proxy.HexDumpProxyInitializer.java
License:Apache License
@Override
public void initChannel(SocketChannel ch) throws Exception {
    ch.pipeline().addLast(
            new LoggingHandler(LogLevel.INFO),
            new HexDumpProxyFrontendHandler(remoteHost, remotePort));
}
From source file:com.heliosapm.streams.onramp.OnRampBoot.java
License:Apache License
/**
 * Creates a new OnRampBoot
 * @param appConfig The application configuration
 */
public OnRampBoot(final Properties appConfig) {
    final String jmxmpUri = ConfigurationHelper.getSystemThenEnvProperty("jmx.jmxmp.uri", "jmxmp://0.0.0.0:1893", appConfig);
    JMXHelper.fireUpJMXMPServer(jmxmpUri);
    MessageForwarder.initialize(appConfig);
    port = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.port", 8091, appConfig);
    bindInterface = ConfigurationHelper.getSystemThenEnvProperty("onramp.network.bind", "0.0.0.0", appConfig);
    bindSocket = new InetSocketAddress(bindInterface, port);
    workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.worker_threads", CORES * 2, appConfig);
    connectTimeout = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sotimeout", 0, appConfig);
    backlog = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.backlog", 3072, appConfig);
    writeSpins = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.writespins", 16, appConfig);
    recvBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.recbuffer", 43690, appConfig);
    sendBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sendbuffer", 8192, appConfig);
    disableEpoll = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.epoll.disable", false, appConfig);
    async = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.async_io", true, appConfig);
    tcpNoDelay = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.tcp_no_delay", true, appConfig);
    keepAlive = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.keep_alive", true, appConfig);
    reuseAddress = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.reuse_address", true, appConfig);
    tcpPipelineFactory = new PipelineFactory(appConfig);
    udpPipelineFactory = new UDPPipelineFactory();
    tcpServerBootstrap.handler(new LoggingHandler(getClass(), LogLevel.INFO));
    tcpServerBootstrap.childHandler(tcpPipelineFactory);
    // Set the child options
    tcpServerBootstrap.childOption(ChannelOption.ALLOCATOR, BufferManager.getInstance().getAllocator());
    tcpServerBootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    tcpServerBootstrap.childOption(ChannelOption.SO_KEEPALIVE, keepAlive);
    tcpServerBootstrap.childOption(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.childOption(ChannelOption.SO_SNDBUF, sendBuffer);
    tcpServerBootstrap.childOption(ChannelOption.WRITE_SPIN_COUNT, writeSpins);
    // Set the server options
    tcpServerBootstrap.option(ChannelOption.SO_BACKLOG, backlog);
    tcpServerBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    tcpServerBootstrap.option(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.option(ChannelOption.SO_TIMEOUT, connectTimeout);
    final StringBuilder tcpUri = new StringBuilder("tcp");
    final StringBuilder udpUri = new StringBuilder("udp");
    if (IS_LINUX && !disableEpoll) {
        bossExecutorThreadFactory = new ExecutorThreadFactory("EpollServerBoss", true);
        bossGroup = new EpollEventLoopGroup(1, (ThreadFactory) bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("EpollServerWorker", true);
        workerGroup = new EpollEventLoopGroup(workerThreads, (ThreadFactory) workerExecutorThreadFactory);
        tcpChannelType = EpollServerSocketChannel.class;
        udpChannelType = EpollDatagramChannel.class;
        tcpUri.append("epoll");
        udpUri.append("epoll");
    } else {
        bossExecutorThreadFactory = new ExecutorThreadFactory("NioServerBoss", true);
        bossGroup = new NioEventLoopGroup(1, bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("NioServerWorker", true);
        workerGroup = new NioEventLoopGroup(workerThreads, workerExecutorThreadFactory);
        tcpChannelType = NioServerSocketChannel.class;
        udpChannelType = NioDatagramChannel.class;
        tcpUri.append("nio");
        udpUri.append("nio");
    }
    tcpUri.append("://").append(bindInterface).append(":").append(port);
    udpUri.append("://").append(bindInterface).append(":").append(port);
    URI u = null;
    try {
        u = new URI(tcpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed TCP server URI const: [{}]. Programmer Error", tcpUri, e);
    }
    tcpServerURI = u;
    try {
        u = new URI(udpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed UDP server URI const: [{}]. Programmer Error", udpUri, e);
    }
    udpServerURI = u;
    log.info(">>>>> Starting OnRamp TCP Listener on [{}]...", tcpServerURI);
    log.info(">>>>> Starting OnRamp UDP Listener on [{}]...", udpServerURI);
    final ChannelFuture cf = tcpServerBootstrap.channel(tcpChannelType).group(bossGroup, workerGroup)
            .bind(bindSocket).awaitUninterruptibly()
            .addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp TCP Listener on [{}] Started", tcpServerURI);
                }
            }).awaitUninterruptibly();
    final ChannelFuture ucf = udpBootstrap.channel(udpChannelType).group(workerGroup)
            .option(ChannelOption.SO_BROADCAST, true).handler(new UDPPipelineFactory()).bind(bindSocket)
            .awaitUninterruptibly().addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp UDP Listener on [{}] Started", udpServerURI);
                }
            }).awaitUninterruptibly();
    tcpServerChannel = cf.channel();
    udpServerChannel = ucf.channel();
    tcpCloseFuture = tcpServerChannel.closeFuture();
    udpCloseFuture = udpServerChannel.closeFuture();
    Runtime.getRuntime().addShutdownHook(shutdownHook);
}
From source file:com.heliosapm.webrpc.server.RPCServer.java
License:Apache License
private RPCServer(final Properties properties) {
    log.info(">>>>> RPCServer starting....");
    final int port = ConfigurationHelper.getIntSystemThenEnvProperty(CONF_PORT, DEFAULT_PORT, properties);
    final int workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty(CONF_WORKERS, DEFAULT_WORKERS, properties);
    final String iface = ConfigurationHelper.getSystemThenEnvProperty(CONF_BIND, DEFAULT_BIND, properties);
    socketAddress = new InetSocketAddress(iface, port);
    bossGroup = new NioEventLoopGroup(1);
    workerExecutor = new JMXManagedThreadPool(workerExecutorObjectName, "WorkerPool", workerThreads,
            workerThreads * 2, 1, 60000, 100, 99, true);
    workerGroup = new NioEventLoopGroup(workerThreads, (Executor) workerExecutor);
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup)
            .channel(NioServerSocketChannel.class)
            .handler(new LoggingHandler(LogLevel.INFO))
            .childHandler(this);
    serverChannel = bootstrap.bind(socketAddress).syncUninterruptibly().channel();
    log.info("<<<<< Started RPCServer on [{}]", socketAddress);
}
From source file:com.hiido.eagle.hes.network.FileServer.java
License:Apache License
public void run() throws Exception {
    // Configure the server.
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        //ch.pipeline().addLast(new ChunkedWriteHandler());
                        ch.pipeline().addLast(new FileHandler());
                    }
                });
        // Start the server.
        ChannelFuture f = b.bind(port).sync();
        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}