List of usage examples for the io.netty.handler.logging.LoggingHandler constructor
public LoggingHandler(String name, LogLevel level)
From source file:com.cloudera.livy.client.local.rpc.Rpc.java
License:Apache License
private static Rpc createRpc(LocalConf config, SaslHandler saslHandler, SocketChannel client, EventExecutorGroup egroup) throws IOException { LogLevel logLevel = LogLevel.TRACE;//from w w w . j a va2s .c o m String logLevelStr = config.get(RPC_CHANNEL_LOG_LEVEL); if (logLevelStr != null) { try { logLevel = LogLevel.valueOf(logLevelStr); } catch (Exception e) { LOG.warn("Invalid log level {}, reverting to default.", logLevelStr); } } boolean logEnabled = false; switch (logLevel) { case DEBUG: logEnabled = LOG.isDebugEnabled(); break; case ERROR: logEnabled = LOG.isErrorEnabled(); break; case INFO: logEnabled = LOG.isInfoEnabled(); break; case TRACE: logEnabled = LOG.isTraceEnabled(); break; case WARN: logEnabled = LOG.isWarnEnabled(); break; } if (logEnabled) { client.pipeline().addLast("logger", new LoggingHandler(Rpc.class, logLevel)); } KryoMessageCodec kryo = new KryoMessageCodec(config.getInt(RPC_MAX_MESSAGE_SIZE), MessageHeader.class, NullMessage.class, SaslMessage.class); saslHandler.setKryoMessageCodec(kryo); client.pipeline().addLast("codec", kryo).addLast("sasl", saslHandler); return new Rpc(config, client, egroup); }
From source file:com.cloudera.livy.rsc.rpc.Rpc.java
License:Apache License
private static Rpc createRpc(RSCConf config, SaslHandler saslHandler, SocketChannel client, EventExecutorGroup egroup) throws IOException { LogLevel logLevel = LogLevel.TRACE;// w w w . j a va 2 s. c om String logLevelStr = config.get(RPC_CHANNEL_LOG_LEVEL); if (logLevelStr != null) { try { logLevel = LogLevel.valueOf(logLevelStr); } catch (Exception e) { LOG.warn("Invalid log level {}, reverting to default.", logLevelStr); } } boolean logEnabled = false; switch (logLevel) { case DEBUG: logEnabled = LOG.isDebugEnabled(); break; case ERROR: logEnabled = LOG.isErrorEnabled(); break; case INFO: logEnabled = LOG.isInfoEnabled(); break; case TRACE: logEnabled = LOG.isTraceEnabled(); break; case WARN: logEnabled = LOG.isWarnEnabled(); break; } if (logEnabled) { client.pipeline().addLast("logger", new LoggingHandler(Rpc.class, logLevel)); } KryoMessageCodec kryo = new KryoMessageCodec(config.getInt(RPC_MAX_MESSAGE_SIZE), MessageHeader.class, NullMessage.class, SaslMessage.class); saslHandler.setKryoMessageCodec(kryo); client.pipeline().addLast("codec", kryo).addLast("sasl", saslHandler); return new Rpc(config, client, egroup); }
From source file:com.heliosapm.shorthand.caster.broadcast.BroadcastListener.java
License:Open Source License
/** * Starts a listener on the passed socket address * @param isa The socket address to listen on * @param nic The network interface to listen on *//*from w w w.ja v a 2s . c o m*/ public void startListener(InetSocketAddress isa, NetworkInterface nic) { Channel channel = null; if (isa.getAddress().isMulticastAddress()) { channel = bootstrap.group(group).channel(NioDatagramChannel.class) // .option(ChannelOption.SO_BROADCAST, true) .option(ChannelOption.IP_MULTICAST_ADDR, isa.getAddress()) .option(ChannelOption.SO_REUSEADDR, true) .option(ChannelOption.IP_MULTICAST_IF, NetUtil.LOOPBACK_IF) .handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel channel) throws Exception { ChannelPipeline pipeline = channel.pipeline(); pipeline.addLast(new LoggingHandler(BroadcastListener.class, LogLevel.DEBUG)); pipeline.addLast(router); } }).localAddress(isa).bind(isa.getPort()).syncUninterruptibly().channel(); ((NioDatagramChannel) channel).joinGroup(isa, NetUtil.LOOPBACK_IF).syncUninterruptibly(); //.bind(isa.getPort()).syncUninterruptibly().channel(); log("Bound to Multicast [%s]", isa); } else { channel = bootstrap.group(group).channel(NioDatagramChannel.class) .option(ChannelOption.SO_BROADCAST, true).handler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel channel) throws Exception { ChannelPipeline pipeline = channel.pipeline(); pipeline.addLast(new LoggingHandler(BroadcastListener.class, LogLevel.DEBUG)); pipeline.addLast(router); } }).localAddress(isa).bind(isa).syncUninterruptibly().channel(); log("Bound to Broadcast UDP [%s]", isa); } boundChannels.add(channel); //.bind().syncUninterruptibly().channel(); boundChannels.add(channel); log("Started Broadcast Listener on [%s]", isa); }
From source file:com.heliosapm.streams.onramp.OnRampBoot.java
License:Apache License
/**
 * Creates a new OnRampBoot: fires up the JMXMP management server, reads all
 * network tuning properties (system property, then environment, then default),
 * configures the shared TCP/UDP bootstraps, selects the transport (epoll on
 * Linux unless disabled, otherwise NIO), and binds both listeners.
 * @param appConfig The application configuration
 */
public OnRampBoot(final Properties appConfig) {
    // Management endpoint first so the boot sequence itself is observable over JMX.
    final String jmxmpUri = ConfigurationHelper.getSystemThenEnvProperty("jmx.jmxmp.uri", "jmxmp://0.0.0.0:1893", appConfig);
    JMXHelper.fireUpJMXMPServer(jmxmpUri);
    MessageForwarder.initialize(appConfig);
    // Listener address and socket tuning knobs, all overridable via props/env.
    port = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.port", 8091, appConfig);
    bindInterface = ConfigurationHelper.getSystemThenEnvProperty("onramp.network.bind", "0.0.0.0", appConfig);
    bindSocket = new InetSocketAddress(bindInterface, port);
    workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.worker_threads", CORES * 2, appConfig);
    connectTimeout = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sotimeout", 0, appConfig);
    backlog = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.backlog", 3072, appConfig);
    writeSpins = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.writespins", 16, appConfig);
    recvBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.recbuffer", 43690, appConfig);
    sendBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sendbuffer", 8192, appConfig);
    disableEpoll = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.epoll.disable", false, appConfig);
    async = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.async_io", true, appConfig);
    tcpNoDelay = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.tcp_no_delay", true, appConfig);
    keepAlive = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.keep_alive", true, appConfig);
    reuseAddress = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.reuse_address", true, appConfig);
    tcpPipelineFactory = new PipelineFactory(appConfig);
    udpPipelineFactory = new UDPPipelineFactory();
    // INFO-level server-channel logger on the TCP bootstrap.
    tcpServerBootstrap.handler(new LoggingHandler(getClass(), LogLevel.INFO));
    tcpServerBootstrap.childHandler(tcpPipelineFactory);
    // Set the child options (per accepted connection)
    tcpServerBootstrap.childOption(ChannelOption.ALLOCATOR, BufferManager.getInstance().getAllocator());
    tcpServerBootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    tcpServerBootstrap.childOption(ChannelOption.SO_KEEPALIVE, keepAlive);
    tcpServerBootstrap.childOption(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.childOption(ChannelOption.SO_SNDBUF, sendBuffer);
    tcpServerBootstrap.childOption(ChannelOption.WRITE_SPIN_COUNT, writeSpins);
    // Set the server options (listening socket)
    tcpServerBootstrap.option(ChannelOption.SO_BACKLOG, backlog);
    tcpServerBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    tcpServerBootstrap.option(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.option(ChannelOption.SO_TIMEOUT, connectTimeout);
    final StringBuilder tcpUri = new StringBuilder("tcp");
    final StringBuilder udpUri = new StringBuilder("udp");
    // Prefer the native epoll transport on Linux unless explicitly disabled.
    if (IS_LINUX && !disableEpoll) {
        bossExecutorThreadFactory = new ExecutorThreadFactory("EpollServerBoss", true);
        bossGroup = new EpollEventLoopGroup(1, (ThreadFactory) bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("EpollServerWorker", true);
        workerGroup = new EpollEventLoopGroup(workerThreads, (ThreadFactory) workerExecutorThreadFactory);
        tcpChannelType = EpollServerSocketChannel.class;
        udpChannelType = EpollDatagramChannel.class;
        tcpUri.append("epoll");
        udpUri.append("epoll");
    } else {
        bossExecutorThreadFactory = new ExecutorThreadFactory("NioServerBoss", true);
        bossGroup = new NioEventLoopGroup(1, bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("NioServerWorker", true);
        workerGroup = new NioEventLoopGroup(workerThreads, workerExecutorThreadFactory);
        tcpChannelType = NioServerSocketChannel.class;
        udpChannelType = NioDatagramChannel.class;
        tcpUri.append("nio");
        udpUri.append("nio");
    }
    tcpUri.append("://").append(bindInterface).append(":").append(port);
    udpUri.append("://").append(bindInterface).append(":").append(port);
    // Build informational URIs describing both listeners; a failure here is a
    // programmer error and is only logged.
    URI u = null;
    try {
        u = new URI(tcpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed TCP server URI const: [{}]. Programmer Error", tcpUri, e);
    }
    tcpServerURI = u;
    try {
        u = new URI(udpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed UDP server URI const: [{}]. Programmer Error", udpUri, e);
    }
    udpServerURI = u;
    log.info(">>>>> Starting OnRamp TCP Listener on [{}]...", tcpServerURI);
    log.info(">>>>> Starting OnRamp UDP Listener on [{}]...", udpServerURI);
    // Bind both listeners, blocking (uninterruptibly) until each bind completes.
    final ChannelFuture cf = tcpServerBootstrap.channel(tcpChannelType).group(bossGroup, workerGroup)
            .bind(bindSocket).awaitUninterruptibly()
            .addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp TCP Listener on [{}] Started", tcpServerURI);
                };
            }).awaitUninterruptibly();
    final ChannelFuture ucf = udpBootstrap.channel(udpChannelType).group(workerGroup)
            .option(ChannelOption.SO_BROADCAST, true).handler(new UDPPipelineFactory()).bind(bindSocket)
            .awaitUninterruptibly().addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp UDP Listener on [{}] Started", udpServerURI);
                };
            }).awaitUninterruptibly();
    tcpServerChannel = cf.channel();
    udpServerChannel = ucf.channel();
    tcpCloseFuture = tcpServerChannel.closeFuture();
    udpCloseFuture = udpServerChannel.closeFuture();
    // Ensure an orderly teardown on JVM exit.
    Runtime.getRuntime().addShutdownHook(shutdownHook);
}
From source file:com.heliosapm.tsdblite.Server.java
License:Apache License
/** * Creates a new Server/*from w ww. j ava 2 s. c o m*/ */ private Server() { log.info("Configuring Netty Server...."); String serverLevel = ConfigurationHelper.getSystemThenEnvProperty(Constants.CONF_NETTY_SERVER_LOGLEVEL, Constants.DEFAULT_NETTY_SERVER_LOGLEVEL); loggingHandler = new LoggingHandler(getClass(), LogLevel.valueOf(serverLevel.trim().toUpperCase())); iface = ConfigurationHelper.getSystemThenEnvProperty(Constants.CONF_NETTY_IFACE, Constants.DEFAULT_NETTY_IFACE); port = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_NETTY_PORT, Constants.DEFAULT_NETTY_PORT); int bossThreads = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_NETTY_BOSS_THREADS, Constants.DEFAULT_NETTY_BOSS_THREADS); int workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_NETTY_WORKER_THREADS, Constants.DEFAULT_NETTY_WORKER_THREADS); int groupThreads = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_NETTY_CGROUP_THREADS, Constants.DEFAULT_NETTY_CGROUP_THREADS); bossPool = new ManagedDefaultExecutorServiceFactory("bossPool").newExecutorService(bossThreads); // ForkJoinPoolManager.register(bossPool, BOSS_POOL_ON); workerPool = new ManagedDefaultExecutorServiceFactory("workerPool").newExecutorService(workerThreads); // ForkJoinPoolManager.register(workerPool, WORKER_POOL_ON); channelGroupPool = new ManagedDefaultExecutorServiceFactory("groupPool").newExecutorService(groupThreads); // ForkJoinPoolManager.register(channelGroupPool, CGROUP_POOL_ON); bossGroup = new NioEventLoopGroup(bossThreads, bossPool, selectorProvider); workerGroup = new NioEventLoopGroup(bossThreads, workerPool, selectorProvider); bootStrap = new ServerBootstrap(); groupExecutor = new DefaultEventExecutor(channelGroupPool); channelGroup = new DefaultChannelGroup("TSDBLite", groupExecutor); MetricCache.getInstance(); // fire up the metric cache before we start taking calls log.info("Selector: {}", selectorProvider.getClass().getName()); 
bootStrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).handler(loggingHandler) .childHandler(this); try { serverChannel = (NioServerSocketChannel) bootStrap.bind(iface, port).sync().channel(); } catch (Exception ex) { stop(); log.error("Failed to bind Netty server on [{}:{}]", iface, port, ex); throw new RuntimeException("Failed to bind Netty server", ex); } JMXHelper.registerMBean(this, OBJECT_NAME); log.info( "\n\t======================================\n\tNetty Server started on [{}:{}]\n\t======================================", iface, port); }
From source file:com.intuit.karate.netty.FeatureServer.java
License:Open Source License
private FeatureServer(Feature feature, int requestedPort, SslContext sslCtx, Map<String, Object> arg) { ssl = sslCtx != null;//from ww w.j a va 2s . c o m bossGroup = new NioEventLoopGroup(1); workerGroup = new NioEventLoopGroup(); FeatureServerInitializer initializer = new FeatureServerInitializer(sslCtx, feature, arg, () -> stop()); try { ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) .handler(new LoggingHandler(getClass().getName(), LogLevel.TRACE)).childHandler(initializer); channel = b.bind(requestedPort).sync().channel(); InetSocketAddress isa = (InetSocketAddress) channel.localAddress(); host = "127.0.0.1"; //isa.getHostString(); port = isa.getPort(); logger.info("server started - {}://{}:{}", ssl ? "https" : "http", host, port); } catch (Exception e) { throw new RuntimeException(e); } }
From source file:com.xx_dev.apn.proxy.ApnProxyServerChannelInitializer.java
License:Apache License
@Override public void initChannel(SocketChannel channel) throws Exception { ChannelPipeline pipeline = channel.pipeline(); pipeline.addLast("idlestate", new IdleStateHandler(0, 0, 3, TimeUnit.MINUTES)); pipeline.addLast("idlehandler", new ApnProxyIdleHandler()); pipeline.addLast("datalog", new LoggingHandler("PRE_BYTE_LOGGER", LogLevel.DEBUG)); if (ApnProxyConfig.getConfig().getListenType() == ApnProxyListenType.SSL) { SSLEngine engine = ApnProxySSLContextFactory.createServerSSLSSLEngine(); pipeline.addLast("apnproxy.encrypt", new SslHandler(engine)); } else if (ApnProxyConfig.getConfig().getListenType() == ApnProxyListenType.AES) { byte[] key = ApnProxyConfig.getConfig().getKey(); byte[] iv = ApnProxyConfig.getConfig().getIv(); pipeline.addLast("apnproxy.encrypt", new ApnProxyAESEncoder(key, iv)); pipeline.addLast("apnproxy.decrypt", new ApnProxyAESDecoder(key, iv)); }//from w w w .j a v a 2 s .co m pipeline.addLast("log", new LoggingHandler("BYTE_LOGGER", LogLevel.INFO)); pipeline.addLast("codec", new HttpServerCodec()); pipeline.addLast(ApnProxyPreHandler.HANDLER_NAME, new ApnProxyPreHandler()); pipeline.addLast(ApnProxySchemaHandler.HANDLER_NAME, new ApnProxySchemaHandler()); }
From source file:com.xx_dev.apn.socks.local.PortForwardProxyFrontendInitializer.java
License:Apache License
/**
 * Sets up the frontend pipeline: DEBUG wire logging ahead of the handler that
 * forwards traffic to the configured remote host/port.
 */
@Override
public void initChannel(SocketChannel ch) throws SSLException {
    final ChannelPipeline pipeline = ch.pipeline();
    pipeline.addLast(new LoggingHandler("BYTE_LOGGER", LogLevel.DEBUG));
    pipeline.addLast(new PortForwardProxyFrontendHandler(remoteHost, remotePort));
}
From source file:com.xx_dev.apn.socks.remote.SocksServer.java
License:Apache License
/**
 * Entry point: boots the SOCKS server on the configured listen port and blocks
 * until the server channel closes, shutting down both event loop groups on exit.
 */
public static void main(String[] args) throws Exception {
    final EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    final EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        final ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler("NET_LOGGER", LogLevel.DEBUG))
                .childHandler(new SocksServerInitializer());
        // Bind, then block until the listening channel is closed.
        bootstrap.bind(RemoteConfig.ins().getListenPort()).sync().channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
From source file:com.xx_dev.apn.socks.remote.SocksServerInitializer.java
License:Apache License
@Override public void initChannel(SocketChannel socketChannel) throws Exception { ChannelPipeline p = socketChannel.pipeline(); p.addLast(new FakeHttpServerDecoder()); p.addLast(new FakeHttpServerEncoder()); p.addLast("log", new LoggingHandler("BYTE_LOGGER", LogLevel.DEBUG)); p.addLast(new SocksInitRequestDecoder()); p.addLast(socksMessageEncoder);//w ww .j av a 2 s . c o m p.addLast(socksServerHandler); }