List of usage examples for io.netty.handler.codec.LengthFieldBasedFrameDecoder
public LengthFieldBasedFrameDecoder(int maxFrameLength, int lengthFieldOffset, int lengthFieldLength, int lengthAdjustment, int initialBytesToStrip)
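Most of the examples below pair the decoder with a LengthFieldPrepender and use a 4-byte length prefix that counts only the payload (lengthAdjustment = 0) and is stripped from the decoded frame (initialBytesToStrip = 4). As a minimal, hypothetical sketch of that configuration (not taken from any of the projects below; class and buffer names are illustrative), the constructor arguments can be exercised with Netty's EmbeddedChannel:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import java.nio.charset.StandardCharsets;

public class LengthFieldDecoderSketch {
    public static void main(String[] args) {
        // maxFrameLength = 1024, lengthFieldOffset = 0, lengthFieldLength = 4,
        // lengthAdjustment = 0 (the length value counts only the payload),
        // initialBytesToStrip = 4 (drop the prefix from the decoded frame)
        EmbeddedChannel channel = new EmbeddedChannel(
                new LengthFieldBasedFrameDecoder(1024, 0, 4, 0, 4));

        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        ByteBuf in = Unpooled.buffer();
        in.writeInt(payload.length);   // 4-byte big-endian length prefix
        in.writeBytes(payload);

        channel.writeInbound(in);
        ByteBuf frame = channel.readInbound();
        // frame contains only "hello"; the 4-byte prefix was stripped
        System.out.println(frame.toString(StandardCharsets.UTF_8));
        frame.release();
        channel.finish();
    }
}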
From source file:io.atomix.catalyst.transport.netty.NettyClient.java
License:Apache License
@Override
public CompletableFuture<Connection> connect(Address address) {
    Assert.notNull(address, "address");
    ThreadContext context = ThreadContext.currentContextOrThrow();
    CompletableFuture<Connection> future = new ComposableFuture<>();

    LOGGER.info("Connecting to {}", address);

    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(transport.eventLoopGroup()).channel(NioSocketChannel.class)
        .handler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel channel) throws Exception {
                ChannelPipeline pipeline = channel.pipeline();
                if (transport.properties().sslEnabled()) {
                    pipeline.addFirst(new SslHandler(new NettyTls(transport.properties()).initSslEngine(true)));
                }
                pipeline.addLast(FIELD_PREPENDER);
                pipeline.addLast(new LengthFieldBasedFrameDecoder(transport.properties().maxFrameSize(), 0, 4, 0, 4));
                pipeline.addLast(new NettyHandler(connections, future::complete, context, transport.properties()));
            }
        });

    bootstrap.option(ChannelOption.TCP_NODELAY, transport.properties().tcpNoDelay());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, transport.properties().tcpKeepAlive());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, transport.properties().connectTimeout());
    bootstrap.option(ChannelOption.ALLOCATOR, ALLOCATOR);

    if (transport.properties().sendBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, transport.properties().sendBufferSize());
    }
    if (transport.properties().receiveBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_RCVBUF, transport.properties().receiveBufferSize());
    }

    bootstrap.connect(address.socketAddress()).addListener(channelFuture -> {
        if (channelFuture.isSuccess()) {
            LOGGER.info("Connected to {}", address);
        } else {
            context.execute(() -> future.completeExceptionally(channelFuture.cause()));
        }
    });
    return future;
}
From source file:io.atomix.catalyst.transport.netty.NettyServer.java
License:Apache License
/**
 * Starts listening for the given member.
 */
private void listen(Address address, Consumer<Connection> listener, ThreadContext context) {
    channelGroup = new DefaultChannelGroup("catalyst-acceptor-channels", GlobalEventExecutor.INSTANCE);

    handler = new ServerHandler(connections, listener, context, transport.properties());

    final ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(transport.eventLoopGroup()).channel(NioServerSocketChannel.class)
        .handler(new LoggingHandler(LogLevel.DEBUG))
        .childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel channel) throws Exception {
                ChannelPipeline pipeline = channel.pipeline();
                if (transport.properties().sslEnabled()) {
                    pipeline.addFirst(new SslHandler(new NettyTls(transport.properties()).initSslEngine(false)));
                }
                pipeline.addLast(FIELD_PREPENDER);
                pipeline.addLast(new LengthFieldBasedFrameDecoder(transport.properties().maxFrameSize(), 0, 4, 0, 4));
                pipeline.addLast(handler);
            }
        })
        .option(ChannelOption.SO_BACKLOG, transport.properties().acceptBacklog())
        .option(ChannelOption.TCP_NODELAY, transport.properties().tcpNoDelay())
        .option(ChannelOption.SO_REUSEADDR, transport.properties().reuseAddress())
        .childOption(ChannelOption.ALLOCATOR, ALLOCATOR)
        .childOption(ChannelOption.SO_KEEPALIVE, transport.properties().tcpKeepAlive());

    if (transport.properties().sendBufferSize() != -1) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, transport.properties().sendBufferSize());
    }
    if (transport.properties().receiveBufferSize() != -1) {
        bootstrap.childOption(ChannelOption.SO_RCVBUF, transport.properties().receiveBufferSize());
    }

    LOGGER.info("Binding to {}", address);

    ChannelFuture bindFuture = bootstrap.bind(address.socketAddress());
    bindFuture.addListener((ChannelFutureListener) channelFuture -> {
        if (channelFuture.isSuccess()) {
            listening = true;
            context.executor().execute(() -> {
                LOGGER.info("Listening at {}", bindFuture.channel().localAddress());
                listenFuture.complete(null);
            });
        } else {
            context.execute(() -> listenFuture.completeExceptionally(channelFuture.cause()));
        }
    });
    channelGroup.add(bindFuture.channel());
}
From source file:io.atomix.catalyst.transport.NettyClient.java
License:Apache License
@Override
public CompletableFuture<Connection> connect(Address address) {
    Assert.notNull(address, "address");
    ThreadContext context = ThreadContext.currentContextOrThrow();
    CompletableFuture<Connection> future = new ComposableFuture<>();

    LOGGER.info("Connecting to {}", address);

    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(eventLoopGroup)
        .channel(eventLoopGroup instanceof EpollEventLoopGroup ? EpollSocketChannel.class : NioSocketChannel.class)
        .handler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel channel) throws Exception {
                ChannelPipeline pipeline = channel.pipeline();
                pipeline.addLast(FIELD_PREPENDER);
                pipeline.addLast(new LengthFieldBasedFrameDecoder(1024 * 32, 0, 2, 0, 2));
                pipeline.addLast(new NettyHandler(connections, future::complete, context));
            }
        });

    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000);
    bootstrap.option(ChannelOption.ALLOCATOR, ALLOCATOR);

    bootstrap.connect(address.socketAddress()).addListener(channelFuture -> {
        if (channelFuture.isSuccess()) {
            LOGGER.info("Connected to {}", address);
        } else {
            context.execute(() -> future.completeExceptionally(channelFuture.cause()));
        }
    });
    return future;
}
From source file:io.atomix.catalyst.transport.NettyServer.java
License:Apache License
/**
 * Starts listening for the given member.
 */
private void listen(Address address, Consumer<Connection> listener, ThreadContext context) {
    channelGroup = new DefaultChannelGroup("catalyst-acceptor-channels", GlobalEventExecutor.INSTANCE);

    handler = new ServerHandler(connections, listener, context);

    final ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(eventLoopGroup)
        .channel(eventLoopGroup instanceof EpollEventLoopGroup ? EpollServerSocketChannel.class : NioServerSocketChannel.class)
        .handler(new LoggingHandler(LogLevel.DEBUG))
        .childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel channel) throws Exception {
                ChannelPipeline pipeline = channel.pipeline();
                pipeline.addLast(FIELD_PREPENDER);
                pipeline.addLast(new LengthFieldBasedFrameDecoder(1024 * 32, 0, 2, 0, 2));
                pipeline.addLast(handler);
            }
        })
        .option(ChannelOption.SO_BACKLOG, 128)
        .option(ChannelOption.TCP_NODELAY, true)
        .option(ChannelOption.SO_REUSEADDR, true)
        .childOption(ChannelOption.ALLOCATOR, ALLOCATOR)
        .childOption(ChannelOption.SO_KEEPALIVE, true);

    LOGGER.info("Binding to {}", address);

    ChannelFuture bindFuture = bootstrap.bind(address.socketAddress());
    bindFuture.addListener((ChannelFutureListener) channelFuture -> {
        if (channelFuture.isSuccess()) {
            listening = true;
            context.executor().execute(() -> {
                LOGGER.info("Listening at {}", bindFuture.channel().localAddress());
                listenFuture.complete(null);
            });
        } else {
            context.execute(() -> listenFuture.completeExceptionally(channelFuture.cause()));
        }
    });
    channelGroup.add(bindFuture.channel());
}
From source file:io.hydramq.network.NetworkTopic.java
License:Open Source License
@Override
protected ChannelInitializer<Channel> channelInitializer() {
    return new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(final Channel ch) throws Exception {
            ConversionContext conversionContext = ConversionContext.topicProtocol();
            ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(MAX_FRAME_LENGTH, 0, 4, 0, 4));
            ch.pipeline().addLast("commandDecoder", new CommandDecoder(conversionContext));
            ch.pipeline().addLast("frameEncoder", new LengthFieldPrepender(4));
            ch.pipeline().addLast("commandEncoder", new CommandEncoder(conversionContext));
            ch.pipeline().addLast("logic", new RequestResponseHandler(NetworkTopic.this));
        }
    };
}
From source file:io.hydramq.network.NetworkTopicManager.java
License:Open Source License
@Override
protected ChannelInitializer<Channel> channelInitializer() {
    return new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel ch) throws Exception {
            ConversionContext conversionContext = ConversionContext.topicManagerProtocol();
            ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(MAX_FRAME_LENGTH, 0, 4, 0, 4));
            ch.pipeline().addLast("commandDecoder", new CommandDecoder(conversionContext));
            ch.pipeline().addLast("frameEncoder", new LengthFieldPrepender(4));
            ch.pipeline().addLast("commandEncoder", new CommandEncoder(conversionContext));
            ch.pipeline().addLast("logic", new RequestResponseHandler(NetworkTopicManager.this));
        }
    };
}
From source file:io.hydramq.network.server.HydraServerTransport.java
License:Open Source License
public CompletableFuture<Integer> start() {
    CompletableFuture<Integer> future = new CompletableFuture<>();
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(boss);
        bootstrap.channel(NioServerSocketChannel.class);
        bootstrap.childHandler(new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(final Channel ch) throws Exception {
                ch.attr(ChannelAttributes.COMMAND_FUTURES).set(new ConcurrentHashMap<>());
                ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(MAX_FRAME_LENGTH, 0, 4, 0, 4));
                ch.pipeline().addLast("frameEncoder", new LengthFieldPrepender(4));
                if (verbose) {
                    ch.pipeline().addLast(new LoggingHandler(HydraServerTransport.class, LogLevel.INFO));
                }
                ch.pipeline().addLast(ProtocolSelector.NAME, protocolSelector);
            }
        });
        channel = bootstrap.bind(port).sync().channel();
        future.complete(((InetSocketAddress) channel.localAddress()).getPort());
    } catch (InterruptedException ex) {
        future.completeExceptionally(ex);
    }
    return future;
}
From source file:io.reactivesocket.netty.tcp.client.ClientTcpDuplexConnection.java
License:Apache License
public static Publisher<ClientTcpDuplexConnection> create(SocketAddress address, EventLoopGroup eventLoopGroup) {
    return s -> {
        CopyOnWriteArrayList<Observer<Frame>> subjects = new CopyOnWriteArrayList<>();
        ReactiveSocketClientHandler clientHandler = new ReactiveSocketClientHandler(subjects);
        Bootstrap bootstrap = new Bootstrap();
        ChannelFuture connect = bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.AUTO_READ, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline p = ch.pipeline();
                    p.addLast(
                        new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE >> 1, 0, BitUtil.SIZE_OF_INT,
                            -1 * BitUtil.SIZE_OF_INT, 0),
                        clientHandler);
                }
            })
            .connect(address);

        connect.addListener(connectFuture -> {
            if (connectFuture.isSuccess()) {
                Channel ch = connect.channel();
                s.onNext(new ClientTcpDuplexConnection(ch, subjects));
                s.onComplete();
            } else {
                s.onError(connectFuture.cause());
            }
        });
    };
}
From source file:io.reactivesocket.netty.tcp.server.ReactiveSocketServerHandler.java
License:Apache License
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
    ChannelPipeline cp = ctx.pipeline();
    if (cp.get(LengthFieldBasedFrameDecoder.class) == null) {
        ctx.pipeline().addBefore(ctx.name(), LengthFieldBasedFrameDecoder.class.getName(),
            new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE >> 1, 0, BitUtil.SIZE_OF_INT,
                -1 * BitUtil.SIZE_OF_INT, 0));
    }
}
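The two ReactiveSocket examples above use a different framing convention from the (…, 0, 4, 0, 4) pattern: lengthAdjustment is -4 (the 4-byte length value counts the whole frame, length field included) and initialBytesToStrip is 0 (the prefix is kept in the decoded frame). A small, hypothetical sketch of that convention, with the Agrona BitUtil constants inlined as plain 4s:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import java.nio.charset.StandardCharsets;

public class SelfInclusiveLengthSketch {
    public static void main(String[] args) {
        // lengthAdjustment = -4: the length value already includes the 4-byte length field
        // initialBytesToStrip = 0: hand the frame downstream with its prefix intact
        EmbeddedChannel channel = new EmbeddedChannel(
                new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE >> 1, 0, 4, -4, 0));

        byte[] payload = "frame".getBytes(StandardCharsets.UTF_8);
        ByteBuf in = Unpooled.buffer();
        in.writeInt(4 + payload.length);   // total frame length, length field included
        in.writeBytes(payload);

        channel.writeInbound(in);
        ByteBuf frame = channel.readInbound();
        System.out.println(frame.readableBytes());   // 9: prefix and payload both retained
        frame.release();
        channel.finish();
    }
}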
From source file:io.reactivex.netty.RemoteObservable.java
License:Apache License
private static <T> Observable<T> createTcpConnectionToServer(final ConnectConfiguration<T> params,
        final RemoteUnsubscribe remoteUnsubscribe, final ConnectionMetrics metrics) {
    // XXX remove this after onErrorFlatMap Observable.error() + dematerialize is fixed
    final PublishSubject<T> proxy = PublishSubject.create(); // necessary to inject connection errors into observable returned
    // XXX
    final Decoder<T> decoder = params.getDecoder();
    RxNetty.createTcpClient(params.getHost(), params.getPort(),
        new PipelineConfiguratorComposite<RemoteRxEvent, RemoteRxEvent>(
            new PipelineConfigurator<RemoteRxEvent, RemoteRxEvent>() {
                @Override
                public void configureNewPipeline(ChannelPipeline pipeline) {
                    // pipeline.addFirst(new LoggingHandler(LogLevel.ERROR)); // uncomment to enable debug logging
                    pipeline.addLast("frameEncoder", new LengthFieldPrepender(4)); // 4 bytes to encode length
                    pipeline.addLast("frameDecoder", new LengthFieldBasedFrameDecoder(524288, 0, 4, 0, 4)); // max frame = half MB
                }
            }, new RxEventPipelineConfigurator()))
        .connect()
        // send subscription request, get input stream
        .flatMap(new Func1<ObservableConnection<RemoteRxEvent, RemoteRxEvent>, Observable<RemoteRxEvent>>() {
            @Override
            public Observable<RemoteRxEvent> call(final ObservableConnection<RemoteRxEvent, RemoteRxEvent> connection) {
                // send subscribe event to server
                connection.writeAndFlush(RemoteRxEvent.subscribed(params.getName(), params.getSubscribeParameters()));
                remoteUnsubscribe.setConnection(connection);
                return connection.getInput();
            }
        })
        // retry subscription attempts
        .retry(params.getSubscribeRetryAttempts())
        // handle subscription errors TODO add back after dematerialize fix
        // .onErrorFlatMap(new Func1<OnErrorThrowable, Observable<RemoteRxEvent>>() {
        //     @Override
        //     public Observable<RemoteRxEvent> call(OnErrorThrowable t1) {
        //         params.getSubscribeErrorHandler().call(params, t1);
        //         if (!params.isSuppressSubscribeErrors()) {
        //             return Observable.error(t1);
        //         }
        //         return Observable.empty();
        //     }
        // })
        // XXX remove this after onErrorFlatMap Observable.error() + dematerialize is fixed
        .doOnError(new Action1<Throwable>() {
            @Override
            public void call(Throwable t1) {
                params.getSubscribeErrorHandler().call(new SubscribeInfo(params.getHost(), params.getPort(),
                    params.getName(), params.getSubscribeParameters()), t1);
                if (!params.isSuppressSubscribeErrors()) {
                    proxy.onError(t1); // inject error into stream
                }
            }
        })
        // XXX
        // data received from server
        .map(new Func1<RemoteRxEvent, Notification<T>>() {
            @Override
            public Notification<T> call(RemoteRxEvent rxEvent) {
                if (rxEvent.getType() == RemoteRxEvent.Type.next) {
                    metrics.incrementNextCount();
                    return Notification.createOnNext(decoder.decode(rxEvent.getData()));
                } else if (rxEvent.getType() == RemoteRxEvent.Type.error) {
                    metrics.incrementErrorCount();
                    return Notification.createOnError(fromBytesToThrowable(rxEvent.getData()));
                } else if (rxEvent.getType() == RemoteRxEvent.Type.completed) {
                    metrics.incrementCompletedCount();
                    return Notification.createOnCompleted();
                } else {
                    throw new RuntimeException("RemoteRxEvent of type:" + rxEvent.getType() + ", not supported.");
                }
            }
        })
        // handle decoding exceptions
        // XXX TODO replace with onErrorFlatMap after dematerialize fix
        .doOnError(new Action1<Throwable>() {
            @Override
            public void call(Throwable t1) {
                // TODO currently does not support passing value,
                // without onErrorFlatMap fix, settle for null
                params.getDeocdingErrorHandler().call(null, t1);
                if (!params.isSuppressDecodingErrors()) {
                    proxy.onError(t1);
                }
            }
        })
        // XXX
        .<T>dematerialize()
        // XXX remove this after onErrorFlatMap Observable.error() + dematerialize is fixed
        .subscribe(new Observer<T>() {
            @Override
            public void onCompleted() {
                proxy.onCompleted();
            }

            @Override
            public void onError(Throwable e) {
                proxy.onError(e);
            }

            @Override
            public void onNext(T t) {
                proxy.onNext(t);
            }
        });
    return proxy; // XXX
}