Example usage for io.netty.util.concurrent Promise addListener

List of usage examples for io.netty.util.concurrent Promise addListener

Introduction

On this page you can find example usage of io.netty.util.concurrent Promise.addListener.

Prototype

@Override
Promise<V> addListener(GenericFutureListener<? extends Future<? super V>> listener);
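
A minimal sketch of how addListener is typically wired up (assuming only a Netty EventExecutor, such as a channel's event loop; the class name and printed messages below are illustrative and not taken from any of the projects listed):

import io.netty.channel.Channel;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;

public final class PromiseListenerSketch {

    // Creates a promise on the given executor and registers a listener that runs on completion.
    static Promise<Channel> newWatchedPromise(EventExecutor executor) {
        Promise<Channel> promise = executor.newPromise();
        promise.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(Future<Channel> future) {
                if (future.isSuccess()) {
                    Channel channel = future.getNow();               // value the promise was completed with
                    System.out.println("connected: " + channel);
                } else {
                    System.err.println("failed: " + future.cause()); // failure cause, if any
                }
            }
        });
        return promise;
    }
}

The listener is invoked on the promise's executor once some other component calls setSuccess or setFailure, which is the pattern the examples below use to hand an outbound Channel back to a SOCKS connect handler.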

Source Link

Usage

From source file:cc.agentx.client.net.nio.XConnectHandler.java

License:Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final SocksCmdRequest request) throws Exception {
    boolean proxyMode = isAgentXNeeded(request.host());
    log.info("\tClient -> Proxy           \tTarget {}:{} [{}]", request.host(), request.port(),
            proxyMode ? "AGENTX" : "DIRECT");
    Promise<Channel> promise = ctx.executor().newPromise();
    promise.addListener(new FutureListener<Channel>() {
        @Override
        public void operationComplete(final Future<Channel> future) throws Exception {
            final Channel outboundChannel = future.getNow();
            if (future.isSuccess()) {
                ctx.channel().writeAndFlush(new SocksCmdResponse(SocksCmdStatus.SUCCESS, request.addressType()))
                        .addListener(channelFuture -> {
                            ByteBuf byteBuf = Unpooled.buffer();
                            request.encodeAsByteBuf(byteBuf);
                            if (byteBuf.hasArray()) {
                                byte[] xRequestBytes = new byte[byteBuf.readableBytes()];
                                byteBuf.getBytes(0, xRequestBytes);

                                if (proxyMode) {
                                    // handshaking to remote proxy
                                    xRequestBytes = requestWrapper.wrap(xRequestBytes);
                                    outboundChannel.writeAndFlush(Unpooled.wrappedBuffer(
                                            exposeRequest ? xRequestBytes : wrapper.wrap(xRequestBytes)));
                                }

                                // task handover
                                ReferenceCountUtil.retain(request); // auto-release? a trap?
                                ctx.pipeline().remove(XConnectHandler.this);
                                outboundChannel.pipeline().addLast(new XRelayHandler(ctx.channel(),
                                        proxyMode ? wrapper : rawWrapper, false));
                                ctx.pipeline().addLast(new XRelayHandler(outboundChannel,
                                        proxyMode ? wrapper : rawWrapper, true));
                            }
                        });
            } else {
                ctx.channel()
                        .writeAndFlush(new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));

                if (ctx.channel().isActive()) {
                    ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
                }
            }
        }
    });

    String host = request.host();
    int port = request.port();
    if (host.equals(config.getConsoleDomain())) {
        host = "localhost";
        port = config.getConsolePort();
    } else if (proxyMode) {
        host = config.getServerHost();
        port = config.getServerPort();
    }

    // ping target
    bootstrap.group(ctx.channel().eventLoop()).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
            .handler(new XPingHandler(promise, System.currentTimeMillis())).connect(host, port)
            .addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (!future.isSuccess()) {
                        ctx.channel().writeAndFlush(
                                new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                        if (ctx.channel().isActive()) {
                            ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
                        }
                    }
                }
            });
}

From source file:cc.agentx.server.net.nio.XConnectHandler.java

License:Apache License

@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
    try {
        ByteBuf byteBuf = (ByteBuf) msg;
        if (!byteBuf.hasArray()) {
            byte[] bytes = new byte[byteBuf.readableBytes()];
            byteBuf.getBytes(0, bytes);

            if (!requestParsed) {
                if (!exposedRequest) {
                    bytes = wrapper.unwrap(bytes);
                    if (bytes == null) {
                        log.info("\tClient -> Proxy           \tHalf Request");
                        return;
                    }
                }
                XRequest xRequest = requestWrapper.parse(bytes);
                String host = xRequest.getHost();
                int port = xRequest.getPort();
                int dataLength = xRequest.getSubsequentDataLength();
                if (dataLength > 0) {
                    byte[] tailData = Arrays.copyOfRange(bytes, bytes.length - dataLength, bytes.length);
                    if (exposedRequest) {
                        tailData = wrapper.unwrap(tailData);
                        if (tailData != null) {
                            tailDataBuffer.write(tailData, 0, tailData.length);
                        }
                    } else {
                        tailDataBuffer.write(tailData, 0, tailData.length);
                    }
                }
                log.info("\tClient -> Proxy           \tTarget {}:{}{}", host, port,
                        DnsCache.isCached(host) ? " [Cached]" : "");
                if (xRequest.getAtyp() == XRequest.Type.DOMAIN) {
                    try {
                        host = DnsCache.get(host);
                        if (host == null) {
                            host = xRequest.getHost();
                        }
                    } catch (UnknownHostException e) {
                        log.warn("\tClient <- Proxy           \tBad DNS! ({})", e.getMessage());
                        ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
                        return;
                    }
                }

                Promise<Channel> promise = ctx.executor().newPromise();
                promise.addListener(new FutureListener<Channel>() {
                    @Override
                    public void operationComplete(final Future<Channel> future) throws Exception {
                        final Channel outboundChannel = future.getNow();
                        if (future.isSuccess()) {
                            // handle tail
                            byte[] tailData = tailDataBuffer.toByteArray();
                            tailDataBuffer.close();
                            if (tailData.length > 0) {
                                log.info("\tClient ==========> Target \tSend Tail [{} bytes]", tailData.length);
                            }
                            outboundChannel
                                    .writeAndFlush((tailData.length > 0) ? Unpooled.wrappedBuffer(tailData)
                                            : Unpooled.EMPTY_BUFFER)
                                    .addListener(channelFuture -> {
                                        // task handover
                                        outboundChannel.pipeline()
                                                .addLast(new XRelayHandler(ctx.channel(), wrapper, false));
                                        ctx.pipeline()
                                                .addLast(new XRelayHandler(outboundChannel, wrapper, true));
                                        ctx.pipeline().remove(XConnectHandler.this);
                                    });

                        } else {
                            if (ctx.channel().isActive()) {
                                ctx.writeAndFlush(Unpooled.EMPTY_BUFFER)
                                        .addListener(ChannelFutureListener.CLOSE);
                            }
                        }
                    }
                });

                final String finalHost = host;
                bootstrap.group(ctx.channel().eventLoop()).channel(NioSocketChannel.class)
                        .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000)
                        .option(ChannelOption.SO_KEEPALIVE, true)
                        .handler(new XPingHandler(promise, System.currentTimeMillis())).connect(host, port)
                        .addListener(new ChannelFutureListener() {
                            @Override
                            public void operationComplete(ChannelFuture future) throws Exception {
                                if (!future.isSuccess()) {
                                    if (ctx.channel().isActive()) {
                                        log.warn("\tClient <- Proxy           \tBad Ping! ({}:{})", finalHost,
                                                port);
                                        ctx.writeAndFlush(Unpooled.EMPTY_BUFFER)
                                                .addListener(ChannelFutureListener.CLOSE);
                                    }
                                }
                            }
                        });

                requestParsed = true;
            } else {
                bytes = wrapper.unwrap(bytes);
                if (bytes != null)
                    tailDataBuffer.write(bytes, 0, bytes.length);
            }
        }
    } finally {
        ReferenceCountUtil.release(msg);
    }
}

From source file:cn.david.socks.SocksServerConnectHandler.java

License:Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final SocksCmdRequest request) throws Exception {
    Promise<Channel> promise = ctx.executor().newPromise();
    promise.addListener(new GenericFutureListener<Future<Channel>>() {
        @Override
        public void operationComplete(final Future<Channel> future) throws Exception {
            final Channel outboundChannel = future.getNow();
            if (future.isSuccess()) {
                ctx.channel().writeAndFlush(new SocksCmdResponse(SocksCmdStatus.SUCCESS, request.addressType()))
                        .addListener(new ChannelFutureListener() {
                            @Override
                            public void operationComplete(ChannelFuture channelFuture) {
                                ctx.pipeline().remove(SocksServerConnectHandler.this);
                                outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                                ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                            }
                        });
            } else {
                ctx.channel()
                        .writeAndFlush(new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                SocksServerUtils.closeOnFlush(ctx.channel());
            }
        }
    });

    final Channel inboundChannel = ctx.channel();
    b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
            .handler(new DirectClientHandler(promise));

    b.connect(request.host(), request.port()).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                // Connection established; use handler-provided results
            } else {
                // Close the connection if the connection attempt has failed.
                ctx.channel()
                        .writeAndFlush(new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                SocksServerUtils.closeOnFlush(ctx.channel());
            }
        }
    });
}

From source file:com.addthis.hydra.query.tracker.DetailedStatusHandler.java

License:Apache License

public void handle() {
    if (queryEntry != null) {
        if (queryEntry.lastSourceInfo == null) {
            Promise<TaskSourceInfo[]> promise = new DefaultPromise<>(ctx.executor());
            promise.addListener(this);
            DetailedStatusTask statusTask = new DetailedStatusTask(promise);
            queryEntry.trackerHandler.submitDetailedStatusTask(statusTask);
        } else {
            onSuccess(queryEntry.getStat());
        }
    } else {
        onFailure(new RuntimeException("query entry unexpectedly null"));
    }
}

From source file:com.addthis.hydra.query.web.DetailedStatusHandler.java

License:Apache License

public void handle() {
    if (queryEntry != null) {
        Promise<QueryEntryInfo> promise = new DefaultPromise<>(ctx.executor());
        promise.addListener(this);
        queryEntry.getDetailedQueryEntryInfo(promise);
    } else {
        onFailure(new RuntimeException("query entry unexpectedly null"));
    }
}

From source file:com.cloudera.livy.client.local.rpc.Rpc.java

License:Apache License

/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(final LocalConf config, final EventLoopGroup eloop, String host,
        int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    int connectTimeoutMs = (int) config.getTimeAsMs(RPC_CLIENT_CONNECT_TIMEOUT);

    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask,
            config.getTimeAsMs(RPC_CLIENT_HANDSHAKE_TIMEOUT), TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(config, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(config, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}

From source file:com.cloudera.livy.rsc.rpc.Rpc.java

License:Apache License

/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(final RSCConf config, final EventLoopGroup eloop, String host, int port,
        final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    int connectTimeoutMs = (int) config.getTimeAsMs(RPC_CLIENT_CONNECT_TIMEOUT);

    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask,
            config.getTimeAsMs(RPC_CLIENT_HANDSHAKE_TIMEOUT), TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(config, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(config, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}

From source file:com.emin.igwmp.skm.core.netty.handler.SocksServerConnectHandler.java

License:Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final SocksMessage message) throws Exception {
    if (message instanceof Socks4CommandRequest) {
        final Socks4CommandRequest request = (Socks4CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel()
                            .writeAndFlush(new DefaultSocks4CommandResponse(Socks4CommandStatus.SUCCESS));

                    responseFuture.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                            ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; use handler-provided results
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else if (message instanceof Socks5CommandRequest) {
        final Socks5CommandRequest request = (Socks5CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel()
                            .writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.SUCCESS,
                                    request.dstAddrType(), request.dstAddr(), request.dstPort()));

                    responseFuture.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                            ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; use handler-provided results
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else {
        ctx.close();
    }
}

From source file:com.flysoloing.learning.network.netty.socksproxy.SocksServerConnectHandler.java

License:Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final SocksMessage message) throws Exception {
    if (message instanceof Socks4CommandRequest) {
        final Socks4CommandRequest request = (Socks4CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel()
                            .writeAndFlush(new DefaultSocks4CommandResponse(Socks4CommandStatus.SUCCESS));

                    responseFuture.addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                            ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; use handler-provided results
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else if (message instanceof Socks5CommandRequest) {
        final Socks5CommandRequest request = (Socks5CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel()
                            .writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.SUCCESS,
                                    request.dstAddrType(), request.dstAddr(), request.dstPort()));

                    responseFuture.addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                            ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; use handler-provided results
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else {
        ctx.close();
    }
}

From source file:com.fsocks.SocksServerConnectHandler.java

License:Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final SocksMessage message) throws Exception {
    if (message instanceof Socks4CommandRequest) {
        final Socks4CommandRequest request = (Socks4CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel()
                            .writeAndFlush(new DefaultSocks4CommandResponse(Socks4CommandStatus.SUCCESS));

                    responseFuture.addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                            ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; use handler-provided results
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else if (message instanceof Socks5CommandRequest) {
        final Socks5CommandRequest request = (Socks5CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    // reply SUCCESS to the client
                    ChannelFuture responseFuture = ctx.channel()
                            .writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.SUCCESS,
                                    request.dstAddrType(), request.dstAddr(), request.dstPort()));

                    responseFuture.addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture channelFuture) {
                            // start relaying between client and remote; the local proxy connect handler is no longer needed
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                            ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
        // log the incoming connect request
        logger.info("connect request addressType : " + request.dstAddrType() + ", cmdType:" + request.type()
                + ", host:" + request.dstAddr() + ", port:" + request.dstPort());
        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; use handler-provided results
                    logger.info("connection established addressType : " + request.dstAddrType()
                            + ", cmdType:" + request.type() + ", host:" + request.dstAddr() + ", port:"
                            + request.dstPort());
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else {
        ctx.close();
    }
}
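
Several of the examples above install a DirectClientHandler (or XPingHandler) on the outbound Bootstrap and pass it the promise; that handler is responsible for completing the promise once the outbound connection becomes active. A minimal sketch of such a handler, following the pattern of the Netty proxy example (the class name below is illustrative):

import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.concurrent.Promise;

public final class DirectClientHandlerSketch extends ChannelInboundHandlerAdapter {

    private final Promise<Channel> promise;

    public DirectClientHandlerSketch(Promise<Channel> promise) {
        this.promise = promise;
    }

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        // Outbound connection is up: remove this bootstrap handler and hand the channel
        // to whoever registered a listener on the promise (the SOCKS connect handler).
        ctx.pipeline().remove(this);
        promise.setSuccess(ctx.channel());
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        // Fail the promise so the listener can send a SOCKS failure response and close.
        promise.tryFailure(cause);
    }
}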