Example usage for io.netty.util.concurrent Promise addListener

List of usage examples for io.netty.util.concurrent Promise addListener

Introduction

On this page you can find example usage for io.netty.util.concurrent Promise addListener.

Prototype

@Override
Promise<V> addListener(GenericFutureListener<? extends Future<? super V>> listener);
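
Before the real-world usages below, here is a minimal, self-contained sketch of the method: a listener is registered on a Promise created from an event loop and fires once the promise is completed. The class name and the string payload are invented for illustration and do not come from the projects listed below.

import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.Promise;

public class PromiseAddListenerSketch {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        // Create a promise bound to one of the group's event executors.
        Promise<String> promise = group.next().newPromise();
        // Register the listener before completing the promise; it is invoked
        // on the executor once the promise succeeds or fails.
        promise.addListener(f -> {
            if (f.isSuccess()) {
                System.out.println("completed with: " + f.getNow());
            } else {
                f.cause().printStackTrace();
            }
        });
        promise.setSuccess("done");
        group.shutdownGracefully();
    }
}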

Usage

From source file:io.gatling.http.client.impl.DefaultHttpClient.java

License:Apache License

private Future<Channel> installHttp2Handler(HttpTx tx, Channel channel, ChannelPool channelPool) {

    Promise<Channel> whenAlpn = channel.eventLoop().newPromise();

    channel.pipeline().addAfter(SSL_HANDLER, ALPN_HANDLER,
            new ApplicationProtocolNegotiationHandler(ApplicationProtocolNames.HTTP_1_1) {
                @Override
                protected void configurePipeline(ChannelHandlerContext ctx, String protocol) throws Exception {

                    switch (protocol) {
                    case ApplicationProtocolNames.HTTP_2:
                        LOGGER.debug("ALPN led to HTTP/2 with remote {}", tx.request.getUri().getHost());
                        tx.listener.onProtocolAwareness(true);
                        Http2Connection connection = new DefaultHttp2Connection(false);

                        HttpToHttp2ConnectionHandler http2Handler = new HttpToHttp2ConnectionHandlerBuilder()
                                .initialSettings(Http2Settings.defaultSettings()) // FIXME override?
                                .connection(connection)
                                .frameListener(new DelegatingDecompressorFrameListener(connection,
                                        new ChunkedInboundHttp2ToHttpAdapter(connection, false, true,
                                                whenAlpn)))
                                .build();

                        ctx.pipeline().addLast(HTTP2_HANDLER, http2Handler).addLast(APP_HTTP2_HANDLER,
                                new Http2AppHandler(connection, http2Handler, channelPool, config));

                        channelPool.offer(channel);

                        SslHandler sslHandler = (SslHandler) ctx.pipeline().get(SSL_HANDLER);
                        Set<String> subjectAlternativeNames = Tls
                                .extractSubjectAlternativeNames(sslHandler.engine());
                        if (!subjectAlternativeNames.isEmpty()) {
                            channelPool.addCoalescedChannel(subjectAlternativeNames,
                                    (InetSocketAddress) channel.remoteAddress(), channel, tx.key);
                        }
                        break;

                    case ApplicationProtocolNames.HTTP_1_1:
                        LOGGER.debug("ALPN led to HTTP/1 with remote {}", tx.request.getUri().getHost());
                        if (tx.request.isHttp2PriorKnowledge()) {
                            IllegalStateException e = new IllegalStateException(
                                    "HTTP/2 Prior knowledge was set on host " + tx.request.getUri().getHost()
                                            + " but it only supports HTTP/1");
                            whenAlpn.setFailure(e);
                            throw e;
                        }
                        tx.listener.onProtocolAwareness(false);
                        ctx.pipeline()
                                .addBefore(CHUNKED_WRITER_HANDLER, HTTP_CLIENT_CODEC, newHttpClientCodec())
                                .addBefore(CHUNKED_WRITER_HANDLER, INFLATER_HANDLER,
                                        newHttpContentDecompressor())
                                .addAfter(CHUNKED_WRITER_HANDLER, APP_HTTP_HANDLER,
                                        new HttpAppHandler(DefaultHttpClient.this, channelPool, config));
                        whenAlpn.setSuccess(ctx.channel());
                        break;

                    default:
                        IllegalStateException e = new IllegalStateException("Unknown protocol: " + protocol);
                        whenAlpn.setFailure(e);
                        ctx.close();
                        // FIXME do we really need to throw?
                        throw e;
                    }
                }
            });

    whenAlpn.addListener(f -> {
        if (!f.isSuccess()) {
            tx.listener.onThrowable(f.cause());
        }
    });

    return whenAlpn;
}

From source file:io.vertx.core.dns.impl.fix.DnsNameResolver.java

License:Apache License

private void doResolveUncached(String hostname, Promise<InetAddress> promise, DnsCache resolveCache,
        boolean trySearchDomain) {
    if (trySearchDomain) {
        Promise<InetAddress> original = promise;
        promise = new DefaultPromise<>(executor());
        FutureListener<InetAddress> globalListener = future -> {
            if (future.isSuccess()) {
                original.setSuccess(future.getNow());
            } else {
                FutureListener<InetAddress> sdListener = new FutureListener<InetAddress>() {
                    int count;

                    @Override
                    public void operationComplete(Future<InetAddress> future) throws Exception {
                        if (future.isSuccess()) {
                            original.trySuccess(future.getNow());
                        } else {
                            if (count < searchDomains.size()) {
                                String searchDomain = searchDomains.get(count++);
                                Promise<InetAddress> p = new DefaultPromise<>(executor());
                                doResolveUncached(hostname + "." + searchDomain, p, resolveCache, false);
                                p.addListener(this);
                            } else {
                                original.tryFailure(future.cause());
                            }
                        }
                    }
                };
                future.addListener(sdListener);
            }
        };
        promise.addListener(globalListener);
    }
    if (searchDomains(hostname)) {
        promise.tryFailure(new UnknownHostException(hostname));
    } else {
        final DnsNameResolverContext<InetAddress> ctx = new DnsNameResolverContext<InetAddress>(this, hostname,
                promise, resolveCache) {
            @Override
            protected boolean finishResolve(Class<? extends InetAddress> addressType,
                    List<DnsCacheEntry> resolvedEntries) {

                final int numEntries = resolvedEntries.size();
                for (int i = 0; i < numEntries; i++) {
                    final InetAddress a = resolvedEntries.get(i).address();
                    if (addressType.isInstance(a)) {
                        setSuccess(promise(), a);
                        return true;
                    }
                }
                return false;
            }
        };

        ctx.resolve();
    }
}

From source file:me.ferrybig.javacoding.teamspeakconnector.internal.TeamspeakIO.java

License:Open Source License

public Future<ComplexResponse> sendPacket(ComplexRequest req, SendBehaviour sendBehaviour) {
    if (closed) {
        if (sendBehaviour != SendBehaviour.NORMAL) {
            return this.closeFuture;
        }
        return channel.eventLoop().newFailedFuture(new TeamspeakException("Channel closed"));
    }
    Promise<ComplexResponse> prom = channel.eventLoop().newPromise();
    ChannelFuture future;
    synchronized (incomingQueue) {
        if (closed) {
            if (sendBehaviour != SendBehaviour.NORMAL) {
                return this.closeFuture;
            }
            return prom.setFailure(new TeamspeakException("Channel closed"));
        }
        incomingQueue.offer(new PendingPacket(prom, req, sendBehaviour));
        future = channel.writeAndFlush(req);
    }
    future.addListener(upstream -> {
        assert upstream == future;
        if (sendBehaviour == SendBehaviour.FORCE_CLOSE_CONNECTION) {
            channel.eventLoop().schedule(() -> {
                if (channel.isActive()) {
                    LOG.fine("Closing channel by timeout");
                    channel.close();
                }
            }, 10, TimeUnit.SECONDS);
        }
        if (!upstream.isSuccess()) {
            synchronized (incomingQueue) {
                if (incomingQueue.removeIf(prom::equals)) {
                    prom.setFailure(new TeamspeakException("Exception during sending", upstream.cause()));
                }
            }
        }
    });
    if (sendBehaviour == SendBehaviour.CLOSE_CONNECTION
            || sendBehaviour == SendBehaviour.FORCE_CLOSE_CONNECTION) {
        prom.addListener(upstream -> {
            assert upstream == prom;
            if (prom.isSuccess()) {
                synchronized (incomingQueue) {
                    this.closed = true;
                }
                channel.close();
                LOG.fine("Closing channel because sendmessage asked it");
            }
        });
    }

    return prom;
}

From source file:me.ferrybig.p2pnetwork.Peer.java

public Future<?> pingAddress(Address addr) {
    int packetNumber = pingPacketCounter.getAndIncrement();
    byte[] data = new byte[4];
    ByteBuf wrappedBuffer = Unpooled.wrappedBuffer(data);
    wrappedBuffer.writerIndex(0);
    wrappedBuffer.writeInt(packetNumber);
    assert wrappedBuffer.array() == data;
    Promise<PongPacket> promise = events.newPromise();
    pingListeners.put(packetNumber, promise);
    promise.addListener(e -> pingListeners.remove(packetNumber));
    boolean send = routePacket(addr, new PingPacket(data));
    if (!send) {
        promise.setFailure(new IllegalArgumentException("Unknown address"));
    }
    return promise;
}

From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java

License:Apache License

private <A> void flush0(final A attachment, final CompletionHandler<Long, ? super A> handler,
        boolean syncBlock) {
    if (state != State.STREAMING) {
        handler.failed(new IOException("stream already broken"), attachment);
        return;
    }
    int dataLen = buf.readableBytes();
    final long ackedLength = nextPacketOffsetInBlock + dataLen;
    if (ackedLength == locatedBlock.getBlock().getNumBytes()) {
        // no new data, just return
        handler.completed(locatedBlock.getBlock().getNumBytes(), attachment);
        return;
    }
    Promise<Void> promise = eventLoop.newPromise();
    promise.addListener(new FutureListener<Void>() {

        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            if (future.isSuccess()) {
                locatedBlock.getBlock().setNumBytes(ackedLength);
                handler.completed(ackedLength, attachment);
            } else {
                handler.failed(future.cause(), attachment);
            }
        }
    });
    Callback c = waitingAckQueue.peekLast();
    if (c != null && ackedLength == c.ackedLength) {
        // just append it to the tail of the waiting ack queue, do not issue a new hflush request.
        waitingAckQueue.addLast(new Callback(promise, ackedLength, Collections.<Channel>emptyList()));
        return;
    }
    int chunkLen = summer.getBytesPerChecksum();
    int trailingPartialChunkLen = dataLen % chunkLen;
    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
    int checksumLen = numChecks * summer.getChecksumSize();
    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
    summer.calculateChunkedSums(buf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
    checksumBuf.writerIndex(checksumLen);
    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock, nextPacketSeqno,
            false, dataLen, syncBlock);
    int headerLen = header.getSerializedSize();
    ByteBuf headerBuf = alloc.buffer(headerLen);
    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
    headerBuf.writerIndex(headerLen);

    waitingAckQueue.addLast(new Callback(promise, ackedLength, datanodeList));
    for (Channel ch : datanodeList) {
        ch.write(headerBuf.duplicate().retain());
        ch.write(checksumBuf.duplicate().retain());
        ch.writeAndFlush(buf.duplicate().retain());
    }
    checksumBuf.release();
    headerBuf.release();
    ByteBuf newBuf = alloc.directBuffer().ensureWritable(trailingPartialChunkLen);
    if (trailingPartialChunkLen != 0) {
        buf.readerIndex(dataLen - trailingPartialChunkLen).readBytes(newBuf, trailingPartialChunkLen);
    }
    buf.release();
    this.buf = newBuf;
    nextPacketOffsetInBlock += dataLen - trailingPartialChunkLen;
    nextPacketSeqno++;
}

From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.java

License:Apache License

/**
 * End the current block and complete file at namenode. You should call
 * {@link #recoverAndClose(CancelableProgressable)} if this method throws an exception.
 */
@Override
public void close() throws IOException {
    assert !eventLoop.inEventLoop();
    final Promise<Void> promise = eventLoop.newPromise();
    eventLoop.execute(new Runnable() {

        @Override
        public void run() {
            endBlock(promise, nextPacketOffsetInBlock + buf.readableBytes());
        }
    });
    promise.addListener(new FutureListener<Void>() {

        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            for (Channel ch : datanodeList) {
                ch.close();
            }
        }
    }).syncUninterruptibly();
    for (Channel ch : datanodeList) {
        ch.closeFuture().awaitUninterruptibly();
    }
    completeFile(client, namenode, src, clientName, locatedBlock.getBlock(), fileId);
}

From source file:org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.java

License:Apache License

private static void initialize(Configuration conf, final Channel channel, final DatanodeInfo dnInfo,
        final Enum<?> storageType, final OpWriteBlockProto.Builder writeBlockProtoBuilder, final int timeoutMs,
        DFSClient client, Token<BlockTokenIdentifier> accessToken, final Promise<Channel> promise) {
    Promise<Void> saslPromise = channel.eventLoop().newPromise();
    trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
    saslPromise.addListener(new FutureListener<Void>() {

        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            if (future.isSuccess()) {
                // setup response processing pipeline first, then send request.
                processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
                requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
            } else {
                promise.tryFailure(future.cause());
            }
        }
    });
}

From source file:org.apache.hadoop.hbase.ipc.NettyRpcConnection.java

License:Apache License

private void saslNegotiate(final Channel ch) {
    UserGroupInformation ticket = getUGI();
    if (ticket == null) {
        failInit(ch, new FatalConnectionException("ticket/user is null"));
        return;
    }
    Promise<Boolean> saslPromise = ch.eventLoop().newPromise();
    final NettyHBaseSaslRpcClientHandler saslHandler;
    try {
        saslHandler = new NettyHBaseSaslRpcClientHandler(saslPromise, ticket, authMethod, token,
                serverPrincipal, rpcClient.fallbackAllowed, this.rpcClient.conf);
    } catch (IOException e) {
        failInit(ch, e);
        return;
    }
    ch.pipeline().addFirst(new SaslChallengeDecoder(), saslHandler);
    saslPromise.addListener(new FutureListener<Boolean>() {

        @Override
        public void operationComplete(Future<Boolean> future) throws Exception {
            if (future.isSuccess()) {
                ChannelPipeline p = ch.pipeline();
                p.remove(SaslChallengeDecoder.class);
                p.remove(NettyHBaseSaslRpcClientHandler.class);

                // check if negotiate with server for connection header is necessary
                if (saslHandler.isNeedProcessConnectionHeader()) {
                    Promise<Boolean> connectionHeaderPromise = ch.eventLoop().newPromise();
                    // create the handler to handle the connection header
                    ChannelHandler chHandler = new NettyHBaseRpcConnectionHeaderHandler(connectionHeaderPromise,
                            conf, connectionHeaderWithLength);

                    // add ReadTimeoutHandler in case the server doesn't respond with the connection header
                    // because of differing configuration on the client and server side
                    p.addFirst(new ReadTimeoutHandler(RpcClient.DEFAULT_SOCKET_TIMEOUT_READ,
                            TimeUnit.MILLISECONDS));
                    p.addLast(chHandler);
                    connectionHeaderPromise.addListener(new FutureListener<Boolean>() {
                        @Override
                        public void operationComplete(Future<Boolean> future) throws Exception {
                            if (future.isSuccess()) {
                                ChannelPipeline p = ch.pipeline();
                                p.remove(ReadTimeoutHandler.class);
                                p.remove(NettyHBaseRpcConnectionHeaderHandler.class);
                                // don't send connection header, NettyHbaseRpcConnectionHeaderHandler
                                // sent it already
                                established(ch);
                            } else {
                                final Throwable error = future.cause();
                                scheduleRelogin(error);
                                failInit(ch, toIOE(error));
                            }
                        }
                    });
                } else {
                    // send the connection header to server
                    ch.write(connectionHeaderWithLength.retainedDuplicate());
                    established(ch);
                }
            } else {
                final Throwable error = future.cause();
                scheduleRelogin(error);
                failInit(ch, toIOE(error));
            }
        }
    });
}

From source file:org.apache.hive.spark.client.rpc.Rpc.java

License:Apache License

/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop, String host,
        int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    final RpcConfiguration rpcConf = new RpcConfiguration(config);
    int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();

    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, rpcConf.getServerConnectTimeoutMs(),
            TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}

From source file:org.apache.hive.spark.client.rpc.RpcServer.java

License:Apache License

@VisibleForTesting
Future<Rpc> registerClient(final String clientId, String secret, RpcDispatcher serverDispatcher,
        long clientTimeoutMs) {
    final Promise<Rpc> promise = group.next().newPromise();

    Runnable timeout = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for client connection."));
        }
    };
    ScheduledFuture<?> timeoutFuture = group.schedule(timeout, clientTimeoutMs, TimeUnit.MILLISECONDS);
    final ClientInfo client = new ClientInfo(clientId, promise, secret, serverDispatcher, timeoutFuture);
    if (pendingClients.putIfAbsent(clientId, client) != null) {
        throw new IllegalStateException(String.format("Client '%s' already registered.", clientId));
    }

    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (!p.isSuccess()) {
                pendingClients.remove(clientId);
            }
        }
    });

    return promise;
}