Example usage for the io.netty.handler.codec.LengthFieldBasedFrameDecoder constructor

Introduction

On this page you can find example usages of the io.netty.handler.codec.LengthFieldBasedFrameDecoder constructor, collected from open-source projects.

Prototype

public LengthFieldBasedFrameDecoder(int maxFrameLength, int lengthFieldOffset, int lengthFieldLength,
        int lengthAdjustment, int initialBytesToStrip) 

Document

Creates a new instance.
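The five parameters control how frames are extracted: maxFrameLength caps the size of a frame (anything longer triggers a TooLongFrameException), lengthFieldOffset and lengthFieldLength locate the length field inside the header, lengthAdjustment compensates when the length field value counts more or fewer bytes than the ones that follow it, and initialBytesToStrip removes leading header bytes from the emitted frame.

As a minimal sketch of the constructor in action (the payload and class name here are illustrative, not taken from the projects below), an EmbeddedChannel can be used to observe the framing:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;

import java.nio.charset.StandardCharsets;

public class FrameDecoderSketch {
    public static void main(String[] args) {
        // 2-byte length field at offset 0, stripped from the emitted frame
        // (the same (1048576, 0, 2, 0, 2) configuration used by the
        // CarbonData examples below).
        EmbeddedChannel channel = new EmbeddedChannel(
                new LengthFieldBasedFrameDecoder(1048576, 0, 2, 0, 2));

        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        ByteBuf in = Unpooled.buffer();
        in.writeShort(payload.length); // length field counts only the payload
        in.writeBytes(payload);

        channel.writeInbound(in);

        // The decoder emits the payload with the 2-byte header stripped.
        ByteBuf frame = channel.readInbound();
        System.out.println(frame.toString(StandardCharsets.UTF_8)); // "hello"
        frame.release();
        channel.finish();
    }
}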

Usage

From source file: org.apache.carbondata.core.dictionary.client.NonSecureDictionaryClient.java

License: Apache License

/**
 * start dictionary client
 *
 * @param address
 * @param port
 */
@Override
public void startClient(String secretKey, String address, int port, boolean encryptSecureServer) {
    LOGGER.info("Starting client on " + address + " " + port);
    long start = System.currentTimeMillis();
    // Create an event loop group with 1 thread.
    workerGroup = new NioEventLoopGroup(1);
    Bootstrap clientBootstrap = new Bootstrap();
    clientBootstrap.group(workerGroup).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline pipeline = ch.pipeline();
                    // Based on the length provided in the header, it collects all packets
                    pipeline.addLast("LengthDecoder", new LengthFieldBasedFrameDecoder(1048576, 0, 2, 0, 2));
                    pipeline.addLast("NonSecureDictionaryClientHandler", nonSecureDictionaryClientHandler);
                }
            });
    clientBootstrap.connect(new InetSocketAddress(address, port));
    LOGGER.info("Dictionary client Started, Total time spent : " + (System.currentTimeMillis() - start));
}
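
In this client pipeline the arguments (1048576, 0, 2, 0, 2) mean: frames of at most 1 MiB, a 2-byte length field at offset 0, no length adjustment, and a 2-byte header that is stripped before the frame reaches the dictionary client handler.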

From source file: org.apache.carbondata.core.dictionary.server.NonSecureDictionaryServer.java

License: Apache License

/**
 * Binds dictionary server to an available port.
 *
 */
@Override
public void bindToPort() {
    long start = System.currentTimeMillis();
    // Configure the server.
    int i = 0;
    while (i < 10) {
        int newPort = port + i;
        try {
            ServerBootstrap bootstrap = new ServerBootstrap();
            bootstrap.group(boss, worker);
            bootstrap.channel(NioServerSocketChannel.class);
            bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline pipeline = ch.pipeline();
                    pipeline.addLast("LengthDecoder", new LengthFieldBasedFrameDecoder(1048576, 0, 2, 0, 2));
                    pipeline.addLast("NonSecureDictionaryServerHandler", nonSecureDictionaryServerHandler);
                }
            });
            bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
            String hostToBind = findLocalIpAddress(LOGGER);
            // iteratively listen on new ports
            InetSocketAddress address = hostToBind == null ? new InetSocketAddress(newPort)
                    : new InetSocketAddress(hostToBind, newPort);
            bootstrap.bind(address).sync();
            LOGGER.info("Dictionary Server started, Time spent " + (System.currentTimeMillis() - start)
                    + " Listening on port " + newPort);
            this.port = newPort;
            this.host = hostToBind;
            break;
        } catch (Exception e) {
            LOGGER.error("Dictionary Server Failed to bind to port:" + newPort, e);
            if (i == 9) {
                throw new RuntimeException("Dictionary Server Could not bind to any port");
            }
        }
        i++;
    }
}

From source file: org.apache.flink.runtime.query.netty.KvStateClient.java

License: Apache License

/**
 * Creates a client with the specified number of event loop threads.
 *
 * @param numEventLoopThreads Number of event loop threads (minimum 1).
 */
public KvStateClient(int numEventLoopThreads, KvStateRequestStats stats) {
    Preconditions.checkArgument(numEventLoopThreads >= 1, "Non-positive number of event loop threads.");
    NettyBufferPool bufferPool = new NettyBufferPool(numEventLoopThreads);

    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("Flink KvStateClient Event Loop Thread %d").build();

    NioEventLoopGroup nioGroup = new NioEventLoopGroup(numEventLoopThreads, threadFactory);

    this.bootstrap = new Bootstrap().group(nioGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.ALLOCATOR, bufferPool).handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4))
                            // ChunkedWriteHandler respects Channel writability
                            .addLast(new ChunkedWriteHandler());
                }
            });

    this.stats = Preconditions.checkNotNull(stats, "Statistics tracker");
}
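
This and the following Flink snippets all use (Integer.MAX_VALUE, 0, 4, 0, 4): an effectively unbounded maximum frame length with a 4-byte length prefix at offset 0 that is stripped before the message reaches the KvState handlers.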

From source file: org.apache.flink.runtime.query.netty.KvStateClientTest.java

License: Apache License

private Channel createServerChannel(final ChannelHandler... handlers)
        throws UnknownHostException, InterruptedException {
    ServerBootstrap bootstrap = new ServerBootstrap()
            // Bind address and port
            .localAddress(InetAddress.getLocalHost(), 0)
            // NIO server channels
            .group(NIO_GROUP).channel(NioServerSocketChannel.class)
            // See initializer for pipeline details
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4))
                            .addLast(handlers);
                }
            });

    return bootstrap.bind().sync().channel();
}

From source file: org.apache.flink.runtime.query.netty.KvStateServerHandlerTest.java

License: Apache License

/**
 * Frame length decoder (expected by the serialized messages).
 */
private ChannelHandler getFrameDecoder() {
    return new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4);
}

From source file: org.apache.flink.runtime.query.netty.KvStateServerTest.java

License: Apache License

/**
 * Tests a simple successful query via a SocketChannel.
 */
@Test
public void testSimpleRequest() throws Exception {
    KvStateServer server = null;
    Bootstrap bootstrap = null;
    try {
        KvStateRegistry registry = new KvStateRegistry();
        KvStateRequestStats stats = new AtomicKvStateRequestStats();

        server = new KvStateServer(InetAddress.getLocalHost(), 0, 1, 1, registry, stats);
        server.start();

        KvStateServerAddress serverAddress = server.getAddress();
        int numKeyGroups = 1;
        AbstractStateBackend abstractBackend = new MemoryStateBackend();
        DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
        dummyEnv.setKvStateRegistry(registry);
        AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(dummyEnv,
                new JobID(), "test_op", IntSerializer.INSTANCE, numKeyGroups, new KeyGroupRange(0, 0),
                registry.createTaskRegistry(new JobID(), new JobVertexID()));

        final KvStateServerHandlerTest.TestRegistryListener registryListener = new KvStateServerHandlerTest.TestRegistryListener();

        registry.registerListener(registryListener);

        ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
        desc.setQueryable("vanilla");

        ValueState<Integer> state = backend.getPartitionedState(VoidNamespace.INSTANCE,
                VoidNamespaceSerializer.INSTANCE, desc);

        // Update KvState
        int expectedValue = 712828289;

        int key = 99812822;
        backend.setCurrentKey(key);
        state.update(expectedValue);

        // Request
        byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(key,
                IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);

        // Connect to the server
        final BlockingQueue<ByteBuf> responses = new LinkedBlockingQueue<>();
        bootstrap = createBootstrap(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4),
                new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                        responses.add((ByteBuf) msg);
                    }
                });

        Channel channel = bootstrap.connect(serverAddress.getHost(), serverAddress.getPort()).sync().channel();

        long requestId = Integer.MAX_VALUE + 182828L;

        assertTrue(registryListener.registrationName.equals("vanilla"));
        ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(channel.alloc(), requestId,
                registryListener.kvStateId, serializedKeyAndNamespace);

        channel.writeAndFlush(request);

        ByteBuf buf = responses.poll(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);

        assertEquals(KvStateRequestType.REQUEST_RESULT, KvStateRequestSerializer.deserializeHeader(buf));
        KvStateRequestResult response = KvStateRequestSerializer.deserializeKvStateRequestResult(buf);

        assertEquals(requestId, response.getRequestId());
        int actualValue = KvStateRequestSerializer.deserializeValue(response.getSerializedResult(),
                IntSerializer.INSTANCE);
        assertEquals(expectedValue, actualValue);
    } finally {
        if (server != null) {
            server.shutDown();
        }

        if (bootstrap != null) {
            EventLoopGroup group = bootstrap.group();
            if (group != null) {
                group.shutdownGracefully();
            }
        }
    }
}

From source file: org.apache.giraph.comm.netty.NettyClient.java

License: Apache License

/**
 * Only constructor
 *
 * @param context Context for progress
 * @param conf Configuration
 * @param myTaskInfo Current task info
 * @param exceptionHandler handler for uncaught exception. Will
 *                         terminate job.
 */
public NettyClient(Mapper<?, ?, ?, ?>.Context context, final ImmutableClassesGiraphConfiguration conf,
        TaskInfo myTaskInfo, final Thread.UncaughtExceptionHandler exceptionHandler) {
    this.context = context;
    this.myTaskInfo = myTaskInfo;
    this.channelsPerServer = GiraphConstants.CHANNELS_PER_SERVER.get(conf);
    sendBufferSize = CLIENT_SEND_BUFFER_SIZE.get(conf);
    receiveBufferSize = CLIENT_RECEIVE_BUFFER_SIZE.get(conf);

    limitNumberOfOpenRequests = conf.getBoolean(LIMIT_NUMBER_OF_OPEN_REQUESTS,
            LIMIT_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
    if (limitNumberOfOpenRequests) {
        maxNumberOfOpenRequests = conf.getInt(MAX_NUMBER_OF_OPEN_REQUESTS, MAX_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Limit number of open requests to " + maxNumberOfOpenRequests);
        }
    } else {
        maxNumberOfOpenRequests = -1;
    }

    maxRequestMilliseconds = MAX_REQUEST_MILLISECONDS.get(conf);

    maxConnectionFailures = NETTY_MAX_CONNECTION_FAILURES.get(conf);

    waitingRequestMsecs = WAITING_REQUEST_MSECS.get(conf);

    maxPoolSize = GiraphConstants.NETTY_CLIENT_THREADS.get(conf);

    maxResolveAddressAttempts = MAX_RESOLVE_ADDRESS_ATTEMPTS.get(conf);

    clientRequestIdRequestInfoMap = new MapMaker().concurrencyLevel(maxPoolSize).makeMap();

    handlerToUseExecutionGroup = NETTY_CLIENT_EXECUTION_AFTER_HANDLER.get(conf);
    useExecutionGroup = NETTY_CLIENT_USE_EXECUTION_HANDLER.get(conf);
    if (useExecutionGroup) {
        int executionThreads = NETTY_CLIENT_EXECUTION_THREADS.get(conf);
        executionGroup = new DefaultEventExecutorGroup(executionThreads,
                ThreadUtils.createThreadFactory("netty-client-exec-%d", exceptionHandler));
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Using execution handler with " + executionThreads + " threads after "
                    + handlerToUseExecutionGroup + ".");
        }
    } else {
        executionGroup = null;
    }

    workerGroup = new NioEventLoopGroup(maxPoolSize,
            ThreadUtils.createThreadFactory("netty-client-worker-%d", exceptionHandler));

    bootstrap = new Bootstrap();
    bootstrap.group(workerGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, MAX_CONNECTION_MILLISECONDS_DEFAULT)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.SO_SNDBUF, sendBufferSize).option(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    /*if_not[HADOOP_NON_SECURE]*/
                    if (conf.authenticate()) {
                        LOG.info("Using Netty with authentication.");

                        // Our pipeline starts with just byteCounter, and then we use
                        // addLast() to incrementally add pipeline elements, so that we
                        // can name them for identification for removal or replacement
                        // after client is authenticated by server.
                        // After authentication is complete, the pipeline's SASL-specific
                        // functionality is removed, restoring the pipeline to exactly the
                        // same configuration as it would be without authentication.
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        // The following pipeline component is needed to decode the
                        // server's SASL tokens. It is replaced with a
                        // FixedLengthFrameDecoder (same as used with the
                        // non-authenticated pipeline) after authentication
                        // completes (as in non-auth pipeline below).
                        PipelineUtils.addLastWithExecutorCheck("length-field-based-frame-decoder",
                                new LengthFieldBasedFrameDecoder(1024, 0, 4, 0, 4), handlerToUseExecutionGroup,
                                executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        // The following pipeline component responds to the server's SASL
                        // tokens with its own responses. Both client and server share the
                        // same Hadoop Job token, which is used to create the SASL
                        // tokens to authenticate with each other.
                        // After authentication finishes, this pipeline component
                        // is removed.
                        PipelineUtils.addLastWithExecutorCheck("sasl-client-handler",
                                new SaslClientHandler(conf), handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                    } else {
                        LOG.info("Using Netty without authentication.");
                        /*end[HADOOP_NON_SECURE]*/
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("fixed-length-frame-decoder",
                                new FixedLengthFrameDecoder(RequestServerHandler.RESPONSE_BYTES),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);

                        /*if_not[HADOOP_NON_SECURE]*/
                    }
                    /*end[HADOOP_NON_SECURE]*/
                }
            });
}
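
Note the small maxFrameLength of 1024 in the authenticated branch: that decoder only needs to frame SASL tokens and, as the comments above explain, it is replaced with a FixedLengthFrameDecoder once authentication completes.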

From source file: org.apache.giraph.comm.netty.NettyServer.java

License: Apache License

/**
 * Start the server with the appropriate port
 */
public void start() {
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, tcpBacklog)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, sendBufferSize)
            .childOption(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .childOption(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(receiveBufferSize / 4,
                    receiveBufferSize, receiveBufferSize));

    /**
     * Pipeline setup: depends on whether configured to use authentication
     * or not.
     */
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            /*if_not[HADOOP_NON_SECURE]*/
            if (conf.authenticate()) {
                LOG.info("start: Will use Netty pipeline with "
                        + "authentication and authorization of clients.");
                // After a client authenticates, the two authentication-specific
                // pipeline components SaslServerHandler and ResponseEncoder are
                // removed, leaving the pipeline the same as in the non-authenticated
                // configuration except for the presence of the Authorize component.
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("saslServerHandler",
                        saslServerHandlerFactory.newHandler(conf), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck("authorizeServerHandler", new AuthorizeServerHandler(),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("responseEncoder", new ResponseEncoder(),
                        handlerToUseExecutionGroup, executionGroup, ch);
            } else {
                LOG.info("start: Using Netty without authentication.");
                /*end[HADOOP_NON_SECURE]*/
                // Store all connected channels in order to ensure that we can close
                // them on stop(), or else stop() may hang waiting for the
                // connections to close on their own
                ch.pipeline().addLast("connectedChannels", new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) throws Exception {
                        accepted.add(ctx.channel());
                        ctx.fireChannelActive();
                    }
                });
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                /*if_not[HADOOP_NON_SECURE]*/
            }
            /*end[HADOOP_NON_SECURE]*/
        }
    });

    int taskId = conf.getTaskPartition();
    int numTasks = conf.getInt("mapred.map.tasks", 1);
    // Number of workers + 1 for master
    int numServers = conf.getInt(GiraphConstants.MAX_WORKERS, numTasks) + 1;
    int portIncrementConstant = (int) Math.pow(10, Math.ceil(Math.log10(numServers)));
    int bindPort = GiraphConstants.IPC_INITIAL_PORT.get(conf) + taskId;
    int bindAttempts = 0;
    final int maxIpcPortBindAttempts = MAX_IPC_PORT_BIND_ATTEMPTS.get(conf);
    final boolean failFirstPortBindingAttempt = GiraphConstants.FAIL_FIRST_IPC_PORT_BIND_ATTEMPT.get(conf);

    // Simple handling of port collisions on the same machine while
    // preserving debuggability from the port number alone.
    // Round up the max number of workers to the next power of 10 and use
    // it as a constant to increase the port number with.
    while (bindAttempts < maxIpcPortBindAttempts) {
        this.myAddress = new InetSocketAddress(localHostname, bindPort);
        if (failFirstPortBindingAttempt && bindAttempts == 0) {
            if (LOG.isInfoEnabled()) {
                LOG.info("start: Intentionally fail first "
                        + "binding attempt as giraph.failFirstIpcPortBindAttempt " + "is true, port "
                        + bindPort);
            }
            ++bindAttempts;
            bindPort += portIncrementConstant;
            continue;
        }

        try {
            ChannelFuture f = bootstrap.bind(myAddress).sync();
            accepted.add(f.channel());
            break;
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
            // CHECKSTYLE: stop IllegalCatchCheck
        } catch (Exception e) {
            // CHECKSTYLE: resume IllegalCatchCheck
            LOG.warn("start: Likely failed to bind on attempt " + bindAttempts + " to port " + bindPort,
                    e.getCause());
            ++bindAttempts;
            bindPort += portIncrementConstant;
        }
    }
    if (bindAttempts == maxIpcPortBindAttempts || myAddress == null) {
        throw new IllegalStateException(
                "start: Failed to start NettyServer with " + bindAttempts + " attempts");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("start: Started server " + "communication server: " + myAddress + " with up to " + maxPoolSize
                + " threads on bind attempt " + bindAttempts + " with sendBufferSize = " + sendBufferSize
                + " receiveBufferSize = " + receiveBufferSize);
    }
}
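
On the server side the frame decoder is configured as (1024 * 1024 * 1024, 0, 4, 0, 4), a 1 GiB ceiling with the same stripped 4-byte length prefix, so that full-sized worker requests (rather than just SASL tokens) can be framed.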

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannel.java

License: Apache License

/**
 * Start HBase connection
 *
 * @param ch channel to start connection on
 */
private void startHBaseConnection(Channel ch) {
    ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
    ch.pipeline().addLast(new AsyncServerResponseHandler(this));
    try {
        writeChannelHeader(ch).addListener(new GenericFutureListener<ChannelFuture>() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (!future.isSuccess()) {
                    close(future.cause());
                    return;
                }
                List<AsyncCall> callsToWrite;
                synchronized (pendingCalls) {
                    connected = true;
                    callsToWrite = new ArrayList<AsyncCall>(pendingCalls.values());
                }
                for (AsyncCall call : callsToWrite) {
                    writeRequest(call);
                }
            }
        });
    } catch (IOException e) {
        close(e);
    }
}

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannelImpl.java

License: Apache License

private void startConnectionWithEncryption(Channel ch) {
    // for rpc encryption, the order of ChannelInboundHandler should be:
    // LengthFieldBasedFrameDecoder->SaslClientHandler->LengthFieldBasedFrameDecoder
    // Don't skip the first 4 bytes for length in beforeUnwrapDecoder,
    // SaslClientHandler will handle this
    ch.pipeline().addFirst("beforeUnwrapDecoder",
            new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 0));
    ch.pipeline().addLast("afterUnwrapDecoder",
            new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
    ch.pipeline().addLast(new AsyncServerResponseHandler(this));
    List<AsyncCall> callsToWrite;
    synchronized (pendingCalls) {
        connected = true;
        callsToWrite = new ArrayList<AsyncCall>(pendingCalls.values());
    }
    for (AsyncCall call : callsToWrite) {
        writeRequest(call);
    }
}
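
The two decoders differ only in initialBytesToStrip: beforeUnwrapDecoder keeps the 4 length bytes (strip of 0) so that SaslClientHandler receives each complete wrapped packet including its length prefix, while afterUnwrapDecoder strips them (strip of 4) once the payload has been unwrapped.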