Example usage for io.netty.channel.group DefaultChannelGroup DefaultChannelGroup

Introduction

This page shows example usage of the io.netty.channel.group DefaultChannelGroup constructor, collected from open-source projects.

Prototype

public DefaultChannelGroup(EventExecutor executor, boolean stayClosed) 

Document

Creates a new group with a generated name and the provided EventExecutor to notify the ChannelGroupFutures.
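
A minimal, self-contained sketch of the prototype constructor before the project examples. Note that most examples below use the related overload DefaultChannelGroup(String name, EventExecutor executor); this snippet uses the (executor, stayClosed) form, with the channel registration and broadcast calls shown as illustrative comments.

import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.concurrent.GlobalEventExecutor;

public class StayClosedGroupExample {
    public static void main(String[] args) {
        // stayClosed == true means the group remains closed once close() is
        // called: channels added afterwards are closed immediately, so
        // connections accepted mid-shutdown cannot leak.
        ChannelGroup group = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE, true);

        // Typical lifecycle (channel handling elided):
        // group.add(channel);           // track each accepted channel
        // group.writeAndFlush(message); // broadcast to every tracked channel
        group.close().awaitUninterruptibly(); // close everything; the group stays closed
    }
}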

Usage

From source file:com.adobe.acs.livereload.impl.LiveReloadServerImpl.java

License:Apache License

@Activate
protected void activate(ComponentContext ctx) throws Exception {
    Dictionary<?, ?> props = ctx.getProperties();
    this.port = PropertiesUtil.toInteger(props.get(PROP_PORT), DEFAULT_PORT);
    this.pathPrefixes = PropertiesUtil.toStringArray(props.get(PROP_PREFIXES), DEFAULT_PREFIXES);
    this.broadcastGroup = new NioEventLoopGroup(1);

    this.group = new DefaultChannelGroup("live-reload", broadcastGroup.next());
    this.infos = new ConcurrentHashMap<Channel, ChannelInfo>();

    this.matcher = new ContentPageMatcher();

    startServer();
    running = true;

    if (PropertiesUtil.toBoolean(props.get(PROP_JS_FILTER_ENABLED), DEFAULT_JS_FILTER_ENABLED)) {
        Dictionary<Object, Object> filterProps = new Hashtable<Object, Object>();
        filterProps.put("sling.filter.scope", "request");
        filterProps.put("filter.order", FILTER_ORDER);
        filterReference = ctx.getBundleContext().registerService(Filter.class.getName(),
                new JavaScriptInjectionFilter(port, pathPrefixes), filterProps);
    }
}
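
A group created this way is typically used to push a reload notification to every connected client at once. A hedged sketch of that broadcast step (the broadcastReload method and the JSON payload are illustrative, not taken from LiveReloadServerImpl):

import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;

// Hypothetical helper: writeAndFlush on the group sends one frame to every
// channel currently registered in it.
private void broadcastReload(String path) {
    group.writeAndFlush(new TextWebSocketFrame(
            "{\"command\":\"reload\",\"path\":\"" + path + "\",\"liveCSS\":true}"));
}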

From source file:com.ccompass.netty.proxy.Proxy.java

License:Apache License

public static void main(String[] args) throws Exception {
    ProxyConfig.loadConfig();
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    checkOtherChannel(ProxyConfig.config.checktimes);
    // create a channel group for each branch
    for (int i = 0; i < ProxyConfig.config.branchList.size(); i++) {
        ChannelGroup group = new DefaultChannelGroup("server-group", null);
        NettyClient.sinkGroups.add(group);
    }
    // initialize the per-branch sink channel lists
    if (ProxyConfig.config.branchNumbers > 0) {
        List<List<Channel>> list = new ArrayList<>();
        for (int i = 0; i < ProxyConfig.config.branchList.size(); i++) {
            List<Channel> channels = new ArrayList<Channel>();
            list.add(channels);
        }
        NettyClient.setSinkChannels(list);
    }
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                //    .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ProxyInitializer(ProxyConfig.config.mainIP, ProxyConfig.config.mainPort))
                .childOption(ChannelOption.AUTO_READ, false).bind(ProxyConfig.config.proxyPort).sync().channel()
                .closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
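
Note that this example passes null as the group's EventExecutor. The constructor accepts that, but any group operation that returns a ChannelGroupFuture (close(), writeAndFlush(), and so on) needs the executor to build its future and can fail with a NullPointerException. If the group has to do more than bookkeeping, a minimal safe substitute is:

import io.netty.util.concurrent.GlobalEventExecutor;

// Supply a real executor so group-wide operations can create their futures.
ChannelGroup group = new DefaultChannelGroup("server-group", GlobalEventExecutor.INSTANCE);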

From source file:com.github.mrstampy.gameboot.netty.NettyConnectionRegistry.java

License:Open Source License

/**
 * Puts the channel in the specified group, creating the group if it does not
 * yet exist.
 *
 * @param groupName
 *          the group key
 * @param channel
 *          the channel
 */
public void putInGroup(String groupName, Channel channel) {
    groupCheck(groupName, channel);

    ChannelGroup group = groups.get(groupName);
    if (group == null) {
        group = new DefaultChannelGroup(groupName, ImmediateEventExecutor.INSTANCE);
        groups.put(groupName, group);
    }

    if (!group.contains(channel))
        group.add(channel);
}
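
The lookup above is check-then-act: two concurrent callers can both see null and create competing groups for the same name. If groups is a ConcurrentMap, a sketch of an atomic variant (field and helper names assumed from the class above):

public void putInGroup(String groupName, Channel channel) {
    groupCheck(groupName, channel);

    // computeIfAbsent makes the lookup-or-create atomic per group name.
    ChannelGroup group = groups.computeIfAbsent(groupName,
            name -> new DefaultChannelGroup(name, ImmediateEventExecutor.INSTANCE));

    // ChannelGroup is a Set, so add() is already idempotent; the original
    // contains() check is not needed.
    group.add(channel);
}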

From source file:com.heliosapm.streams.metrichub.HubManager.java

License:Apache License

private HubManager(final Properties properties) {
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            try {
                close();
            } catch (Exception x) {
                /* No Op */
            }
        }
    });
    log.info(">>>>> Initializing HubManager...");
    metricMetaService = new MetricsMetaAPIImpl(properties);
    tsdbEndpoint = TSDBEndpoint.getEndpoint(metricMetaService.getSqlWorker());
    for (String url : tsdbEndpoint.getUpServers()) {
        final URL tsdbUrl = URLHelper.toURL(url);
        tsdbAddresses.add(new InetSocketAddress(tsdbUrl.getHost(), tsdbUrl.getPort()));
    }
    endpointCount = tsdbAddresses.size();
    endpointSequence = new AtomicInteger(endpointCount);
    group = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors() * 2,
            metricMetaService.getForkJoinPool());
    bootstrap = new Bootstrap();
    bootstrap.handler(channelInitializer).group(group).channel(NioSocketChannel.class)
            .option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator())
            .option(ChannelOption.ALLOCATOR, BufferManager.getInstance());
    final ChannelPoolHandler poolHandler = this;
    poolMap = new AbstractChannelPoolMap<InetSocketAddress, SimpleChannelPool>() {
        @Override
        protected SimpleChannelPool newPool(final InetSocketAddress key) {
            final Bootstrap b = new Bootstrap().handler(channelInitializer).group(group).remoteAddress(key)
                    .channel(NioSocketChannel.class)
                    .option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator())
                    .option(ChannelOption.ALLOCATOR, BufferManager.getInstance());
            return new SimpleChannelPool(b, poolHandler);
        }
    };
    eventExecutor = new DefaultEventExecutor(metricMetaService.getForkJoinPool());
    channelGroup = new DefaultChannelGroup("MetricHubChannelGroup", eventExecutor);

    //      tsdbAddresses.parallelStream().forEach(addr -> {
    //         final Set<Channel> channels = Collections.synchronizedSet(new HashSet<Channel>(3));
    //         IntStream.of(1,2,3).parallel().forEach(i -> {
    //            final ChannelPool pool = poolMap.get(addr); 
    //            try {channels.add(pool.acquire().awaitUninterruptibly().get());
    //            } catch (Exception e) {}
    //            log.info("Acquired [{}] Channels", channels.size());
    //            channels.parallelStream().forEach(ch -> pool.release(ch));
    //         });
    //      });

    log.info("<<<<< HubManager Initialized.");
}

From source file:com.heliosapm.streams.tracing.writers.NetWriter.java

License:Apache License

/**
 * {@inheritDoc}
 * @see com.heliosapm.streams.tracing.AbstractMetricWriter#configure(java.util.Properties)
 */
@Override
public void configure(final Properties config) {
    super.configure(config);
    remotes = ConfigurationHelper.getArraySystemThenEnvProperty(CONFIG_REMOTE_URIS, DEFAULT_REMOTE_URIS,
            config);
    Collections.addAll(remoteUris, remotes);
    channelGroupThreads = ConfigurationHelper.getIntSystemThenEnvProperty(CONFIG_EXEC_THREADS,
            DEFAULT_EXEC_THREADS, config);
    this.config.put("channelGroupThreads", channelGroupThreads);
    eventLoopThreads = ConfigurationHelper.getIntSystemThenEnvProperty(CONFIG_ELOOP_THREADS,
            DEFAULT_ELOOP_THREADS, config);
    this.config.put("eventLoopThreads", eventLoopThreads);
    eventExecutor = new UnorderedThreadPoolEventExecutor(channelGroupThreads, groupThreadFactory, this);
    channels = new DefaultChannelGroup(getClass().getSimpleName() + "Channels", eventExecutor);
    group = new NioEventLoopGroup(eventLoopThreads, eventLoopThreadFactory);
    bootstrap.group(group).channel(channelType).handler(getChannelInitializer());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000); // FIXME: config
    bootstrap.option(ChannelOption.ALLOCATOR, BufferManager.getInstance());
    this.config.put("connectTimeout", 5000);

    // FIXME: Tweaks for channel configuration

}
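
One way to resolve the FIXME comments above is to read the connect timeout from the same configuration source as the other settings. A sketch, where the property key and default are assumptions rather than anything defined by NetWriter:

final int connectTimeout = ConfigurationHelper.getIntSystemThenEnvProperty(
        "connect.timeout.millis", 5000, config); // hypothetical key and default
bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeout);
this.config.put("connectTimeout", connectTimeout);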

From source file:com.heliosapm.tsdblite.Server.java

License:Apache License

/**
 * Creates a new Server
 */
private Server() {
    log.info("Configuring Netty Server....");

    String serverLevel = ConfigurationHelper.getSystemThenEnvProperty(Constants.CONF_NETTY_SERVER_LOGLEVEL,
            Constants.DEFAULT_NETTY_SERVER_LOGLEVEL);
    loggingHandler = new LoggingHandler(getClass(), LogLevel.valueOf(serverLevel.trim().toUpperCase()));
    iface = ConfigurationHelper.getSystemThenEnvProperty(Constants.CONF_NETTY_IFACE,
            Constants.DEFAULT_NETTY_IFACE);
    port = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_NETTY_PORT,
            Constants.DEFAULT_NETTY_PORT);
    int bossThreads = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_NETTY_BOSS_THREADS,
            Constants.DEFAULT_NETTY_BOSS_THREADS);
    int workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_NETTY_WORKER_THREADS,
            Constants.DEFAULT_NETTY_WORKER_THREADS);
    int groupThreads = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_NETTY_CGROUP_THREADS,
            Constants.DEFAULT_NETTY_CGROUP_THREADS);
    bossPool = new ManagedDefaultExecutorServiceFactory("bossPool").newExecutorService(bossThreads);
    //      ForkJoinPoolManager.register(bossPool, BOSS_POOL_ON);
    workerPool = new ManagedDefaultExecutorServiceFactory("workerPool").newExecutorService(workerThreads);
    //      ForkJoinPoolManager.register(workerPool, WORKER_POOL_ON);
    channelGroupPool = new ManagedDefaultExecutorServiceFactory("groupPool").newExecutorService(groupThreads);
    //      ForkJoinPoolManager.register(channelGroupPool, CGROUP_POOL_ON);
    bossGroup = new NioEventLoopGroup(bossThreads, bossPool, selectorProvider);
    workerGroup = new NioEventLoopGroup(workerThreads, workerPool, selectorProvider);
    bootStrap = new ServerBootstrap();
    groupExecutor = new DefaultEventExecutor(channelGroupPool);
    channelGroup = new DefaultChannelGroup("TSDBLite", groupExecutor);
    MetricCache.getInstance(); // fire up the metric cache before we start taking calls   
    log.info("Selector: {}", selectorProvider.getClass().getName());
    bootStrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).handler(loggingHandler)
            .childHandler(this);
    try {
        serverChannel = (NioServerSocketChannel) bootStrap.bind(iface, port).sync().channel();
    } catch (Exception ex) {
        stop();
        log.error("Failed to bind Netty server on [{}:{}]", iface, port, ex);
        throw new RuntimeException("Failed to bind Netty server", ex);
    }
    JMXHelper.registerMBean(this, OBJECT_NAME);
    log.info(
            "\n\t======================================\n\tNetty Server started on [{}:{}]\n\t======================================",
            iface, port);
}

From source file:com.kixeye.kixmpp.server.KixmppServer.java

License:Apache License

/**
 * Creates a new {@link KixmppServer}.
 *
 * @param bindAddress the address to bind the server to
 * @param domain the XMPP domain served by this server
 * @param clusterAddress the address used to join the cluster
 * @param clusterDiscovery the node discovery strategy for the cluster
 * @param useEpollIfAvailable whether to use the native epoll transport when running on Linux
 */
public KixmppServer(InetSocketAddress bindAddress, String domain, InetSocketAddress clusterAddress,
        NodeDiscovery clusterDiscovery, boolean useEpollIfAvailable) {
    if (useEpollIfAvailable && OS.indexOf("nux") >= 0) {
        this.bootstrap = new ServerBootstrap().group(new EpollEventLoopGroup(), new EpollEventLoopGroup())
                .channel(EpollServerSocketChannel.class).childHandler(new ChannelInitializer<SocketChannel>() {
                    protected void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new KixmppCodec());
                        ch.pipeline().addLast(new KixmppServerMessageHandler());
                    }
                });
    } else {
        this.bootstrap = new ServerBootstrap().group(new NioEventLoopGroup(), new NioEventLoopGroup())
                .channel(NioServerSocketChannel.class).childHandler(new ChannelInitializer<SocketChannel>() {
                    protected void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new KixmppCodec());
                        ch.pipeline().addLast(new KixmppServerMessageHandler());
                    }
                });
    }

    this.cluster = new ClusterClient(this, clusterAddress.getHostName(), clusterAddress.getPort(),
            clusterDiscovery, 300000, bootstrap.group());
    this.cluster.getMessageRegistry().addCustomMessage(1, RoomBroadcastTask.class);
    this.cluster.getMessageRegistry().addCustomMessage(2, RoomPresenceBroadcastTask.class);
    this.cluster.getMessageRegistry().addCustomMessage(3, PrivateChatTask.class);
    this.cluster.getMessageRegistry().addCustomMessage(4, GetMucRoomNicknamesRequest.class);
    this.cluster.getMessageRegistry().addCustomMessage(5, GetMucRoomNicknamesResponse.class);
    this.mapReduce = new MapReduceTracker(this, bootstrap.group());
    this.channels = new DefaultChannelGroup("All Channels", GlobalEventExecutor.INSTANCE);

    this.bindAddress = bindAddress;
    this.domain = domain.toLowerCase();
    this.eventEngine = new KixmppEventEngine();

    this.modulesToRegister.add(FeaturesKixmppServerModule.class.getName());
    this.modulesToRegister.add(SaslKixmppServerModule.class.getName());
    this.modulesToRegister.add(BindKixmppServerModule.class.getName());
    this.modulesToRegister.add(SessionKixmppServerModule.class.getName());
    this.modulesToRegister.add(PresenceKixmppServerModule.class.getName());
    this.modulesToRegister.add(MucKixmppServerModule.class.getName());
    this.modulesToRegister.add(RosterKixmppServerModule.class.getName());
    this.modulesToRegister.add(DiscoKixmppServerModule.class.getName());
    this.modulesToRegister.add(ChatKixmppServerModule.class.getName());
}

From source file:com.linkedin.r2.transport.http.client.AbstractNettyStreamClient.java

License:Apache License

/**
 * Creates a new AbstractNettyStreamClient
 *
 * @param eventLoopGroup            The NioEventLoopGroup; it is the caller's responsibility to
 *                                  shut it down
 * @param executor                  An executor; it is the caller's responsibility to shut it down
 * @param requestTimeout            Timeout, in ms, to get a connection from the pool or create one
 * @param shutdownTimeout           Timeout, in ms, the client should wait after shutdown is
 *                                  initiated before terminating outstanding requests
 * @param maxResponseSize           Maximum size of an HTTP response
 * @param callbackExecutors         An optional EventExecutorGroup to invoke user callback
 * @param jmxManager                A management class that is aware of the creation/shutdown event
 *                                  of the underlying {@link ChannelPoolManager}
 * @param maxConcurrentConnections  Maximum number of concurrent connection attempts the HTTP
 *                                  connection pool can make.
 */
public AbstractNettyStreamClient(NioEventLoopGroup eventLoopGroup, ScheduledExecutorService executor,
        long requestTimeout, long shutdownTimeout, long maxResponseSize, ExecutorService callbackExecutors,
        AbstractJmxManager jmxManager, int maxConcurrentConnections) {
    _maxResponseSize = maxResponseSize;
    _maxConcurrentConnections = maxConcurrentConnections;
    _scheduler = executor;
    _callbackExecutors = callbackExecutors == null ? eventLoopGroup : callbackExecutors;
    _requestTimeout = requestTimeout;
    _shutdownTimeout = shutdownTimeout;
    _requestTimeoutMessage = "Exceeded request timeout of " + _requestTimeout + "ms";
    _jmxManager = jmxManager;
    _allChannels = new DefaultChannelGroup("R2 client channels", eventLoopGroup.next());
}

From source file:com.linkedin.r2.transport.http.client.AbstractNettyStreamClient.java

License:Apache License

AbstractNettyStreamClient(ChannelPoolFactory factory, ScheduledExecutorService executor, int requestTimeout,
        int shutdownTimeout, long maxResponseSize) {
    _maxResponseSize = maxResponseSize;
    _scheduler = executor;
    _callbackExecutors = new DefaultEventExecutorGroup(1);
    _requestTimeout = requestTimeout;
    _shutdownTimeout = shutdownTimeout;
    _requestTimeoutMessage = "Exceeded request timeout of " + _requestTimeout + "ms";
    _jmxManager = AbstractJmxManager.NULL_JMX_MANAGER;
    _maxConcurrentConnections = Integer.MAX_VALUE;
    _allChannels = new DefaultChannelGroup("R2 client channels", GlobalEventExecutor.INSTANCE);
}
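
The two constructors above pick the group's executor differently: the first reuses one of the event loop's executors via eventLoopGroup.next(), so ChannelGroupFuture notifications run on a thread the client already owns, while this one falls back to the shared GlobalEventExecutor.INSTANCE because no event loop group is supplied. Either choice works; it only determines which thread delivers the group's future notifications.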

From source file:com.linkedin.r2.transport.http.client.HttpNettyClient.java

License:Apache License

/**
 * Creates a new HttpNettyClient
 *
 * @param eventLoopGroup            The NioEventLoopGroup; it is the caller's responsibility to
 *                                  shut it down
 * @param executor                  An executor; it is the caller's responsibility to shut it down
 * @param poolSize                  Maximum size of the underlying HTTP connection pool
 * @param requestTimeout            Timeout, in ms, to get a connection from the pool or create one
 * @param idleTimeout               Interval after which idle connections will be automatically closed
 * @param shutdownTimeout           Timeout, in ms, the client should wait after shutdown is
 *                                  initiated before terminating outstanding requests
 * @param maxResponseSize           Maximum size of an HTTP response
 * @param sslContext                {@link SSLContext}
 * @param sslParameters             The {@link SSLParameters} to apply to new connections
 * @param callbackExecutors         An optional EventExecutorGroup to invoke user callback
 * @param poolWaiterSize            Maximum waiters waiting on the HTTP connection pool
 * @param name                      Name of the {@link HttpNettyClient}
 * @param jmxManager                A management class that is aware of the creation/shutdown event
 *                                  of the underlying {@link ChannelPoolManager}
 * @param strategy                  The strategy used to return pool objects.
 * @param minPoolSize               Minimum number of objects in the pool. Set to zero for no minimum.
 * @param maxHeaderSize             Maximum size of all HTTP headers
 * @param maxChunkSize              Maximum size of a HTTP chunk
 * @param maxConcurrentConnections  Maximum number of concurrent connection attempts the HTTP
 *                                  connection pool can make.
 */
public HttpNettyClient(NioEventLoopGroup eventLoopGroup, ScheduledExecutorService executor, int poolSize,
        long requestTimeout, long idleTimeout, long shutdownTimeout, int maxResponseSize, SSLContext sslContext,
        SSLParameters sslParameters, ExecutorService callbackExecutors, int poolWaiterSize, String name,
        AbstractJmxManager jmxManager, AsyncPoolImpl.Strategy strategy, int minPoolSize, int maxHeaderSize,
        int maxChunkSize, int maxConcurrentConnections) {
    Bootstrap bootstrap = new Bootstrap().group(eventLoopGroup).channel(NioSocketChannel.class)
            .handler(new HttpClientPipelineInitializer(sslContext, sslParameters));

    _channelPoolManager = new ChannelPoolManager(
            new ChannelPoolFactoryImpl(bootstrap, poolSize, idleTimeout, poolWaiterSize, strategy, minPoolSize),
            name + ChannelPoolManager.BASE_NAME);

    _maxResponseSize = maxResponseSize;
    _maxHeaderSize = maxHeaderSize;
    _maxChunkSize = maxChunkSize;
    _maxConcurrentConnections = maxConcurrentConnections;
    _scheduler = executor;
    _callbackExecutors = callbackExecutors == null ? eventLoopGroup : callbackExecutors;
    _requestTimeout = requestTimeout;
    _shutdownTimeout = shutdownTimeout;
    _requestTimeoutMessage = "Exceeded request timeout of " + _requestTimeout + "ms";
    _jmxManager = jmxManager;
    _allChannels = new DefaultChannelGroup("R2 client channels", eventLoopGroup.next());
    _jmxManager.onProviderCreate(_channelPoolManager);
}
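
Groups like _allChannels exist so that shutdown can tear down every open connection in one call. A minimal sketch of that pattern (the method name is illustrative, not from HttpNettyClient):

// Close every channel registered with the group and wait for completion;
// notifications are delivered on the executor the group was built with.
private void closeAllChannels() {
    _allChannels.close().awaitUninterruptibly();
}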