Example usage for com.google.common.net HostAndPort toString

List of usage examples for com.google.common.net HostAndPort toString

Introduction

On this page you can find example usages of com.google.common.net HostAndPort#toString().

Prototype

@Override
public String toString() 

Document

Rebuild the host:port string, including brackets if necessary.
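
For orientation, here is a minimal, self-contained sketch (hypothetical hosts and ports) showing the rebuilt string and the bracket behaviour for IPv6 literals described above:

import com.google.common.net.HostAndPort;

public class HostAndPortToStringDemo {
    public static void main(String[] args) {
        // A plain host and port are rebuilt as "host:port"
        HostAndPort plain = HostAndPort.fromParts("example.com", 8080);
        System.out.println(plain.toString());    // example.com:8080

        // An IPv6 literal is re-wrapped in brackets so the result can be parsed back
        HostAndPort ipv6 = HostAndPort.fromParts("2001:db8::1", 443);
        System.out.println(ipv6.toString());     // [2001:db8::1]:443

        // Without a port, toString() returns just the host
        HostAndPort hostOnly = HostAndPort.fromString("example.com");
        System.out.println(hostOnly.toString()); // example.com
    }
}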

Usage

From source file:org.apache.brooklyn.demo.CumulusRDFApplication.java

/**
 * Create the application entities:
 * <ul>
 * <li>A {@link CassandraFabric} of {@link CassandraDatacenter}s containing {@link CassandraNode}s
 * <li>A {@link TomcatServer}
 * </ul>
 */
@Override
public void initApp() {
    // Cassandra cluster
    EntitySpec<CassandraDatacenter> clusterSpec = EntitySpec.create(CassandraDatacenter.class)
            .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(CassandraNode.class)
                    //FIXME can probably use JMXMP_AND_RMI now, to deploy to GCE and elsewhere
                    .configure(UsesJmx.JMX_AGENT_MODE, UsesJmx.JmxAgentModes.JMX_RMI_CUSTOM_AGENT)
                    .configure(UsesJmx.JMX_PORT, PortRanges.fromString("11099+"))
                    .configure(UsesJmx.RMI_REGISTRY_PORT, PortRanges.fromString("9001+"))
                    .configure(CassandraNode.THRIFT_PORT,
                            PortRanges.fromInteger(getConfig(CASSANDRA_THRIFT_PORT)))
                    .enricher(EnricherSpec.create(ServiceFailureDetector.class))
                    .policy(PolicySpec.create(ServiceRestarter.class).configure(
                            ServiceRestarter.FAILURE_SENSOR_TO_MONITOR, ServiceFailureDetector.ENTITY_FAILED)))
            .policy(PolicySpec.create(ServiceReplacer.class).configure(
                    ServiceReplacer.FAILURE_SENSOR_TO_MONITOR, ServiceRestarter.ENTITY_RESTART_FAILED));

    if (getConfig(MULTI_REGION_FABRIC)) {
        cassandra = addChild(EntitySpec.create(CassandraFabric.class)
                .configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)) // per location
                .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME,
                        "org.apache.brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
                .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL,
                        "classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
                .configure(CassandraFabric.MEMBER_SPEC, clusterSpec));
    } else {
        cassandra = addChild(
                EntitySpec.create(clusterSpec).configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                        .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)));
    }

    // Tomcat web-app server
    webapp = addChild(EntitySpec.create(TomcatServer.class)
            .configure(UsesJmx.JMX_AGENT_MODE, UsesJmx.JmxAgentModes.JMX_RMI_CUSTOM_AGENT)
            .configure(UsesJmx.JMX_PORT, PortRanges.fromString("11099+"))
            .configure(UsesJmx.RMI_REGISTRY_PORT, PortRanges.fromString("9001+"))
            .configure(JavaWebAppService.ROOT_WAR,
                    "https://cumulusrdf.googlecode.com/svn/wiki/downloads/cumulusrdf-1.0.1.war")
            .configure(UsesJava.JAVA_SYSPROPS, MutableMap.of("cumulusrdf.config-file", "/tmp/cumulus.yaml")));

    // Add an effector to tomcat to reconfigure with a new YAML config file
    ((EntityInternal) webapp).getMutableEntityType().addEffector(cumulusConfig, new EffectorBody<Void>() {
        @Override
        public Void call(ConfigBag parameters) {
            // Process the YAML template given in the application config
            String url = Entities.getRequiredUrlConfig(CumulusRDFApplication.this, CUMULUS_RDF_CONFIG_URL);
            Map<String, Object> config;
            synchronized (endpointMutex) {
                config = MutableMap.<String, Object>of("cassandraHostname", endpoint.getHostText(),
                        "cassandraThriftPort", endpoint.getPort());
            }
            String contents = TemplateProcessor.processTemplateContents(
                    ResourceUtils.create(CumulusRDFApplication.this).getResourceAsString(url), config);
            // Copy the file contents to the remote machine
            return DynamicTasks.queue(SshEffectorTasks.put("/tmp/cumulus.yaml").contents(contents)).get();
        }
    });

    // Listen for HOSTNAME changes from the Cassandra fabric to show at least one node is available
    subscriptions().subscribe(cassandra, CassandraDatacenter.HOSTNAME, new SensorEventListener<String>() {
        @Override
        public void onEvent(SensorEvent<String> event) {
            if (Strings.isNonBlank(event.getValue())) {
                synchronized (endpointMutex) {
                    String hostname = Entities
                            .submit(CumulusRDFApplication.this, DependentConfiguration
                                    .attributeWhenReady(cassandra, CassandraDatacenter.HOSTNAME))
                            .getUnchecked();
                    Integer thriftPort = Entities
                            .submit(CumulusRDFApplication.this, DependentConfiguration
                                    .attributeWhenReady(cassandra, CassandraDatacenter.THRIFT_PORT))
                            .getUnchecked();
                    HostAndPort current = HostAndPort.fromParts(hostname, thriftPort);

                    // Check if the cluster access point has changed
                    if (!current.equals(endpoint)) {
                        log.info("Setting cluster endpoint to {}", current.toString());
                        endpoint = current;

                        // Reconfigure the CumulusRDF application and restart tomcat if necessary
                        webapp.invoke(cumulusConfig, MutableMap.<String, Object>of());
                        if (webapp.getAttribute(Startable.SERVICE_UP)) {
                            webapp.restart();
                        }
                    }
                }
            }
        }
    });
}
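
The listener above detects a changed cluster access point by comparing HostAndPort values and logs the new endpoint via toString(). A minimal sketch of that comparison, assuming hypothetical addresses and com.google.common.net.HostAndPort on the classpath:

HostAndPort endpoint = HostAndPort.fromParts("10.0.0.1", 9160);
HostAndPort current = HostAndPort.fromParts("10.0.0.2", 9160);
if (!current.equals(endpoint)) {
    System.out.println("Setting cluster endpoint to " + current.toString()); // 10.0.0.2:9160
    endpoint = current;
}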

From source file:brooklyn.demo.CumulusRDFApplication.java

/**
 * Create the application entities:
 * <ul>
 * <li>A {@link CassandraFabric} of {@link CassandraDatacenter}s containing {@link CassandraNode}s
 * <li>A {@link TomcatServer}
 * </ul>
 */
@Override
public void initApp() {
    // Cassandra cluster
    EntitySpec<CassandraDatacenter> clusterSpec = EntitySpec.create(CassandraDatacenter.class)
            .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(CassandraNode.class)
                    //FIXME can probably use JMXMP_AND_RMI now, to deploy to GCE and elsewhere
                    .configure(UsesJmx.JMX_AGENT_MODE, UsesJmx.JmxAgentModes.JMX_RMI_CUSTOM_AGENT)
                    .configure(UsesJmx.JMX_PORT, PortRanges.fromString("11099+"))
                    .configure(UsesJmx.RMI_REGISTRY_PORT, PortRanges.fromString("9001+"))
                    .configure(CassandraNode.THRIFT_PORT,
                            PortRanges.fromInteger(getConfig(CASSANDRA_THRIFT_PORT)))
                    .enricher(EnricherSpec.create(ServiceFailureDetector.class))
                    .policy(PolicySpec.create(ServiceRestarter.class).configure(
                            ServiceRestarter.FAILURE_SENSOR_TO_MONITOR, ServiceFailureDetector.ENTITY_FAILED)))
            .policy(PolicySpec.create(ServiceReplacer.class).configure(
                    ServiceReplacer.FAILURE_SENSOR_TO_MONITOR, ServiceRestarter.ENTITY_RESTART_FAILED));

    if (getConfig(MULTI_REGION_FABRIC)) {
        cassandra = addChild(
                EntitySpec.create(CassandraFabric.class).configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                        .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)) // per location
                        .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME,
                                "brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
                        .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL,
                                "classpath://brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
                        .configure(CassandraFabric.MEMBER_SPEC, clusterSpec));
    } else {
        cassandra = addChild(
                EntitySpec.create(clusterSpec).configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                        .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)));
    }

    // Tomcat web-app server
    webapp = addChild(EntitySpec.create(TomcatServer.class)
            .configure(UsesJmx.JMX_AGENT_MODE, UsesJmx.JmxAgentModes.JMX_RMI_CUSTOM_AGENT)
            .configure(UsesJmx.JMX_PORT, PortRanges.fromString("11099+"))
            .configure(UsesJmx.RMI_REGISTRY_PORT, PortRanges.fromString("9001+"))
            .configure(JavaWebAppService.ROOT_WAR,
                    "https://cumulusrdf.googlecode.com/svn/wiki/downloads/cumulusrdf-1.0.1.war")
            .configure(UsesJava.JAVA_SYSPROPS, MutableMap.of("cumulusrdf.config-file", "/tmp/cumulus.yaml")));

    // Add an effector to tomcat to reconfigure with a new YAML config file
    ((EntityInternal) webapp).getMutableEntityType().addEffector(cumulusConfig, new EffectorBody<Void>() {
        @Override
        public Void call(ConfigBag parameters) {
            // Process the YAML template given in the application config
            String url = Entities.getRequiredUrlConfig(CumulusRDFApplication.this, CUMULUS_RDF_CONFIG_URL);
            Map<String, Object> config;
            synchronized (endpointMutex) {
                config = MutableMap.<String, Object>of("cassandraHostname", endpoint.getHostText(),
                        "cassandraThriftPort", endpoint.getPort());
            }
            String contents = TemplateProcessor.processTemplateContents(
                    ResourceUtils.create(CumulusRDFApplication.this).getResourceAsString(url), config);
            // Copy the file contents to the remote machine
            return DynamicTasks.queue(SshEffectorTasks.put("/tmp/cumulus.yaml").contents(contents)).get();
        }
    });

    // Listen for HOSTNAME changes from the Cassandra fabric to show at least one node is available
    subscribe(cassandra, CassandraDatacenter.HOSTNAME, new SensorEventListener<String>() {
        @Override
        public void onEvent(SensorEvent<String> event) {
            if (Strings.isNonBlank(event.getValue())) {
                synchronized (endpointMutex) {
                    String hostname = Entities
                            .submit(CumulusRDFApplication.this, DependentConfiguration
                                    .attributeWhenReady(cassandra, CassandraDatacenter.HOSTNAME))
                            .getUnchecked();
                    Integer thriftPort = Entities
                            .submit(CumulusRDFApplication.this, DependentConfiguration
                                    .attributeWhenReady(cassandra, CassandraDatacenter.THRIFT_PORT))
                            .getUnchecked();
                    HostAndPort current = HostAndPort.fromParts(hostname, thriftPort);

                    // Check if the cluster access point has changed
                    if (!current.equals(endpoint)) {
                        log.info("Setting cluster endpoint to {}", current.toString());
                        endpoint = current;

                        // Reconfigure the CumulusRDF application and restart tomcat if necessary
                        webapp.invoke(cumulusConfig, MutableMap.<String, Object>of());
                        if (webapp.getAttribute(Startable.SERVICE_UP)) {
                            webapp.restart();
                        }
                    }
                }
            }
        }
    });
}

From source file:brooklyn.entity.mesos.framework.marathon.MarathonPortForwarder.java

private void addIptablesRule(Integer hostPort, HostAndPort container) {
    LOG.debug("Using iptables to add access for TCP/{} to {}", hostPort, host);
    List<String> commands = ImmutableList.of(
            BashCommands.sudo(
                    String.format("iptables -t nat -A PREROUTING -p tcp --dport %d -j DNAT --to-destination %s",
                            hostPort, container.toString())),
            BashCommands.sudo(String.format(
                    "iptables -A FORWARD -p tcp -d %s --dport %d -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT",
                    container.getHostText(), container.getPort())));
    int result = host.execCommands(MutableMap.of(SshTool.PROP_ALLOCATE_PTY.getName(), true),
            String.format("Open iptables TCP/%d", hostPort), commands);
    if (result != 0) {
        String msg = String.format("Error running iptables update for TCP/%d on %s", hostPort, host);
        LOG.error(msg);
        throw new RuntimeException(msg);
    }
}

From source file:com.netflix.simianarmy.client.chef.ChefClient.java

@Override
/**
 * connect to the given instance with the given credentials
 */
public SshClient connectSsh(String instanceId, LoginCredentials credentials) {

    for (Node node : context.getChefService().listNodes()) {

        if (node.getName().equals(instanceId)) {
            HostAndPort socket = HostAndPort.fromString(node.getName()).withDefaultPort(22);

            //Use JschSshClient directly, had some problems when trying to go through jclouds
            SshClient ssh = new JschSshClient(new GuiceProxyConfig(), BackoffLimitedRetryHandler.INSTANCE,
                    socket, credentials, 3000);
            LOGGER.info(String.format("Opening ssh connection to %s (%s)", instanceId, socket.toString()));
            ssh.connect();
            return ssh;
        }
    }

    return null;
}
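
The example above applies withDefaultPort(22) before calling toString(). A minimal sketch (hypothetical host names, HostAndPort imported) of how the default port shows up in the rebuilt string:

HostAndPort socket = HostAndPort.fromString("chef-node.example.com").withDefaultPort(22);
System.out.println(socket.toString());   // chef-node.example.com:22

// An explicit port in the input takes precedence over the default
HostAndPort explicit = HostAndPort.fromString("chef-node.example.com:2222").withDefaultPort(22);
System.out.println(explicit.toString()); // chef-node.example.com:2222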

From source file:brooklyn.networking.vclouddirector.natservice.resources.NatServiceResource.java

@Override
public String openPortForwarding(String endpoint, String vDC, String identity, String credential,
        String protocol, String original, String originalPortRange, String translated) {
    LOG.info("creating nat rule {} {} -> {}, on {} @ {} (vDC {})",
            new Object[] { protocol, original, translated, identity, endpoint, vDC });
    HostAndPort originalHostAndPort = HostAndPort.fromString(original);
    HostAndPort translatedHostAndPort = HostAndPort.fromString(translated);
    Preconditions.checkArgument(translatedHostAndPort.hasPort(), "translated %s must include port", translated);
    try {
        HostAndPort result = dispatcher().openPortForwarding(endpoint, vDC, identity, credential,
                new PortForwardingConfig().protocol(Protocol.valueOf(protocol.toUpperCase()))
                        .publicEndpoint(originalHostAndPort)
                        .publicPortRange(Strings.isBlank(originalPortRange) ? null
                                : PortRanges.fromString(originalPortRange))
                        .targetEndpoint(translatedHostAndPort));

        return result.toString();
    } catch (Exception e) {
        throw Exceptions.propagate(e);
    }
}
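
The method above parses the translated endpoint, validates it with hasPort(), and returns the result of toString(). A minimal sketch of that parse-validate-rebuild round trip, assuming hypothetical addresses and Guava's HostAndPort and Preconditions imported:

HostAndPort translated = HostAndPort.fromString("10.0.0.5:8080");
Preconditions.checkArgument(translated.hasPort(), "translated %s must include port", translated);
System.out.println(translated.toString()); // 10.0.0.5:8080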

From source file:org.apache.brooklyn.entity.nosql.couchbase.CouchbaseNodeSshDriver.java

@Override
public void rebalance() {
    entity.sensors().set(CouchbaseNode.REBALANCE_STATUS, "explicitly started");
    newScript("rebalance").body.append(couchbaseCli("rebalance") + getCouchbaseHostnameAndCredentials())
            .failOnNonZeroResultCode().execute();

    // wait until the re-balance is started
    // (if it's quick, this might miss it, but it will only block for 30s if so)
    Repeater.create().backoff(Repeater.DEFAULT_REAL_QUICK_PERIOD, 2, Duration.millis(500))
            .limitTimeTo(Duration.THIRTY_SECONDS).until(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    for (HostAndPort nodeHostAndPort : getNodesHostAndPort()) {
                        if (isNodeRebalancing(nodeHostAndPort.toString())) {
                            return true;
                        }
                    }
                    return false;
                }
            }).run();

    entity.sensors().set(CouchbaseNode.REBALANCE_STATUS, "waiting for completion");
    // Wait until the Couchbase node finishes the re-balancing
    Task<Boolean> reBalance = TaskBuilder.<Boolean>builder().displayName("Waiting until node is rebalancing")
            .body(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    return Repeater.create().backoff(Duration.ONE_SECOND, 1.2, Duration.TEN_SECONDS)
                            .limitTimeTo(Duration.FIVE_MINUTES).until(new Callable<Boolean>() {
                                @Override
                                public Boolean call() throws Exception {
                                    for (HostAndPort nodeHostAndPort : getNodesHostAndPort()) {
                                        if (isNodeRebalancing(nodeHostAndPort.toString())) {
                                            return false;
                                        }
                                    }
                                    return true;
                                }
                            }).run();
                }
            }).build();
    Boolean completed = DynamicTasks.queueIfPossible(reBalance).orSubmitAndBlock().andWaitForSuccess();
    if (completed) {
        entity.sensors().set(CouchbaseNode.REBALANCE_STATUS, "completed");
        ServiceStateLogic.ServiceNotUpLogic.clearNotUpIndicator(getEntity(), "rebalancing");
        log.info("Rebalanced cluster via primary node {}", getEntity());
    } else {
        entity.sensors().set(CouchbaseNode.REBALANCE_STATUS, "timed out");
        ServiceStateLogic.ServiceNotUpLogic.updateNotUpIndicator(getEntity(), "rebalancing",
                "rebalance did not complete within time limit");
        log.warn("Timeout rebalancing cluster via primary node {}", getEntity());
    }
}

From source file:brooklyn.entity.nosql.couchbase.CouchbaseNodeSshDriver.java

@Override
public void rebalance() {
    entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "explicitly started");
    newScript("rebalance").body.append(couchbaseCli("rebalance") + getCouchbaseHostnameAndCredentials())
            .failOnNonZeroResultCode().execute();

    // wait until the re-balance is started
    // (if it's quick, this might miss it, but it will only block for 30s if so)
    Repeater.create().backoff(Duration.millis(10), 2, Duration.millis(500)).limitTimeTo(Duration.THIRTY_SECONDS)
            .until(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    for (HostAndPort nodeHostAndPort : getNodesHostAndPort()) {
                        if (isNodeRebalancing(nodeHostAndPort.toString())) {
                            return true;
                        }
                    }
                    return false;
                }
            }).run();

    entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "waiting for completion");
    // Wait until the Couchbase node finishes the re-balancing
    Task<Boolean> reBalance = TaskBuilder.<Boolean>builder().name("Waiting until node is rebalancing")
            .body(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    return Repeater.create().backoff(Duration.ONE_SECOND, 1.2, Duration.TEN_SECONDS)
                            .limitTimeTo(Duration.FIVE_MINUTES).until(new Callable<Boolean>() {
                                @Override
                                public Boolean call() throws Exception {
                                    for (HostAndPort nodeHostAndPort : getNodesHostAndPort()) {
                                        if (isNodeRebalancing(nodeHostAndPort.toString())) {
                                            return false;
                                        }
                                    }
                                    return true;
                                }
                            }).run();
                }
            }).build();
    Boolean completed = DynamicTasks.queueIfPossible(reBalance).orSubmitAndBlock().andWaitForSuccess();
    if (completed) {
        entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "completed");
        ServiceStateLogic.ServiceNotUpLogic.clearNotUpIndicator(getEntity(), "rebalancing");
        log.info("Rebalanced cluster via primary node {}", getEntity());
    } else {
        entity.setAttribute(CouchbaseNode.REBALANCE_STATUS, "timed out");
        ServiceStateLogic.ServiceNotUpLogic.updateNotUpIndicator(getEntity(), "rebalancing",
                "rebalance did not complete within time limit");
        log.warn("Timeout rebalancing cluster via primary node {}", getEntity());
    }
}

From source file:org.apache.kudu.client.ConnectionCache.java

TabletClient newMasterClient(HostAndPort hostPort) {
    // We should pass a UUID here, but we have a chicken-and-egg problem: we first need to
    // communicate with the masters to find out about them, and that's what we're trying to do.
    // The UUID is only used for logging and as a cache key, so instead we use a string
    // constructed from the master host and port.
    return newClient("master-" + hostPort.toString(), hostPort);
}
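
A minimal sketch (hypothetical master address, HostAndPort imported) of the cache-key construction above:

HostAndPort hostPort = HostAndPort.fromParts("master1.example.com", 7051);
String cacheKey = "master-" + hostPort.toString(); // master-master1.example.com:7051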

From source file:org.apache.brooklyn.entity.nosql.riak.RiakNodeImpl.java

@Override
public void connectSensors() {
    super.connectSensors();
    connectServiceUpIsRunning();
    HostAndPort accessible = BrooklynAccessUtils.getBrooklynAccessibleAddress(this, getRiakWebPort());

    if (isHttpMonitoringEnabled()) {
        HttpFeed.Builder httpFeedBuilder = HttpFeed.builder().entity(this).period(500, TimeUnit.MILLISECONDS)
                .baseUri(String.format("http://%s/stats", accessible.toString()))
                .poll(new HttpPollConfig<Integer>(NODE_GETS)
                        .onSuccess(HttpValueFunctions.jsonContents("node_gets", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(NODE_GETS_TOTAL)
                        .onSuccess(HttpValueFunctions.jsonContents("node_gets_total", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(NODE_PUTS)
                        .onSuccess(HttpValueFunctions.jsonContents("node_puts", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(NODE_PUTS_TOTAL)
                        .onSuccess(HttpValueFunctions.jsonContents("node_puts_total", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(VNODE_GETS)
                        .onSuccess(HttpValueFunctions.jsonContents("vnode_gets", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(VNODE_GETS_TOTAL)
                        .onSuccess(HttpValueFunctions.jsonContents("vnode_gets_total", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(VNODE_PUTS)
                        .onSuccess(HttpValueFunctions.jsonContents("vnode_puts", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(VNODE_PUTS_TOTAL)
                        .onSuccess(HttpValueFunctions.jsonContents("vnode_puts_total", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(READ_REPAIRS_TOTAL)
                        .onSuccess(HttpValueFunctions.jsonContents("read_repairs_total", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(COORD_REDIRS_TOTAL)
                        .onSuccess(HttpValueFunctions.jsonContents("coord_redirs_total", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(MEMORY_PROCESSES_USED)
                        .onSuccess(HttpValueFunctions.jsonContents("memory_processes_used", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(SYS_PROCESS_COUNT)
                        .onSuccess(HttpValueFunctions.jsonContents("sys_process_count", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(PBC_CONNECTS)
                        .onSuccess(HttpValueFunctions.jsonContents("pbc_connects", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<Integer>(PBC_ACTIVE)
                        .onSuccess(HttpValueFunctions.jsonContents("pbc_active", Integer.class))
                        .onFailureOrException(Functions.constant(-1)))
                .poll(new HttpPollConfig<List<String>>(RING_MEMBERS).onSuccess(
                        Functionals.chain(HttpValueFunctions.jsonContents("ring_members", String[].class),
                                new Function<String[], List<String>>() {
                                    @Nullable
                                    @Override
                                    public List<String> apply(@Nullable String[] strings) {
                                        return Arrays.asList(strings);
                                    }
                                }))
                        .onFailureOrException(Functions.constant(Arrays.asList(new String[0]))));

        for (AttributeSensor<Integer> sensor : ONE_MINUTE_SENSORS) {
            httpFeedBuilder.poll(new HttpPollConfig<Integer>(sensor).period(Duration.ONE_MINUTE)
                    .onSuccess(HttpValueFunctions.jsonContents(sensor.getName().substring(5), Integer.class))
                    .onFailureOrException(Functions.constant(-1)));
        }

        httpFeed = httpFeedBuilder.build();
    }

    enrichers().add(
            Enrichers.builder().combining(NODE_GETS, NODE_PUTS).computingSum().publishing(NODE_OPS).build());
    enrichers().add(Enrichers.builder().combining(NODE_GETS_TOTAL, NODE_PUTS_TOTAL).computingSum()
            .publishing(NODE_OPS_TOTAL).build());
    WebAppServiceMethods.connectWebAppServerPolicies(this);
}
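
The HTTP feed above polls a stats URI assembled from the accessible HostAndPort. A minimal sketch of that URI construction, assuming a hypothetical node address and HostAndPort imported:

HostAndPort accessible = HostAndPort.fromParts("10.0.0.7", 8098);
String statsUri = String.format("http://%s/stats", accessible.toString()); // http://10.0.0.7:8098/stats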

From source file:com.b2international.snowowl.datastore.server.CDORepositoryManager.java

@Override
protected void doBeforeActivate() throws Exception {

    //read extension points first, then create managed items 
    super.doBeforeActivate();

    Net4jUtil.prepareContainer(IPluginContainer.INSTANCE);
    JVMUtil.prepareContainer(IPluginContainer.INSTANCE);
    TCPUtil.prepareContainer(IPluginContainer.INSTANCE);
    CDONet4jUtil.prepareContainer(IPluginContainer.INSTANCE);
    CDONet4jServerUtil.prepareContainer(IPluginContainer.INSTANCE);

    registerCustomProtocols();

    LifecycleUtil.activate(IPluginContainer.INSTANCE);

    final HostAndPort hostAndPort = getRepositoryConfiguration().getHostAndPort();
    // open port in server environments
    if (SnowOwlApplication.INSTANCE.getEnviroment().isServer()) {
        IAcceptor acceptor = TCPUtil.getAcceptor(IPluginContainer.INSTANCE, hostAndPort.toString()); // Start the TCP transport
        if (getSnowOwlConfiguration().isGzip()) {
            IPluginContainer.INSTANCE.addPostProcessor(
                    new TcpGZIPStreamWrapperInjector(CDOProtocolConstants.PROTOCOL_NAME, acceptor));
        }

        LOGGER.info("Listening on {} for connections", hostAndPort);
    }

    JVMUtil.getAcceptor(IPluginContainer.INSTANCE, Net4jUtils.NET_4_J_CONNECTOR_NAME); // Start the JVM transport
}