Example usage for com.google.common.net HostAndPort fromParts

Introduction

On this page you can find example usage for com.google.common.net HostAndPort.fromParts.

Prototype

public static HostAndPort fromParts(String host, int port) 

Document

Build a HostAndPort instance from separate host and port values.
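
For orientation, here is a minimal, self-contained sketch of calling fromParts directly; the host name and port values are placeholders:

import com.google.common.net.HostAndPort;

public class FromPartsExample {
    public static void main(String[] args) {
        // Combine separate host and port values into a single immutable HostAndPort.
        HostAndPort endpoint = HostAndPort.fromParts("example.com", 8080);

        System.out.println(endpoint.getPort()); // 8080
        System.out.println(endpoint);           // example.com:8080

        // Older Guava releases expose the host via getHostText(), as in the examples
        // below; newer releases use getHost() instead.
        // fromParts validates its arguments: a port outside 0-65535, or a host string
        // that already contains a port, results in an IllegalArgumentException.
    }
}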

Usage

From source file: com.streamsets.pipeline.kafka.impl.KafkaLowLevelConsumer09.java

private HostAndPort findNewLeader(HostAndPort oldLeader, String topic, int partition) throws StageException {
    //try 3 times to find a new leader
    for (int i = 0; i < 3; i++) {
        boolean sleep;
        PartitionMetadata metadata = getPartitionMetadata(replicaBrokers, topic, partition);
        if (metadata == null || metadata.leader() == null) {
            sleep = true;
        } else if (oldLeader.getHostText().equalsIgnoreCase(metadata.leader().host()) && i == 0) {
            // leader has not yet changed, give ZooKeeper some time
            sleep = true;
        } else {
            return HostAndPort.fromParts(metadata.leader().host(), metadata.leader().port());
        }
        if (sleep) {
            ThreadUtil.sleep(ONE_SECOND);
        }
    }
    LOG.error(KafkaErrors.KAFKA_21.getMessage());
    throw new StageException(KafkaErrors.KAFKA_21);
}

From source file: org.apache.beam.runners.dataflow.worker.windmill.GrpcWindmillServer.java

public GrpcWindmillServer(StreamingDataflowWorkerOptions options) throws IOException {
    this.options = options;
    this.streamingRpcBatchLimit = options.getWindmillServiceStreamingRpcBatchLimit();
    this.endpoints = ImmutableSet.of();
    if (options.getWindmillServiceEndpoint() != null) {
        Set<HostAndPort> endpoints = new HashSet<>();
        for (String endpoint : Splitter.on(',').split(options.getWindmillServiceEndpoint())) {
            endpoints.add(HostAndPort.fromString(endpoint).withDefaultPort(options.getWindmillServicePort()));
        }
        initializeWindmillService(endpoints);
    } else if (!streamingEngineEnabled() && options.getLocalWindmillHostport() != null) {
        int portStart = options.getLocalWindmillHostport().lastIndexOf(':');
        String endpoint = options.getLocalWindmillHostport().substring(0, portStart);
        assert ("grpc:localhost".equals(endpoint));
        int port = Integer.parseInt(options.getLocalWindmillHostport().substring(portStart + 1));
        this.endpoints = ImmutableSet.<HostAndPort>of(HostAndPort.fromParts("localhost", port));
        initializeLocalHost(port);
    }
}
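
The Dataflow worker example above parses comma-separated endpoint strings with fromString(...).withDefaultPort(...) and only uses fromParts for the fixed localhost case. A brief sketch of the difference, with placeholder endpoint values:

HostAndPort a = HostAndPort.fromParts("localhost", 12345);                        // host and port already separate
HostAndPort b = HostAndPort.fromString("windmill.example:443");                   // parses a "host:port" string
HostAndPort c = HostAndPort.fromString("windmill.example").withDefaultPort(443);  // applies the port only if missing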

From source file: org.springframework.xd.ec2.cloud.AWSInstanceChecker.java

/**
 * In cases where the service does not have any exposed ports, we can check
 * whether it has started. We do have JMX capabilities, but they are not
 * active at this time.
 *
 * @param instance
 *            the running instance to examine.
 * @param managementPort
 *            the JMX port.
 * @return {@code true} if the container process started, {@code false} if it timed out.
 */
public boolean checkContainerProcess(RunningInstance instance, int managementPort) {
    Assert.notNull(instance, "instance can not be null");
    boolean result = true;
    RunningInstance localInstance = AWSInstanceProvisioner.findInstanceById(client, instance.getId());
    final SocketOpen socketOpen = computeService.getContext().utils().injector().getInstance(SocketOpen.class);
    final Predicate<HostAndPort> socketTester = retry(socketOpen, 300, 1, TimeUnit.SECONDS);
    LOGGER.info(String.format("Awaiting XD container to start %n"));
    if (!socketTester.apply(HostAndPort.fromParts(localInstance.getIpAddress(), managementPort))) {
        result = false;
        LOGGER.warn("timeout waiting for container to start: " + localInstance.getIpAddress());
        return result;
    }
    LOGGER.info(String.format("Container started%n"));
    return result;
}

From source file: brooklyn.demo.CumulusRDFApplication.java

/**
 * Create the application entities:
 * <ul>
 * <li>A {@link CassandraFabric} of {@link CassandraDatacenter}s containing {@link CassandraNode}s
 * <li>A {@link TomcatServer}
 * </ul>
 */
@Override
public void initApp() {
    // Cassandra cluster
    EntitySpec<CassandraDatacenter> clusterSpec = EntitySpec.create(CassandraDatacenter.class)
            .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(CassandraNode.class)
                    //FIXME can probably use JMXMP_AND_RMI now, to deploy to GCE and elsewhere
                    .configure(UsesJmx.JMX_AGENT_MODE, UsesJmx.JmxAgentModes.JMX_RMI_CUSTOM_AGENT)
                    .configure(UsesJmx.JMX_PORT, PortRanges.fromString("11099+"))
                    .configure(UsesJmx.RMI_REGISTRY_PORT, PortRanges.fromString("9001+"))
                    .configure(CassandraNode.THRIFT_PORT,
                            PortRanges.fromInteger(getConfig(CASSANDRA_THRIFT_PORT)))
                    .enricher(EnricherSpec.create(ServiceFailureDetector.class))
                    .policy(PolicySpec.create(ServiceRestarter.class).configure(
                            ServiceRestarter.FAILURE_SENSOR_TO_MONITOR, ServiceFailureDetector.ENTITY_FAILED)))
            .policy(PolicySpec.create(ServiceReplacer.class).configure(
                    ServiceReplacer.FAILURE_SENSOR_TO_MONITOR, ServiceRestarter.ENTITY_RESTART_FAILED));

    if (getConfig(MULTI_REGION_FABRIC)) {
        cassandra = addChild(
                EntitySpec.create(CassandraFabric.class).configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                        .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)) // per location
                        .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME,
                                "brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
                        .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL,
                                "classpath://brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
                        .configure(CassandraFabric.MEMBER_SPEC, clusterSpec));
    } else {
        cassandra = addChild(
                EntitySpec.create(clusterSpec).configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                        .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)));
    }

    // Tomcat web-app server
    webapp = addChild(EntitySpec.create(TomcatServer.class)
            .configure(UsesJmx.JMX_AGENT_MODE, UsesJmx.JmxAgentModes.JMX_RMI_CUSTOM_AGENT)
            .configure(UsesJmx.JMX_PORT, PortRanges.fromString("11099+"))
            .configure(UsesJmx.RMI_REGISTRY_PORT, PortRanges.fromString("9001+"))
            .configure(JavaWebAppService.ROOT_WAR,
                    "https://cumulusrdf.googlecode.com/svn/wiki/downloads/cumulusrdf-1.0.1.war")
            .configure(UsesJava.JAVA_SYSPROPS, MutableMap.of("cumulusrdf.config-file", "/tmp/cumulus.yaml")));

    // Add an effector to tomcat to reconfigure with a new YAML config file
    ((EntityInternal) webapp).getMutableEntityType().addEffector(cumulusConfig, new EffectorBody<Void>() {
        @Override
        public Void call(ConfigBag parameters) {
            // Process the YAML template given in the application config
            String url = Entities.getRequiredUrlConfig(CumulusRDFApplication.this, CUMULUS_RDF_CONFIG_URL);
            Map<String, Object> config;
            synchronized (endpointMutex) {
                config = MutableMap.<String, Object>of("cassandraHostname", endpoint.getHostText(),
                        "cassandraThriftPort", endpoint.getPort());
            }
            String contents = TemplateProcessor.processTemplateContents(
                    ResourceUtils.create(CumulusRDFApplication.this).getResourceAsString(url), config);
            // Copy the file contents to the remote machine
            return DynamicTasks.queue(SshEffectorTasks.put("/tmp/cumulus.yaml").contents(contents)).get();
        }
    });

    // Listen for HOSTNAME changes from the Cassandra fabric to show at least one node is available
    subscribe(cassandra, CassandraDatacenter.HOSTNAME, new SensorEventListener<String>() {
        @Override
        public void onEvent(SensorEvent<String> event) {
            if (Strings.isNonBlank(event.getValue())) {
                synchronized (endpointMutex) {
                    String hostname = Entities
                            .submit(CumulusRDFApplication.this, DependentConfiguration
                                    .attributeWhenReady(cassandra, CassandraDatacenter.HOSTNAME))
                            .getUnchecked();
                    Integer thriftPort = Entities
                            .submit(CumulusRDFApplication.this, DependentConfiguration
                                    .attributeWhenReady(cassandra, CassandraDatacenter.THRIFT_PORT))
                            .getUnchecked();
                    HostAndPort current = HostAndPort.fromParts(hostname, thriftPort);

                    // Check if the cluster access point has changed
                    if (!current.equals(endpoint)) {
                        log.info("Setting cluster endpoint to {}", current.toString());
                        endpoint = current;

                        // Reconfigure the CumulusRDF application and restart tomcat if necessary
                        webapp.invoke(cumulusConfig, MutableMap.<String, Object>of());
                        if (webapp.getAttribute(Startable.SERVICE_UP)) {
                            webapp.restart();
                        }
                    }
                }
            }
        }
    });
}

From source file: org.spring.springxdcloudInstaller.MainApp.java

static RunningInstance blockNodeInstanceRunning(EC2Client client, RunningInstance instance)
        throws TimeoutException {
    // create utilities that wait for the instance to finish
    RetryablePredicate<RunningInstance> runningTester = new RetryablePredicate<RunningInstance>(
            new InstanceStateRunning(client), 180, 5, TimeUnit.SECONDS);

    System.out.printf("%d: %s awaiting instance to run %n", System.currentTimeMillis(), instance.getId());
    if (!runningTester.apply(instance))
        throw new TimeoutException("timeout waiting for instance to run: " + instance.getId());

    instance = findInstanceById(client, instance.getId());

    RetryablePredicate<HostAndPort> socketTester = new RetryablePredicate<HostAndPort>(
            new InetSocketAddressConnect(), 300, 1, TimeUnit.SECONDS);
    System.out.printf("%d: %s awaiting ssh service to start%n", System.currentTimeMillis(),
            instance.getIpAddress());
    if (!socketTester.apply(HostAndPort.fromParts(instance.getIpAddress(), 22)))
        throw new TimeoutException("timeout waiting for ssh to start: " + instance.getIpAddress());

    System.out.printf("%d: %s ssh service started%n", System.currentTimeMillis(), instance.getIpAddress());

    System.out.printf("%d: %s http service started%n", System.currentTimeMillis(), instance.getIpAddress());
    System.out.printf("instance %s ready%n", instance.getId());
    System.out.printf("ip address: %s%n", instance.getIpAddress());
    System.out.printf("dns name: %s%n", instance.getDnsName());
    return instance;
}

From source file: ratpack.http.client.internal.RequestActionSupport.java

public void connect(final Downstream<? super T> downstream) throws Exception {
    final Bootstrap b = new Bootstrap();
    b.group(this.execution.getEventLoop()).channel(ChannelImplDetector.getSocketChannelImpl())
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline p = ch.pipeline();

                    if (finalUseSsl) {
                        SSLEngine sslEngine;
                        if (requestSpecBacking.getSslContext() != null) {
                            sslEngine = requestSpecBacking.getSslContext().createSSLEngine();
                        } else {
                            sslEngine = SSLContext.getDefault().createSSLEngine();
                        }
                        sslEngine.setUseClientMode(true);
                        p.addLast("ssl", new SslHandler(sslEngine));
                    }

                    p.addLast("codec", new HttpClientCodec());
                    p.addLast("readTimeout",
                            new ReadTimeoutHandler(requestParams.readTimeoutNanos, TimeUnit.NANOSECONDS));

                    p.addLast("redirectHandler", new SimpleChannelInboundHandler<HttpObject>(false) {
                        boolean readComplete;
                        boolean redirected;

                        @Override
                        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
                            if (!readComplete) {
                                error(downstream, new PrematureChannelClosureException(
                                        "Server " + uri + " closed the connection prematurely"));
                            }
                            super.channelReadComplete(ctx);
                        }

                        @Override
                        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
                                throws Exception {
                            super.exceptionCaught(ctx, cause);
                        }

                        @Override
                        protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg)
                                throws Exception {
                            if (msg instanceof HttpResponse) {
                                readComplete = true;
                                final HttpResponse response = (HttpResponse) msg;
                                int maxRedirects = requestSpecBacking.getMaxRedirects();
                                int status = response.status().code();
                                String locationValue = response.headers().get(HttpHeaderConstants.LOCATION);

                                Action<? super RequestSpec> redirectConfigurer = RequestActionSupport.this.requestConfigurer;
                                if (isRedirect(status) && redirectCounter < maxRedirects
                                        && locationValue != null) {
                                    final Function<? super ReceivedResponse, Action<? super RequestSpec>> onRedirect = requestSpecBacking
                                            .getOnRedirect();
                                    if (onRedirect != null) {
                                        final Action<? super RequestSpec> onRedirectResult = onRedirect
                                                .apply(toReceivedResponse(response));
                                        if (onRedirectResult == null) {
                                            redirectConfigurer = null;
                                        } else {
                                            redirectConfigurer = redirectConfigurer.append(onRedirectResult);
                                        }
                                    }

                                    if (redirectConfigurer != null) {
                                        Action<? super RequestSpec> redirectRequestConfig = s -> {
                                            if (status == 301 || status == 302) {
                                                s.method("GET");
                                            }
                                        };
                                        redirectRequestConfig = redirectRequestConfig
                                                .append(redirectConfigurer);

                                        URI locationUrl;
                                        if (ABSOLUTE_PATTERN.matcher(locationValue).matches()) {
                                            locationUrl = new URI(locationValue);
                                        } else {
                                            locationUrl = new URI(uri.getScheme(), null, uri.getHost(),
                                                    uri.getPort(), locationValue, null, null);
                                        }

                                        buildRedirectRequestAction(redirectRequestConfig, locationUrl,
                                                redirectCounter + 1).connect(downstream);
                                        redirected = true;
                                    }
                                }
                            }

                            if (!redirected) {
                                ctx.fireChannelRead(msg);
                            }
                        }
                    });

                    if (requestSpecBacking.isDecompressResponse()) {
                        p.addLast(new HttpContentDecompressor());
                    }
                    addResponseHandlers(p, downstream);
                }

                @Override
                public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                    ctx.close();
                    error(downstream, cause);
                }
            });

    ChannelFuture connectFuture = b.connect(host, port);
    connectFuture.addListener(f1 -> {
        if (connectFuture.isSuccess()) {
            String fullPath = getFullPath(uri);
            FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
                    HttpMethod.valueOf(requestSpecBacking.getMethod()), fullPath, requestSpecBacking.getBody());
            if (headers.get(HttpHeaderConstants.HOST) == null) {
                HostAndPort hostAndPort = HostAndPort.fromParts(host, port);
                headers.set(HttpHeaderConstants.HOST, hostAndPort.toString());
            }
            headers.set(HttpHeaderConstants.CONNECTION, HttpHeaderValues.CLOSE);
            int contentLength = request.content().readableBytes();
            if (contentLength > 0) {
                headers.set(HttpHeaderConstants.CONTENT_LENGTH, Integer.toString(contentLength));
            }

            HttpHeaders requestHeaders = request.headers();

            for (String name : headers.getNames()) {
                requestHeaders.set(name, headers.getAll(name));
            }

            ChannelFuture writeFuture = connectFuture.channel().writeAndFlush(request);
            writeFuture.addListener(f2 -> {
                if (!writeFuture.isSuccess()) {
                    writeFuture.channel().close();
                    error(downstream, writeFuture.cause());
                }
            });
        } else {
            connectFuture.channel().close();
            error(downstream, connectFuture.cause());
        }
    });
}

From source file: org.apache.brooklyn.demo.CumulusRDFApplication.java

/**
 * Create the application entities:
 * <ul>
 * <li>A {@link CassandraFabric} of {@link CassandraDatacenter}s containing {@link CassandraNode}s
 * <li>A {@link TomcatServer}
 * </ul>
 */
@Override
public void initApp() {
    // Cassandra cluster
    EntitySpec<CassandraDatacenter> clusterSpec = EntitySpec.create(CassandraDatacenter.class)
            .configure(CassandraDatacenter.MEMBER_SPEC, EntitySpec.create(CassandraNode.class)
                    //FIXME can probably use JMXMP_AND_RMI now, to deploy to GCE and elsewhere
                    .configure(UsesJmx.JMX_AGENT_MODE, UsesJmx.JmxAgentModes.JMX_RMI_CUSTOM_AGENT)
                    .configure(UsesJmx.JMX_PORT, PortRanges.fromString("11099+"))
                    .configure(UsesJmx.RMI_REGISTRY_PORT, PortRanges.fromString("9001+"))
                    .configure(CassandraNode.THRIFT_PORT,
                            PortRanges.fromInteger(getConfig(CASSANDRA_THRIFT_PORT)))
                    .enricher(EnricherSpec.create(ServiceFailureDetector.class))
                    .policy(PolicySpec.create(ServiceRestarter.class).configure(
                            ServiceRestarter.FAILURE_SENSOR_TO_MONITOR, ServiceFailureDetector.ENTITY_FAILED)))
            .policy(PolicySpec.create(ServiceReplacer.class).configure(
                    ServiceReplacer.FAILURE_SENSOR_TO_MONITOR, ServiceRestarter.ENTITY_RESTART_FAILED));

    if (getConfig(MULTI_REGION_FABRIC)) {
        cassandra = addChild(EntitySpec.create(CassandraFabric.class)
                .configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)) // per location
                .configure(CassandraDatacenter.ENDPOINT_SNITCH_NAME,
                        "org.apache.brooklyn.entity.nosql.cassandra.customsnitch.MultiCloudSnitch")
                .configure(CassandraNode.CUSTOM_SNITCH_JAR_URL,
                        "classpath://org/apache/brooklyn/entity/nosql/cassandra/cassandra-multicloud-snitch.jar")
                .configure(CassandraFabric.MEMBER_SPEC, clusterSpec));
    } else {
        cassandra = addChild(
                EntitySpec.create(clusterSpec).configure(CassandraDatacenter.CLUSTER_NAME, "Brooklyn")
                        .configure(CassandraDatacenter.INITIAL_SIZE, getConfig(CASSANDRA_CLUSTER_SIZE)));
    }

    // Tomcat web-app server
    webapp = addChild(EntitySpec.create(TomcatServer.class)
            .configure(UsesJmx.JMX_AGENT_MODE, UsesJmx.JmxAgentModes.JMX_RMI_CUSTOM_AGENT)
            .configure(UsesJmx.JMX_PORT, PortRanges.fromString("11099+"))
            .configure(UsesJmx.RMI_REGISTRY_PORT, PortRanges.fromString("9001+"))
            .configure(JavaWebAppService.ROOT_WAR,
                    "https://cumulusrdf.googlecode.com/svn/wiki/downloads/cumulusrdf-1.0.1.war")
            .configure(UsesJava.JAVA_SYSPROPS, MutableMap.of("cumulusrdf.config-file", "/tmp/cumulus.yaml")));

    // Add an effector to tomcat to reconfigure with a new YAML config file
    ((EntityInternal) webapp).getMutableEntityType().addEffector(cumulusConfig, new EffectorBody<Void>() {
        @Override
        public Void call(ConfigBag parameters) {
            // Process the YAML template given in the application config
            String url = Entities.getRequiredUrlConfig(CumulusRDFApplication.this, CUMULUS_RDF_CONFIG_URL);
            Map<String, Object> config;
            synchronized (endpointMutex) {
                config = MutableMap.<String, Object>of("cassandraHostname", endpoint.getHostText(),
                        "cassandraThriftPort", endpoint.getPort());
            }
            String contents = TemplateProcessor.processTemplateContents(
                    ResourceUtils.create(CumulusRDFApplication.this).getResourceAsString(url), config);
            // Copy the file contents to the remote machine
            return DynamicTasks.queue(SshEffectorTasks.put("/tmp/cumulus.yaml").contents(contents)).get();
        }
    });

    // Listen for HOSTNAME changes from the Cassandra fabric to show at least one node is available
    subscriptions().subscribe(cassandra, CassandraDatacenter.HOSTNAME, new SensorEventListener<String>() {
        @Override
        public void onEvent(SensorEvent<String> event) {
            if (Strings.isNonBlank(event.getValue())) {
                synchronized (endpointMutex) {
                    String hostname = Entities
                            .submit(CumulusRDFApplication.this, DependentConfiguration
                                    .attributeWhenReady(cassandra, CassandraDatacenter.HOSTNAME))
                            .getUnchecked();
                    Integer thriftPort = Entities
                            .submit(CumulusRDFApplication.this, DependentConfiguration
                                    .attributeWhenReady(cassandra, CassandraDatacenter.THRIFT_PORT))
                            .getUnchecked();
                    HostAndPort current = HostAndPort.fromParts(hostname, thriftPort);

                    // Check if the cluster access point has changed
                    if (!current.equals(endpoint)) {
                        log.info("Setting cluster endpoint to {}", current.toString());
                        endpoint = current;

                        // Reconfigure the CumulusRDF application and restart tomcat if necessary
                        webapp.invoke(cumulusConfig, MutableMap.<String, Object>of());
                        if (webapp.getAttribute(Startable.SERVICE_UP)) {
                            webapp.restart();
                        }
                    }
                }
            }
        }
    });
}

From source file: ezbake.discovery.stethoscope.client.StethoscopeClient.java

public void init() {
    Preconditions.checkNotNull(this.configuration, "No properties have been set!");

    // Merge configuration parameters from the command line
    if (!additionalConfigurationDirs.isEmpty()) {
        List<EzConfigurationLoader> loaders = Lists.newArrayList();
        for (Path configDir : additionalConfigurationDirs) {
            loaders.add(new DirectoryConfigurationLoader(configDir));
        }
        try {
            Properties loadedProps = new EzConfiguration(
                    loaders.toArray(new EzConfigurationLoader[loaders.size()])).getProperties();
            configuration.putAll(loadedProps);
        } catch (EzConfigurationLoaderException e) {
            logger.warn("Failed to load additional configuration directories", e);
        }
    }
    if (!additionalProperties.isEmpty()) {
        configuration.putAll(additionalProperties);
    }

    // Check to see if our host and port were set properly, if not then use what we can get from environment
    if (Strings.isNullOrEmpty(privateServiceHostname) || privateServicePort < 1) {
        HostAndPort openshiftHostAndPort = OpenShiftUtil.getThriftPrivateInfo();
        this.privateServiceHostname = openshiftHostAndPort.getHostText();
        this.privateServicePort = openshiftHostAndPort.getPort();
    }
    this.privateHostAndPort = HostAndPort.fromParts(privateServiceHostname, privateServicePort);
    this.publicHostAndPort = HostAndPort.fromParts(publicserviceHostname, publicservicePort);

    // Check to see if our checkinInterval was set, if not then lets get it from EzConfiguration
    if (checkinInterval == -1) {
        String checkinProp = configuration.getProperty(STETHOSCOPE_CHECKIN_INTERVAL_MINUTES);
        String errorMsg = String.format("Checkin Interval was NOT specified please set %s in ezconfiguration",
                STETHOSCOPE_CHECKIN_INTERVAL_MINUTES);
        Preconditions.checkState(!Strings.isNullOrEmpty(checkinProp), errorMsg);
        this.checkinInterval = NumberUtils.toInt(checkinProp);
    }

    EzBakeApplicationConfigurationHelper appHelper = new EzBakeApplicationConfigurationHelper(configuration);
    this.appName = appHelper.getApplicationName();
    this.serviceName = appHelper.getServiceName();
    this.numRetries = configuration.getInteger(NUM_RETRIES_FOR_CREATING_CLIENT, 4);

    this.serviceDiscovery = new ServiceDiscoveryClient(configuration);
}

From source file: brooklyn.networking.vclouddirector.PortForwarderVcloudDirector.java

@Override
public boolean closePortForwarding(HasNetworkAddresses targetMachine, int targetPort,
        HostAndPort publicEndpoint, Protocol protocol) {
    String targetIp = Iterables.getFirst(
            Iterables.concat(targetMachine.getPrivateAddresses(), targetMachine.getPublicAddresses()), null);
    if (targetIp == null) {
        LOG.warn("Failed to close port-forwarding rule because no IP in {}, on {}: {} -> {}",
                new Object[] { targetMachine, this, targetPort, publicEndpoint });
        return false;
    }

    return closePortForwarding(HostAndPort.fromParts(targetIp, targetPort), publicEndpoint, protocol);
}

From source file: org.apache.brooklyn.location.jclouds.DefaultConnectivityResolver.java

/**
 * Combines the given resolve options with the customiser's configuration to determine the
 * best address and credential pair for management. In particular, if the resolve options
 * allow it will check that the credential is actually valid for the address.
 */
@Override
public ManagementAddressResolveResult resolve(JcloudsLocation location, NodeMetadata node, ConfigBag config,
        ConnectivityResolverOptions options) {
    LOG.debug("{} resolving management parameters for {}, node={}, config={}, options={}",
            new Object[] { this, location, node, config, options });
    final Stopwatch timer = Stopwatch.createStarted();
    // Should only be null in tests.
    final Entity contextEntity = getContextEntity(config);
    if (shouldPublishNetworks() && !options.isRebinding() && contextEntity != null) {
        publishNetworks(node, contextEntity);
    }
    HostAndPort hapChoice = null;
    LoginCredentials credChoice = null;

    final Iterable<HostAndPort> managementCandidates = getManagementCandidates(location, node, config, options);
    Iterable<LoginCredentials> credentialCandidates = Collections.emptyList();
    if (!Iterables.isEmpty(managementCandidates)) {
        credentialCandidates = getCredentialCandidates(location, node, options, config);

        // Try each pair of address and credential until one succeeds.
        if (shouldCheckCredentials() && options.pollForReachableAddresses()) {
            for (HostAndPort hap : managementCandidates) {
                for (LoginCredentials cred : credentialCandidates) {
                    LOG.trace("Testing host={} with credential={}", hap, cred);
                    if (checkCredential(location, hap, cred, config, options.isWindows())) {
                        hapChoice = hap;
                        credChoice = cred;
                        break;
                    }
                }
                if (hapChoice != null)
                    break;
            }
        } else if (shouldCheckCredentials()) {
            LOG.debug("{} set on {} but pollForFirstReachableAddress={}",
                    new Object[] { CHECK_CREDENTIALS.getName(), this, options.pollForReachableAddresses() });
        }
    }

    if (hapChoice == null) {
        LOG.trace("Choosing first management candidate given node={} and mode={}", node, getNetworkMode());
        hapChoice = Iterables.getFirst(managementCandidates, null);
    }
    if (hapChoice == null) {
        LOG.trace("Choosing first address of node={} in mode={}", node, getNetworkMode());
        final Iterator<String> hit = getResolvableAddressesWithMode(node).iterator();
        if (hit.hasNext())
            hapChoice = HostAndPort.fromHost(hit.next());
    }

    if (hapChoice == null) {
        LOG.error("None of the addresses of node {} are reachable in mode {}",
                new Object[] { node, getNetworkMode() });
        throw new IllegalStateException(
                "Could not determine management address for node: " + node + " in mode: " + getNetworkMode());
    }

    if (credChoice == null) {
        credChoice = Iterables.getFirst(credentialCandidates, null);
        if (credChoice == null) {
            throw new IllegalStateException("No credentials configured for " + location);
        }
    }

    if (contextEntity != null) {
        contextEntity.sensors().set(Attributes.ADDRESS, hapChoice.getHostText());
    }

    // Treat AWS as a special case because the DNS fully qualified hostname in AWS is
    // (normally?!) a good way to refer to the VM from both inside and outside of the region.
    if (!isNetworkModeSet() && !options.isWindows()) {
        final boolean lookupAwsHostname = Boolean.TRUE
                .equals(config.get(JcloudsLocationConfig.LOOKUP_AWS_HOSTNAME));
        String provider = config.get(JcloudsLocationConfig.CLOUD_PROVIDER);
        if (provider == null) {
            provider = location.getProvider();
        }
        if (options.waitForConnectable() && "aws-ec2".equals(provider) && lookupAwsHostname) {
            // getHostnameAws sshes to the machine and curls 169.254.169.254/latest/meta-data/public-hostname.
            try {
                LOG.debug("Resolving AWS hostname of {}", location);
                String result = location.getHostnameAws(hapChoice, credChoice, config);
                hapChoice = HostAndPort.fromParts(result, hapChoice.getPort());
                LOG.debug("Resolved AWS hostname of {}: {}", location, result);
            } catch (Exception e) {
                LOG.debug("Failed to resolve AWS hostname of " + location, e);
            }
        }
    }

    ManagementAddressResolveResult result = new ManagementAddressResolveResult(hapChoice, credChoice);
    LOG.debug("{} resolved management parameters for {} in {}: {}",
            new Object[] { this, location, Duration.of(timer), result });
    return result;
}