Example usage for com.google.common.net HostAndPort fromString

List of usage examples for com.google.common.net HostAndPort fromString

Introduction

On this page you can find usage examples for com.google.common.net HostAndPort.fromString.

Prototype

public static HostAndPort fromString(String hostPortString) 

Document

Split a freeform string into a host and port, without strict validation.
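
For quick reference, here is a minimal, self-contained sketch of typical fromString usage (the host names and ports below are illustrative only; getHostText() matches the examples on this page, although newer Guava releases prefer getHost()):

import com.google.common.net.HostAndPort;

public class HostAndPortExample {
    public static void main(String[] args) {
        // Parse a freeform "host:port" string; the port part is optional.
        HostAndPort hp = HostAndPort.fromString("example.com:8080");
        System.out.println(hp.getHostText()); // example.com
        System.out.println(hp.hasPort());     // true
        System.out.println(hp.getPort());     // 8080

        // With no explicit port, supply a default before reading the port.
        HostAndPort ssh = HostAndPort.fromString("example.com").withDefaultPort(22);
        System.out.println(ssh.getPort());    // 22

        // IPv6 literals must be bracketed when a port is attached.
        HostAndPort ipv6 = HostAndPort.fromString("[::1]:443");
        System.out.println(ipv6.getHostText()); // ::1
    }
}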

Usage

From source file:org.apache.accumulo.core.client.impl.InstanceOperationsImpl.java

@Override
public List<ActiveCompaction> getActiveCompactions(String tserver)
        throws AccumuloException, AccumuloSecurityException {
    final HostAndPort parsedTserver = HostAndPort.fromString(tserver);
    Client client = null;
    try {
        client = ThriftUtil.getTServerClient(parsedTserver, context);

        List<ActiveCompaction> as = new ArrayList<ActiveCompaction>();
        for (org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction activeCompaction : client
                .getActiveCompactions(Tracer.traceInfo(), context.rpcCreds())) {
            as.add(new ActiveCompactionImpl(context.getInstance(), activeCompaction));
        }
        return as;
    } catch (TTransportException e) {
        throw new AccumuloException(e);
    } catch (ThriftSecurityException e) {
        throw new AccumuloSecurityException(e.user, e.code, e);
    } catch (TException e) {
        throw new AccumuloException(e);
    } finally {
        if (client != null)
            ThriftUtil.returnClient(client);
    }
}

From source file:org.apache.brooklyn.location.byon.ByonLocationResolver.java

protected LocationSpec<? extends MachineLocation> parseMachine(Map<String, ?> vals,
        Class<? extends MachineLocation> locationClass, Map<String, ?> defaults, String specForErrMsg) {
    Map<String, Object> valSanitized = Sanitizer.sanitize(vals);
    Map<String, Object> machineConfig = MutableMap.copyOf(vals);

    String osFamily = (String) machineConfig.remove(OS_FAMILY.getName());
    String ssh = (String) machineConfig.remove("ssh");
    String winrm = (String) machineConfig.remove("winrm");
    Map<Integer, String> tcpPortMappings = (Map<Integer, String>) machineConfig.get("tcpPortMappings");

    checkArgument(ssh != null ^ winrm != null, "Must specify exactly one of 'ssh' or 'winrm' for machine: %s",
            valSanitized);

    UserAndHostAndPort userAndHostAndPort;
    String host;
    int port;
    if (ssh != null) {
        userAndHostAndPort = parseUserAndHostAndPort((String) ssh, 22);
    } else {
        userAndHostAndPort = parseUserAndHostAndPort((String) winrm, 5985);
    }

    // If there is a tcpPortMapping defined for the connection-port, then use that for ssh/winrm machine
    port = userAndHostAndPort.getHostAndPort().getPort();
    if (tcpPortMappings != null && tcpPortMappings.containsKey(port)) {
        String override = tcpPortMappings.get(port);
        HostAndPort hostAndPortOverride = HostAndPort.fromString(override);
        if (!hostAndPortOverride.hasPort()) {
            throw new IllegalArgumentException(
                    "Invalid portMapping ('" + override + "') for port " + port + " in " + specForErrMsg);
        }
        port = hostAndPortOverride.getPort();
        host = hostAndPortOverride.getHostText().trim();
    } else {
        host = userAndHostAndPort.getHostAndPort().getHostText().trim();
    }

    machineConfig.put("address", host);
    try {
        InetAddress.getByName(host);
    } catch (Exception e) {
        throw new IllegalArgumentException(
                "Invalid host '" + host + "' specified in '" + specForErrMsg + "': " + e);
    }

    if (userAndHostAndPort.getUser() != null) {
        checkArgument(!vals.containsKey("user"), "Must not specify user twice for machine: %s", valSanitized);
        machineConfig.put("user", userAndHostAndPort.getUser());
    }
    if (userAndHostAndPort.getHostAndPort().hasPort()) {
        checkArgument(!vals.containsKey("port"), "Must not specify port twice for machine: %s", valSanitized);
        machineConfig.put("port", port);
    }
    for (Map.Entry<String, ?> entry : defaults.entrySet()) {
        if (!machineConfig.containsKey(entry.getKey())) {
            machineConfig.put(entry.getKey(), entry.getValue());
        }
    }

    Class<? extends MachineLocation> locationClassHere = locationClass;
    if (osFamily != null) {
        locationClassHere = getLocationClass(osFamily);
    }

    return LocationSpec.create(locationClassHere).configure(machineConfig);
}

From source file:com.streamsets.pipeline.kafka.impl.KafkaValidationUtil08.java

private static List<HostAndPort> getKafkaBrokers(String brokerList) {
    List<HostAndPort> kafkaBrokers = new ArrayList<>();
    if (brokerList != null && !brokerList.isEmpty()) {
        String[] brokers = brokerList.split(",");
        for (String broker : brokers) {
            try {
                kafkaBrokers.add(HostAndPort.fromString(broker));
            } catch (IllegalArgumentException e) {
                // Ignore broker
            }
        }
    }
    return kafkaBrokers;
}

From source file:nl.esciencecenter.xenon.adaptors.shared.ssh.SSHUtil.java

public static String getHost(String adaptorName, String location) throws InvalidLocationException {
    // Parse locations of the format: hostname[:port] and return the host
    try {
        return validateHost(adaptorName, HostAndPort.fromString(location).getHostText().trim());
    } catch (Exception e) {
        // TODO: could be a name in ssh_config instead ??
        throw new InvalidLocationException(adaptorName, "Failed to parse location: " + location);
    }
}

From source file:ezbake.thrift.ThriftClientPool.java

protected void AddEndpoints(String service, List<String> endPoints) {
    synchronized (serviceMap) {
        serviceMap.removeAll(service);
        for (String endPoint : endPoints) {
            try {
                HostAndPort hostAndPort = HostAndPort.fromString(endPoint);
                serviceMap.put(service, hostAndPort);
            } catch (Exception ex) {
                logger.warn("Failed to connect to host(" + endPoint + ") Trying next...", ex);
            }
        }
    }
}

From source file:org.sfs.SfsSingletonServer.java

@Override
public void start(final Future<Void> startedResult) {
    Preconditions.checkState(STARTED.compareAndSet(false, true), "Only one instance is allowed.");
    final Server _this = this;

    LOGGER.info("Starting verticle " + _this);

    initRxSchedulers(vertx);

    JsonObject config = config();

    String fsHome = ConfigHelper.getFieldOrEnv(config, "fs.home");
    if (fsHome == null) {
        fsHome = System.getProperty("user.home");
        if (fsHome != null) {
            fsHome = Paths.get(fsHome, "data", "sfs").toString();
        }
    }
    config.put("fs.home", fsHome);
    LOGGER.info(String.format("Config: %s", config.encodePrettily()));

    Path fileSystem = Paths.get(fsHome);

    testMode = Boolean.valueOf(ConfigHelper.getFieldOrEnv(config, "test_mode", "false"));

    FileSystemLock fileSystemLock = new FileSystemLock(Paths.get(fileSystem.toString(), ".lock"), 60,
            TimeUnit.SECONDS);

    for (String jsonListenAddress : ConfigHelper.getArrayFieldOrEnv(config, "http.listen.addresses",
            new String[] {})) {
        parsedListenAddresses.add(HostAndPort.fromString(jsonListenAddress));
    }

    for (String jsonPublishAddress : ConfigHelper.getArrayFieldOrEnv(config, "http.publish.addresses",
            Iterables.transform(parsedListenAddresses, input -> input.toString()))) {
        parsedPublishAddresses.add(HostAndPort.fromString(jsonPublishAddress));
    }

    if (parsedPublishAddresses.isEmpty()) {
        parsedPublishAddresses.addAll(parsedListenAddresses);
    }

    for (String jsonClusterHost : ConfigHelper.getArrayFieldOrEnv(config, "cluster.hosts",
            Iterables.transform(parsedPublishAddresses, input -> input.toString()))) {
        clusterHosts.add(HostAndPort.fromString(jsonClusterHost));
    }

    // adds itself to the list if the list is empty
    if (clusterHosts.isEmpty()) {
        clusterHosts.addAll(parsedPublishAddresses);
    }

    int threadPoolIoQueueSize = new Integer(
            ConfigHelper.getFieldOrEnv(config, "threadpool.io.queuesize", "10000"));
    Preconditions.checkArgument(threadPoolIoQueueSize > 0, "threadpool.io.queuesize must be greater than 0");

    int threadPoolIoSize = new Integer(ConfigHelper.getFieldOrEnv(config, "threadpool.io.size",
            String.valueOf(Runtime.getRuntime().availableProcessors() * 2)));
    Preconditions.checkArgument(threadPoolIoSize > 0, "threadpool.io.size must be greater than 0");

    int threadPoolBackgroundSize = new Integer(ConfigHelper.getFieldOrEnv(config, "threadpool.background.size",
            String.valueOf(Runtime.getRuntime().availableProcessors() * 2)));
    Preconditions.checkArgument(threadPoolBackgroundSize > 0,
            "threadpool.background.size must be greater than 0");

    int threadPoolBackgroundQueueSize = new Integer(
            ConfigHelper.getFieldOrEnv(config, "threadpool.background.queuesize", "10000"));
    Preconditions.checkArgument(threadPoolBackgroundQueueSize > 0,
            "threadpool.background.queuesize must be greater than 0");

    ioPool = NamedCapacityFixedThreadPool.newInstance(threadPoolIoSize, threadPoolIoQueueSize, "sfs-io-pool");
    backgroundPool = NamedCapacityFixedThreadPool.newInstance(threadPoolBackgroundSize,
            threadPoolBackgroundQueueSize, "sfs-blocking-action-pool");

    this.vertxContext = new VertxContext<>(this);

    nodes = new Nodes();

    verticleMaxHeaderSize = new Integer(ConfigHelper.getFieldOrEnv(config, "http.maxheadersize", "8192"));
    Preconditions.checkArgument(verticleMaxHeaderSize > 0, "http.maxheadersize must be greater than 0");

    int nodeStatsRefreshInterval = new Integer(ConfigHelper.getFieldOrEnv(config, "node_stats_refresh_interval",
            String.valueOf(TimeUnit.SECONDS.toMillis(1))));
    Preconditions.checkArgument(nodeStatsRefreshInterval > 0,
            "node_stats_refresh_interval must be greater than 0");

    remoteNodeMaxPoolSize = new Integer(ConfigHelper.getFieldOrEnv(config, "remotenode.maxpoolsize", "25"));
    Preconditions.checkArgument(remoteNodeMaxPoolSize > 0, "remotenode.maxpoolsize must be greater than 0");

    remoteNodeConnectTimeout = new Integer(
            ConfigHelper.getFieldOrEnv(config, "remotenode.connectimeout", "30000"));
    Preconditions.checkArgument(remoteNodeConnectTimeout > 0,
            "remotenode.connectimeout must be greater than 0");

    int remoteNodeResponseTimeout = new Integer(
            ConfigHelper.getFieldOrEnv(config, "remotenode.responsetimeout", "30000"));
    Preconditions.checkArgument(remoteNodeResponseTimeout > 0,
            "remotenode.responsetimeout must be greater than 0");

    String strRemoteNodeSecret = ConfigHelper.getFieldOrEnv(config, "remotenode.secret");
    Preconditions.checkArgument(strRemoteNodeSecret != null, "remotenode.secret is required");
    remoteNodeSecret = BaseEncoding.base64().decode(strRemoteNodeSecret);

    int numberOfObjectReplicas = new Integer(
            ConfigHelper.getFieldOrEnv(config, "number_of_object_replicas", "0"));
    Preconditions.checkArgument(numberOfObjectReplicas >= 0,
            "number_of_object_replicas must be greater or equal to 0");

    int tempFileTtl = new Integer(ConfigHelper.getFieldOrEnv(config, "temp_file_ttl", "86400000"));
    Preconditions.checkArgument(tempFileTtl >= 0, "temp_file_ttl must be greater or equal to 0");

    final boolean dataNode = Boolean.valueOf(ConfigHelper.getFieldOrEnv(config, "node.data", "true"));
    final boolean masterNode = Boolean.valueOf(ConfigHelper.getFieldOrEnv(config, "node.master", "true"));

    this.httpsClient = createHttpClient(vertx, true);
    this.httpClient = createHttpClient(vertx, false);

    Defer.aVoid().flatMap(aVoid -> sfsFileSystem.open(vertxContext, fileSystem))
            .flatMap(aVoid -> fileSystemLock.lock(vertxContext))
            .flatMap(aVoid -> authProviderService.open(vertxContext))
            .flatMap(aVoid -> awsKms.start(vertxContext, config))
            .flatMap(aVoid -> azureKms.start(vertxContext, config))
            .flatMap(aVoid -> tempDirectoryCleaner.start(vertxContext, tempFileTtl))
            .flatMap(aVoid -> elasticsearch.start(vertxContext, config, masterNode))
            .flatMap(aVoid -> nodes.open(vertxContext, parsedPublishAddresses, clusterHosts,
                    remoteNodeMaxPoolSize, remoteNodeConnectTimeout, remoteNodeResponseTimeout,
                    numberOfObjectReplicas, nodeStatsRefreshInterval, dataNode, masterNode))
            .flatMap(aVoid -> nodeStats.open(vertxContext)).flatMap(aVoid -> clusterInfo.open(vertxContext))
            .flatMap(aVoid -> masterKeys.start(vertxContext))
            .flatMap(aVoid -> containerKeys.start(vertxContext))
            .flatMap(aVoid -> jobs.open(vertxContext, config))
            .flatMap(aVoid -> initHttpListeners(vertxContext, true))
            .doOnNext(httpServers1 -> httpServers.addAll(httpServers1)).subscribe(o -> {
                // do nothing
            }, throwable -> {
                LOGGER.error("Failed to start verticle " + _this, throwable);
                startException = throwable;
                started = true;
                startedResult.fail(throwable);
            }, () -> {
                LOGGER.info("Started verticle " + _this);
                started = true;
                startedResult.complete();
            });
}

From source file:brooklyn.networking.vclouddirector.PortForwarderVcloudDirector.java

@Override
public HostAndPort openPortForwarding(HostAndPort targetEndpoint, Optional<Integer> optionalPublicPort,
        Protocol protocol, Cidr accessingCidr) {
    // TODO should associate ip:port with PortForwardManager; but that takes location param
    //      getPortForwardManager().associate(publicIp, publicPort, targetVm, targetPort);
    // TODO Could check old mapping, and re-use that public port
    // TODO Pass cidr in vcloud-director call
    String publicIp = subnetTier.getConfig(NETWORK_PUBLIC_IP);

    HostAndPort publicEndpoint;
    PortRange portRangeToUse;
    if (optionalPublicPort.isPresent()) {
        publicEndpoint = HostAndPort.fromParts(publicIp, optionalPublicPort.get());
        portRangeToUse = null;
    } else if (getNatMicroserviceAutoAllocatesPorts()) {
        publicEndpoint = HostAndPort.fromString(publicIp);
        portRangeToUse = getPortRange();
    } else {
        PortForwardManager pfw = getPortForwardManager();
        int publicPort = pfw.acquirePublicPort(publicIp);
        publicEndpoint = HostAndPort.fromParts(publicIp, publicPort);
        portRangeToUse = null;
    }

    try {
        HostAndPort result = getClient().openPortForwarding(new PortForwardingConfig().protocol(Protocol.TCP)
                .publicEndpoint(publicEndpoint).publicPortRange(portRangeToUse).targetEndpoint(targetEndpoint));

        // TODO Work around for old vCD NAT microservice, which returned empty result
        if (!result.hasPort() && result.getHostText().equals("")) {
            if (publicEndpoint.hasPort()) {
                LOG.warn(
                        "[DEPRECATED] NAT Rule addition returned endpoint '{}'; probably old micro-service version; "
                                + "assuming result is {}->{} via {}",
                        new Object[] { result, publicEndpoint, targetEndpoint, subnetTier });
                result = publicEndpoint;
            } else {
                throw new IllegalStateException("Invalid result for NAT Rule addition, returned endpoint ''; "
                        + "cannot infer actual result as no explicit port requested for " + publicEndpoint
                        + "->" + targetEndpoint + " via " + subnetTier);
            }
        }

        LOG.debug("Enabled port-forwarding for {}, via {}, on ",
                new Object[] { targetEndpoint, result, subnetTier });
        return result;
    } catch (Exception e) {
        Exceptions.propagateIfFatal(e);
        LOG.info("Failed creating port forwarding rule on " + this + ": " + publicEndpoint + "->"
                + targetEndpoint + "; rethrowing", e);
        throw Exceptions.propagate(e);
    }
}

From source file:nl.esciencecenter.xenon.adaptors.shared.ssh.SSHUtil.java

public static int getPort(String adaptorName, String location) throws InvalidLocationException {
    // Parse locations of the format: hostname[:port] and return the port
    try {
        return HostAndPort.fromString(location).getPortOrDefault(DEFAULT_SSH_PORT);
    } catch (Exception e) {
        // TODO: could be a name in ssh_config instead ??
        throw new InvalidLocationException(adaptorName, "Failed to parse location: " + location);
    }
}

From source file:org.getlantern.firetweet.app.FiretweetApplication.java

@Override
public void onCreate() {
    StrictModeUtils.detectAll();

    super.onCreate();

    Fabric.with(this, new Crashlytics());

    final Context context = this.getApplicationContext();
    File f = context.getFilesDir();
    String path = "";
    if (f != null) {
        path = f.getPath();
    }

    int startupTimeoutMillis = 30000;
    String trackingId = "UA-21408036-4";
    try {
        org.lantern.mobilesdk.StartResult result = Lantern.enable(context, startupTimeoutMillis, true,
                trackingId, null);
        HostAndPort hp = HostAndPort.fromString(result.getHTTPAddr());
        PROXY_HOST = hp.getHostText();
        PROXY_PORT = hp.getPort();
        Log.d(TAG, "Proxy host --> " + PROXY_HOST + " " + PROXY_PORT);
    } catch (Exception e) {
        Log.d(TAG, "Unable to start Lantern: " + e.getMessage());
    }

    mHandler = new Handler();
    mMessageBus = new Bus();
    //mPreferences = getSharedPreferences(SHARED_PREFERENCES_NAME, MODE_PRIVATE);
    //mPreferences.registerOnSharedPreferenceChangeListener(this);
    initializeAsyncTask();
    initAccountColor(this);
    initUserColor(this);

    final PackageManager pm = getPackageManager();
    final ComponentName main = new ComponentName(this, MainActivity.class);
    final ComponentName main2 = new ComponentName(this, MainHondaJOJOActivity.class);
    final boolean mainDisabled = pm
            .getComponentEnabledSetting(main) != PackageManager.COMPONENT_ENABLED_STATE_ENABLED;
    final boolean main2Disabled = pm
            .getComponentEnabledSetting(main2) != PackageManager.COMPONENT_ENABLED_STATE_ENABLED;
    final boolean noEntry = mainDisabled && main2Disabled;
    if (noEntry) {
        pm.setComponentEnabledSetting(main, PackageManager.COMPONENT_ENABLED_STATE_ENABLED,
                PackageManager.DONT_KILL_APP);
    } else if (!mainDisabled) {
        pm.setComponentEnabledSetting(main2, PackageManager.COMPONENT_ENABLED_STATE_DISABLED,
                PackageManager.DONT_KILL_APP);
    }
    startRefreshServiceIfNeeded(this);
}

From source file:org.apache.beam.runners.dataflow.worker.windmill.GrpcWindmillServer.java

public GrpcWindmillServer(StreamingDataflowWorkerOptions options) throws IOException {
    this.options = options;
    this.streamingRpcBatchLimit = options.getWindmillServiceStreamingRpcBatchLimit();
    this.endpoints = ImmutableSet.of();
    if (options.getWindmillServiceEndpoint() != null) {
        Set<HostAndPort> endpoints = new HashSet<>();
        for (String endpoint : Splitter.on(',').split(options.getWindmillServiceEndpoint())) {
            endpoints.add(HostAndPort.fromString(endpoint).withDefaultPort(options.getWindmillServicePort()));
        }
        initializeWindmillService(endpoints);
    } else if (!streamingEngineEnabled() && options.getLocalWindmillHostport() != null) {
        int portStart = options.getLocalWindmillHostport().lastIndexOf(':');
        String endpoint = options.getLocalWindmillHostport().substring(0, portStart);
        assert ("grpc:localhost".equals(endpoint));
        int port = Integer.parseInt(options.getLocalWindmillHostport().substring(portStart + 1));
        this.endpoints = ImmutableSet.<HostAndPort>of(HostAndPort.fromParts("localhost", port));
        initializeLocalHost(port);
    }
}