Example usage for com.google.common.net HostAndPort fromString

Introduction

On this page you can find example usages of com.google.common.net.HostAndPort#fromString, collected from open source projects.

Prototype

public static HostAndPort fromString(String hostPortString) 

Document

Split a freeform string into a host and port, without strict validation.
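
Below is a minimal, self-contained sketch (not taken from the projects listed under Usage) showing how fromString handles the common input shapes. Note that getHost() is the current name of the host accessor; older Guava releases expose it as getHostText(), which several of the examples below still use.

import com.google.common.net.HostAndPort;

public class HostAndPortFromStringDemo {
    public static void main(String[] args) {
        // Host with an explicit port
        HostAndPort withPort = HostAndPort.fromString("example.com:8080");
        System.out.println(withPort.getHost()); // example.com
        System.out.println(withPort.getPort()); // 8080

        // Host only: no port is present, so read it with a fallback
        HostAndPort hostOnly = HostAndPort.fromString("example.com");
        System.out.println(hostOnly.hasPort());            // false
        System.out.println(hostOnly.getPortOrDefault(80)); // 80

        // Bracketed IPv6 literal
        HostAndPort v6 = HostAndPort.fromString("[2001:db8::1]:443");
        System.out.println(v6.getHost()); // 2001:db8::1
        System.out.println(v6.getPort()); // 443
    }
}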

Usage

From source file:zipkin.dependencies.cassandra.CassandraDependenciesJob.java

static String parseHosts(String contactPoints) {
    List<String> result = new LinkedList<>();
    for (String contactPoint : contactPoints.split(",")) {
        HostAndPort parsed = HostAndPort.fromString(contactPoint);
        result.add(parsed.getHostText());
    }
    return Joiner.on(',').join(result);
}
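
As a hypothetical invocation (the contact points are made up), parseHosts("cass1:9042,cass2:9042") returns "cass1,cass2": each comma-separated entry is parsed with fromString and only its host part is kept.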

From source file:brooklyn.networking.vclouddirector.NatMicroserviceClient.java

@Override
public HostAndPort openPortForwarding(PortForwardingConfig args) {
    HttpClient client = HttpTool.httpClientBuilder().uri(microserviceUri).trustSelfSigned().build();

    Escaper escaper = UrlEscapers.urlPathSegmentEscaper();
    URI uri = URI.create(Urls.mergePaths(microserviceUri,
            "/v1/nat" + "?endpoint=" + escaper.escape(endpoint)
                    + (Strings.isNonBlank(vDC) ? "&vdc=" + escaper.escape(vDC) : "") + "&identity="
                    + escaper.escape(identity) + "&credential=" + escaper.escape(credential) + "&protocol="
                    + args.getProtocol() + "&original=" + args.getPublicEndpoint()
                    + (args.getPublicPortRange() == null ? ""
                            : "&originalPortRange=" + args.getPublicPortRange().toString())
                    + "&translated=" + args.getTargetEndpoint()));

    if (LOG.isDebugEnabled())
        LOG.debug("PUT {}", uri.toString().replace(escaper.escape(credential), "xxxxxxxx"));

    HttpToolResponse response = HttpTool.httpPut(client, uri, ImmutableMap.<String, String>of(), new byte[0]);
    if (response.getResponseCode() < 200 || response.getResponseCode() >= 300) {
        String msg = "Open NAT Rule failed for " + args + ": " + response.getResponseCode() + "; "
                + response.getReasonPhrase() + ": " + response.getContentAsString();
        LOG.info(msg + "; rethrowing");
        throw new RuntimeException(msg);
    }
    return HostAndPort.fromString(response.getContentAsString());
}
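
Here the NAT microservice is expected to reply with a plain host:port string in the response body, which is why the content can be handed directly to fromString.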

From source file:org.sfs.integration.java.BaseTestVerticle.java

@Before
public void before(TestContext context) {
    vertx = rule.vertx();
    Async async = context.async();
    aVoid().flatMap(aVoid -> {
        String clusteruuid = UUID.randomUUID().toString();
        try {
            rootTmpDir = createTempDirectory("");
            esTempDir = createTempDirectory(rootTmpDir, format("test-cluster-%s", clusteruuid));
            tmpDir = createTempDirectory(rootTmpDir, valueOf(currentTimeMillis()));
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        int esPort = findFreePort(9300, 9400);
        esHost = "127.0.0.1:" + esPort;
        esClusterName = format("test-cluster-%s", clusteruuid);
        esNodeName = format("test-server-node-%s", clusteruuid);

        Builder settings = settingsBuilder();
        settings.put("script.groovy.sandbox.enabled", false);
        settings.put("cluster.name", esClusterName);
        settings.put("node.name", esNodeName);
        settings.put("http.enabled", false);
        settings.put("discovery.zen.ping.multicast.enabled", false);
        settings.put("discovery.zen.ping.unicast.hosts", esHost);
        settings.put("transport.tcp.port", esPort);
        settings.put("network.host", "127.0.0.1");
        settings.put("node.data", true);
        settings.put("node.master", true);
        settings.put("path.home", esTempDir);
        esNode = nodeBuilder().settings(settings).node();
        esClient = esNode.client();

        JsonObject verticleConfig;

        Buffer buffer = vertx.fileSystem().readFileBlocking(
                currentThread().getContextClassLoader().getResource("intgtestconfig.json").getFile());
        verticleConfig = new JsonObject(buffer.toString(UTF_8));
        verticleConfig.put("fs.home", tmpDir.toString());

        if (!verticleConfig.containsKey("elasticsearch.cluster.name")) {
            verticleConfig.put("elasticsearch.cluster.name", esClusterName);
        }

        if (!verticleConfig.containsKey("elasticsearch.node.name")) {
            verticleConfig.put("elasticsearch.node.name", esNodeName);
        }

        if (!verticleConfig.containsKey("elasticsearch.discovery.zen.ping.unicast.hosts")) {
            verticleConfig.put("elasticsearch.discovery.zen.ping.unicast.hosts", new JsonArray().add(esHost));
        }

        if (!verticleConfig.containsKey("http.listen.addresses")) {
            int freePort = findFreePort(6677, 7777);
            verticleConfig.put("http.listen.addresses",
                    new JsonArray().add(HostAndPort.fromParts("127.0.0.1", freePort).toString()));
        }

        HostAndPort hostAndPort = HostAndPort
                .fromString(verticleConfig.getJsonArray("http.listen.addresses").getString(0));

        HttpClientOptions httpClientOptions = new HttpClientOptions();
        httpClientOptions.setDefaultPort(hostAndPort.getPort()).setDefaultHost(hostAndPort.getHostText())
                .setMaxPoolSize(25).setConnectTimeout(1000).setKeepAlive(false).setLogActivity(true);

        HttpClientOptions httpsClientOptions = new HttpClientOptions();
        httpsClientOptions.setDefaultPort(hostAndPort.getPort()).setDefaultHost(hostAndPort.getHostText())
                .setMaxPoolSize(25).setConnectTimeout(1000).setKeepAlive(false).setLogActivity(true)
                .setSsl(true);
        httpClient = vertx.createHttpClient(httpClientOptions);
        httpsClient = vertx.createHttpClient(httpsClientOptions);

        SfsServer sfsServer = new SfsServer();

        ObservableFuture<String> handler = RxHelper.observableFuture();
        vertx.deployVerticle(sfsServer, new DeploymentOptions().setConfig(verticleConfig), handler.toHandler());
        return handler.map(new ToVoid<>()).doOnNext(aVoid1 -> {
            vertxContext = sfsServer.vertxContext();
            checkState(vertxContext != null, "VertxContext was null on Verticle %s", sfsServer);
        }).onErrorResumeNext(throwable -> {
            throwable.printStackTrace();
            return cleanup().flatMap(aVoid1 -> Observable.<Void>error(throwable));
        });
    }).subscribe(new TestSubscriber(context, async));
}

From source file:org.apache.accumulo.core.client.impl.InstanceOperationsImpl.java

@Override
public List<ActiveScan> getActiveScans(String tserver) throws AccumuloException, AccumuloSecurityException {
    final HostAndPort parsedTserver = HostAndPort.fromString(tserver);
    Client client = null;
    try {
        client = ThriftUtil.getTServerClient(parsedTserver, context);

        List<ActiveScan> as = new ArrayList<ActiveScan>();
        for (org.apache.accumulo.core.tabletserver.thrift.ActiveScan activeScan : client
                .getActiveScans(Tracer.traceInfo(), context.rpcCreds())) {
            try {
                as.add(new ActiveScanImpl(context.getInstance(), activeScan));
            } catch (TableNotFoundException e) {
                throw new AccumuloException(e);
            }
        }
        return as;
    } catch (TTransportException e) {
        throw new AccumuloException(e);
    } catch (ThriftSecurityException e) {
        throw new AccumuloSecurityException(e.user, e.code, e);
    } catch (TException e) {
        throw new AccumuloException(e);
    } finally {
        if (client != null)
            ThriftUtil.returnClient(client);
    }
}

From source file:zipkin.dependencies.cassandra.CassandraDependenciesJob.java

/** Returns the consistent port across all contact points or 9042 */
static String parsePort(String contactPoints) {
    Set<Integer> ports = Sets.newLinkedHashSet();
    for (String contactPoint : contactPoints.split(",")) {
        HostAndPort parsed = HostAndPort.fromString(contactPoint);
        ports.add(parsed.getPortOrDefault(9042));
    }
    return ports.size() == 1 ? String.valueOf(ports.iterator().next()) : "9042";
}
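
With the same hypothetical contact points, parsePort("cass1:9042,cass2:9042") returns "9042", while a list mixing ports such as "cass1:9041,cass2:9042" produces more than one distinct port and therefore falls back to the default "9042".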

From source file:com.facebook.presto.hive.AbstractTestHiveClientS3.java

protected void setup(String host, int port, String databaseName, String awsAccessKey, String awsSecretKey,
        String writableBucket) {
    this.writableBucket = writableBucket;

    setupHive(databaseName);

    HiveClientConfig hiveClientConfig = new HiveClientConfig().setS3AwsAccessKey(awsAccessKey)
            .setS3AwsSecretKey(awsSecretKey);

    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        hiveClientConfig.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }

    HiveConnectorId connectorId = new HiveConnectorId("hive-test");
    HiveCluster hiveCluster = new TestingHiveCluster(hiveClientConfig, host, port);
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-s3-%s"));
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
            new HdfsConfigurationUpdater(hiveClientConfig));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(connectorId, hiveClientConfig);

    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig);
    locationService = new HiveLocationService(metastoreClient, hdfsEnvironment);
    metastoreClient = new TestingHiveMetastore(hiveCluster, executor, hiveClientConfig, writableBucket,
            hdfsEnvironment);
    TypeRegistry typeManager = new TypeRegistry();
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadata = new HiveMetadata(connectorId, hiveClientConfig, metastoreClient, hdfsEnvironment,
            hivePartitionManager, newDirectExecutorService(), typeManager, locationService,
            partitionUpdateCodec);
    splitManager = new HiveSplitManager(connectorId, hiveClientConfig, metastoreClient, new NamenodeStats(),
            hdfsEnvironment, new HadoopDirectoryLister(), executor);
    pageSinkProvider = new HivePageSinkProvider(hdfsEnvironment, metastoreClient,
            new GroupByHashPageIndexerFactory(), typeManager, new HiveClientConfig(), locationService,
            partitionUpdateCodec);
    pageSourceProvider = new HivePageSourceProvider(hiveClientConfig, hdfsEnvironment,
            DEFAULT_HIVE_RECORD_CURSOR_PROVIDER, DEFAULT_HIVE_DATA_STREAM_FACTORIES, TYPE_MANAGER);
}

From source file:ezbake.discovery.servicediscovery.ZooKeeperServiceRegistry.java

@Override
public List<ServiceInstance> listInstances(String applicationName, String serviceName, String serviceType)
        throws DiscoveryException {
    String path = formatZooKeeperServiceTypePath(applicationName, serviceName, serviceType);
    List<ServiceInstance> instances = new ArrayList<ServiceInstance>();
    InterProcessLock readLock = readWriteLock.readLock();
    Exception maybeError = null;

    try {
        List<String> children;
        readLock.acquire();

        if (client.checkExists().forPath(path) == null) {
            children = Collections.emptyList();
        } else {
            children = client.getChildren().forPath(path);
        }

        for (String c : children) {
            HostAndPort hostAndPort = HostAndPort.fromString(c);
            instances.add(new BasicServiceInstance(applicationName, serviceName, serviceType,
                    hostAndPort.getHostText(), hostAndPort.getPort()));
        }

    } catch (Exception e) {
        maybeError = e;
    } finally {
        safelyRelease(readLock, maybeError);
    }

    return instances;
}

From source file:org.apache.beam.runners.flink.FlinkExecutionEnvironments.java

@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options,
        List<String> filesToStage, @Nullable String confDir) {

    LOG.info("Creating a Streaming Environment.");

    String masterUrl = options.getFlinkMaster();
    Configuration flinkConfig = getFlinkConfiguration(confDir);
    final StreamExecutionEnvironment flinkStreamEnv;

    // depending on the master, create the right environment.
    if ("[local]".equals(masterUrl)) {
        flinkStreamEnv = StreamExecutionEnvironment.createLocalEnvironment(getDefaultLocalParallelism(),
                flinkConfig);
    } else if ("[auto]".equals(masterUrl)) {
        flinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    } else {
        int defaultPort = flinkConfig.getInteger(RestOptions.PORT);
        HostAndPort hostAndPort = HostAndPort.fromString(masterUrl).withDefaultPort(defaultPort);
        flinkConfig.setInteger(RestOptions.PORT, hostAndPort.getPort());
        final SavepointRestoreSettings savepointRestoreSettings;
        if (options.getSavepointPath() != null) {
            savepointRestoreSettings = SavepointRestoreSettings.forPath(options.getSavepointPath(),
                    options.getAllowNonRestoredState());
        } else {
            savepointRestoreSettings = SavepointRestoreSettings.none();
        }
        flinkStreamEnv = new BeamFlinkRemoteStreamEnvironment(hostAndPort.getHost(), hostAndPort.getPort(),
                flinkConfig, savepointRestoreSettings, filesToStage.toArray(new String[filesToStage.size()]));
        LOG.info("Using Flink Master URL {}:{}.", hostAndPort.getHost(), hostAndPort.getPort());
    }

    // Set the parallelism, required by UnboundedSourceWrapper to generate consistent splits.
    final int parallelism = determineParallelism(options.getParallelism(), flinkStreamEnv.getParallelism(),
            flinkConfig);
    flinkStreamEnv.setParallelism(parallelism);
    if (options.getMaxParallelism() > 0) {
        flinkStreamEnv.setMaxParallelism(options.getMaxParallelism());
    }
    // set parallelism in the options (required by some execution code)
    options.setParallelism(parallelism);

    if (options.getObjectReuse()) {
        flinkStreamEnv.getConfig().enableObjectReuse();
    } else {
        flinkStreamEnv.getConfig().disableObjectReuse();
    }

    // default to event time
    flinkStreamEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // for the following 2 parameters, a value of -1 means that Flink will use
    // the default values as specified in the configuration.
    int numRetries = options.getNumberOfExecutionRetries();
    if (numRetries != -1) {
        flinkStreamEnv.setNumberOfExecutionRetries(numRetries);
    }
    long retryDelay = options.getExecutionRetryDelay();
    if (retryDelay != -1) {
        flinkStreamEnv.getConfig().setExecutionRetryDelay(retryDelay);
    }

    // A value of -1 corresponds to disabled checkpointing (see CheckpointConfig in Flink).
    // If the value is not -1, then the validity checks are applied.
    // By default, checkpointing is disabled.
    long checkpointInterval = options.getCheckpointingInterval();
    if (checkpointInterval != -1) {
        if (checkpointInterval < 1) {
            throw new IllegalArgumentException("The checkpoint interval must be positive");
        }
        flinkStreamEnv.enableCheckpointing(checkpointInterval, options.getCheckpointingMode());
        if (options.getCheckpointTimeoutMillis() != -1) {
            flinkStreamEnv.getCheckpointConfig().setCheckpointTimeout(options.getCheckpointTimeoutMillis());
        }
        boolean externalizedCheckpoint = options.isExternalizedCheckpointsEnabled();
        boolean retainOnCancellation = options.getRetainExternalizedCheckpointsOnCancellation();
        if (externalizedCheckpoint) {
            flinkStreamEnv.getCheckpointConfig().enableExternalizedCheckpoints(
                    retainOnCancellation ? ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
                            : ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION);
        }

        long minPauseBetweenCheckpoints = options.getMinPauseBetweenCheckpoints();
        if (minPauseBetweenCheckpoints != -1) {
            flinkStreamEnv.getCheckpointConfig().setMinPauseBetweenCheckpoints(minPauseBetweenCheckpoints);
        }
    } else {
        // https://issues.apache.org/jira/browse/FLINK-2491
        // Checkpointing is disabled, we can allow shutting down sources when they're done
        options.setShutdownSourcesOnFinalWatermark(true);
    }

    applyLatencyTrackingInterval(flinkStreamEnv.getConfig(), options);

    if (options.getAutoWatermarkInterval() != null) {
        flinkStreamEnv.getConfig().setAutoWatermarkInterval(options.getAutoWatermarkInterval());
    }

    // State backend
    final StateBackend stateBackend = options.getStateBackend();
    if (stateBackend != null) {
        flinkStreamEnv.setStateBackend(stateBackend);
    }

    return flinkStreamEnv;
}
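
The withDefaultPort call above is what lets a masterUrl with no explicit port fall back to Flink's configured REST port. A minimal sketch of that behavior (the host name and ports are made up):

HostAndPort explicit = HostAndPort.fromString("flink-jm:8081").withDefaultPort(8089);
System.out.println(explicit.getPort()); // 8081: an explicit port is kept

HostAndPort fallback = HostAndPort.fromString("flink-jm").withDefaultPort(8089);
System.out.println(fallback.getPort()); // 8089: the default fills the gap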

From source file:org.apache.omid.tso.client.TSOClient.java

private TSOClient(OmidClientConfiguration omidConf) throws IOException {

    // Start client with Nb of active threads = 3 as maximum.
    int tsoExecutorThreads = omidConf.getExecutorThreads();

    factory = new NioClientSocketChannelFactory(
            Executors
                    .newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("tsoclient-boss-%d").build()),
            Executors.newCachedThreadPool(
                    new ThreadFactoryBuilder().setNameFormat("tsoclient-worker-%d").build()),
            tsoExecutorThreads);
    // Create the bootstrap
    bootstrap = new ClientBootstrap(factory);

    requestTimeoutInMs = omidConf.getRequestTimeoutInMs();
    requestMaxRetries = omidConf.getRequestMaxRetries();
    tsoReconnectionDelayInSecs = omidConf.getReconnectionDelayInSecs();

    LOG.info("Connecting to TSO...");
    HostAndPort hp;
    switch (omidConf.getConnectionType()) {
    case HA:
        zkClient = ZKUtils.initZKClient(omidConf.getConnectionString(), omidConf.getZkNamespace(),
                omidConf.getZkConnectionTimeoutInSecs());
        zkCurrentTsoPath = omidConf.getZkCurrentTsoPath();
        configureCurrentTSOServerZNodeCache(zkCurrentTsoPath);
        String tsoInfo = getCurrentTSOInfoFoundInZK(zkCurrentTsoPath);
        // TSO info includes the new TSO host:port address and epoch
        String[] currentTSOAndEpochArray = tsoInfo.split("#");
        hp = HostAndPort.fromString(currentTSOAndEpochArray[0]);
        setTSOAddress(hp.getHostText(), hp.getPort());
        epoch = Long.parseLong(currentTSOAndEpochArray[1]);
        LOG.info("\t* Current TSO host:port found in ZK: {} Epoch {}", hp, getEpoch());
        break;
    case DIRECT:
    default:
        hp = HostAndPort.fromString(omidConf.getConnectionString());
        setTSOAddress(hp.getHostText(), hp.getPort());
        LOG.info("\t* TSO host:port {} will be connected directly", hp);
        break;
    }

    fsmExecutor = Executors
            .newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setNameFormat("tsofsm-%d").build());
    fsm = new StateMachine.FsmImpl(fsmExecutor);
    fsm.setInitState(new DisconnectedState(fsm));

    ChannelPipeline pipeline = bootstrap.getPipeline();
    pipeline.addLast("lengthbaseddecoder", new LengthFieldBasedFrameDecoder(8 * 1024, 0, 4, 0, 4));
    pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));
    pipeline.addLast("protobufdecoder", new ProtobufDecoder(TSOProto.Response.getDefaultInstance()));
    pipeline.addLast("protobufencoder", new ProtobufEncoder());
    pipeline.addLast("handler", new Handler(fsm));

    bootstrap.setOption("tcpNoDelay", true);
    bootstrap.setOption("keepAlive", true);
    bootstrap.setOption("reuseAddress", true);
    bootstrap.setOption("connectTimeoutMillis", 100);
}

From source file:dk.dma.ais.reader.AisReaders.java

/**
 * Parses the string and returns either an {@link AisTcpReader} if only one hostname is found or a
 * {@link RoundRobinAisTcpReader} if more than 1 host name is found. Example
 * "mySource=ais163.sealan.dk:65262,ais167.sealan.dk:65261" will return a RoundRobinAisTcpReader alternating between
 * the two sources if one is down.
 * 
 * @param fullSource
 *            the full source
 * @return a ais reader
 */
static AisTcpReader parseSource(String fullSource) {
    try (Scanner s = new Scanner(fullSource);) {
        s.useDelimiter("\\s*=\\s*");
        if (!s.hasNext()) {
            throw new IllegalArgumentException(
                    "Source must be of the format src=host:port,host:port, was " + fullSource);
        }
        String src = s.next();
        if (!s.hasNext()) {
            throw new IllegalArgumentException(
                    "A list of hostname:ports must follow the source (format src=host:port,host:port), was "
                            + fullSource);
        }
        AisTcpReader r = new AisTcpReader();
        try (Scanner s1 = new Scanner(s.next())) {
            s1.useDelimiter("\\s*,\\s*");
            if (!s1.hasNext()) {
                throw new IllegalArgumentException(
                        "Source must have at least one host:port (format src=host:port,host:port), was "
                                + fullSource);
            }
            while (s1.hasNext()) {
                r.addHostPort(HostAndPort.fromString(s1.next()));
            }
        }
        r.setSourceId(src);
        return r;
    }
}
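
For the Javadoc's example input, "mySource=ais163.sealan.dk:65262,ais167.sealan.dk:65261", fromString parses each comma-separated host:port entry before it is added to the reader via addHostPort, and "mySource" becomes the reader's source id.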