Example usage for com.google.common.net.HostAndPort.fromString

Introduction

This page collects real-world usage examples for com.google.common.net.HostAndPort.fromString.

Prototype

public static HostAndPort fromString(String hostPortString) 

Document

Split a freeform string into a host and port, without strict validation.
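
Before the real-world examples, here is a minimal, self-contained sketch of what fromString accepts. The class name and host strings are illustrative assumptions; note that getHost() is the Guava 20+ name for the accessor that several examples below call getHostText().

import com.google.common.net.HostAndPort;

public class HostAndPortFromStringDemo {
    public static void main(String[] args) {
        // Bare host: parses fine, but no port is recorded.
        HostAndPort hostOnly = HostAndPort.fromString("example.com");
        System.out.println(hostOnly.getHost() + " hasPort=" + hostOnly.hasPort()); // example.com hasPort=false

        // host:port form.
        HostAndPort hostPort = HostAndPort.fromString("example.com:8080");
        System.out.println(hostPort.getHost() + ":" + hostPort.getPort()); // example.com:8080

        // Bracketed IPv6 literal; getHost() returns the address without brackets.
        HostAndPort v6 = HostAndPort.fromString("[2001:db8::1]:443");
        System.out.println(v6.getHost() + " port=" + v6.getPort()); // 2001:db8::1 port=443

        // withDefaultPort fills in a port only when none was parsed.
        HostAndPort defaulted = HostAndPort.fromString("example.com").withDefaultPort(80);
        System.out.println(defaulted.getPort()); // 80
    }
}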

Usage

From source file:org.apache.bookkeeper.stats.CodahaleMetricsProvider.java

@Override
public void start(Configuration conf) {
    initIfNecessary();

    int metricsOutputFrequency = conf.getInt("codahaleStatsOutputFrequencySeconds", 60);
    String prefix = conf.getString("codahaleStatsPrefix", "");
    String graphiteHost = conf.getString("codahaleStatsGraphiteEndpoint");
    String csvDir = conf.getString("codahaleStatsCSVEndpoint");
    String slf4jCat = conf.getString("codahaleStatsSlf4jEndpoint");
    String jmxDomain = conf.getString("codahaleStatsJmxEndpoint");

    if (!Strings.isNullOrEmpty(graphiteHost)) {
        LOG.info("Configuring stats with graphite");
        HostAndPort addr = HostAndPort.fromString(graphiteHost);
        final Graphite graphite = new Graphite(new InetSocketAddress(addr.getHostText(), addr.getPort()));
        reporters.add(
                GraphiteReporter.forRegistry(getMetrics()).prefixedWith(prefix).convertRatesTo(TimeUnit.SECONDS)
                        .convertDurationsTo(TimeUnit.MILLISECONDS).filter(MetricFilter.ALL).build(graphite));
    }
    if (!Strings.isNullOrEmpty(csvDir)) {
        // NOTE: 1/ metrics output files are exclusive to a given process
        // 2/ the output directory must exist
        // 3/ if output files already exist they are not overwritten and there is no metrics output
        File outdir;
        if (!Strings.isNullOrEmpty(prefix)) {
            outdir = new File(csvDir, prefix);
        } else {
            outdir = new File(csvDir);
        }
        LOG.info("Configuring stats with csv output to directory [{}]", outdir.getAbsolutePath());
        reporters.add(CsvReporter.forRegistry(getMetrics()).convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS).build(outdir));
    }
    if (!Strings.isNullOrEmpty(slf4jCat)) {
        LOG.info("Configuring stats with slf4j");
        reporters.add(Slf4jReporter.forRegistry(getMetrics()).outputTo(LoggerFactory.getLogger(slf4jCat))
                .convertRatesTo(TimeUnit.SECONDS).convertDurationsTo(TimeUnit.MILLISECONDS).build());
    }
    if (!Strings.isNullOrEmpty(jmxDomain)) {
        LOG.info("Configuring stats with jmx");
        jmx = JmxReporter.forRegistry(getMetrics()).inDomain(jmxDomain).convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS).build();
        jmx.start();
    }

    for (ScheduledReporter r : reporters) {
        r.start(metricsOutputFrequency, TimeUnit.SECONDS);
    }
}
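
Note: getHostText() above is the pre-Guava-20 name of the accessor that later examples call getHost(); both return the host portion of the parsed string, with IPv6 brackets stripped.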

From source file:com.palantir.atlasdb.factory.Leaders.java

public static Map<PingableLeader, HostAndPort> generatePingables(Collection<String> remoteEndpoints,
        Optional<SSLSocketFactory> sslSocketFactory) {
    /* The interface used as a key here may be a proxy, which may have strange .equals() behavior.
     * This is circumvented by using an IdentityHashMap which will just use native == for equality.
     */
    Map<PingableLeader, HostAndPort> pingables = new IdentityHashMap<PingableLeader, HostAndPort>();
    for (String endpoint : remoteEndpoints) {
        PingableLeader remoteInterface = AtlasDbHttpClients.createProxy(sslSocketFactory, endpoint,
                PingableLeader.class);
        HostAndPort hostAndPort = HostAndPort.fromString(endpoint);
        pingables.put(remoteInterface, hostAndPort);
    }
    return pingables;
}

From source file:io.druid.firehose.kafka.KafkaSimpleConsumer.java

public KafkaSimpleConsumer(String topic, int partitionId, String clientId, List<String> brokers,
        boolean earliest) {
    List<HostAndPort> brokerList = new ArrayList<>();
    for (String broker : brokers) {
        HostAndPort brokerHostAndPort = HostAndPort.fromString(broker);
        Preconditions.checkArgument(
                brokerHostAndPort.getHostText() != null && !brokerHostAndPort.getHostText().isEmpty()
                        && brokerHostAndPort.hasPort(),
                "kafka broker [%s] is not valid, must be <host>:<port>", broker);
        brokerList.add(brokerHostAndPort);
    }

    this.allBrokers = Collections.unmodifiableList(brokerList);
    this.topic = topic;
    this.partitionId = partitionId;
    this.clientId = String.format("%s_%d_%s", topic, partitionId, clientId);
    this.leaderLookupClientId = clientId + "leaderLookup";
    this.replicaBrokers = new ArrayList<>();
    this.replicaBrokers.addAll(this.allBrokers);
    this.earliest = earliest;
    log.info(
            "KafkaSimpleConsumer initialized with clientId [%s] for message consumption and clientId [%s] for leader lookup",
            this.clientId, this.leaderLookupClientId);
}
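
Because fromString also accepts a bare host, the hasPort() check above is what actually enforces the <host>:<port> form for broker addresses.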

From source file:org.apache.beam.runners.flink.FlinkExecutionEnvironments.java

static ExecutionEnvironment createBatchExecutionEnvironment(FlinkPipelineOptions options,
        List<String> filesToStage, @Nullable String confDir) {

    LOG.info("Creating a Batch Execution Environment.");

    String masterUrl = options.getFlinkMaster();
    Configuration flinkConfiguration = getFlinkConfiguration(confDir);
    ExecutionEnvironment flinkBatchEnv;

    // depending on the master, create the right environment.
    if ("[local]".equals(masterUrl)) {
        flinkBatchEnv = ExecutionEnvironment.createLocalEnvironment(flinkConfiguration);
    } else if ("[collection]".equals(masterUrl)) {
        flinkBatchEnv = new CollectionEnvironment();
    } else if ("[auto]".equals(masterUrl)) {
        flinkBatchEnv = ExecutionEnvironment.getExecutionEnvironment();
    } else {
        int defaultPort = flinkConfiguration.getInteger(RestOptions.PORT);
        HostAndPort hostAndPort = HostAndPort.fromString(masterUrl).withDefaultPort(defaultPort);
        flinkConfiguration.setInteger(RestOptions.PORT, hostAndPort.getPort());
        flinkBatchEnv = ExecutionEnvironment.createRemoteEnvironment(hostAndPort.getHost(),
                hostAndPort.getPort(), flinkConfiguration,
                filesToStage.toArray(new String[filesToStage.size()]));
        LOG.info("Using Flink Master URL {}:{}.", hostAndPort.getHost(), hostAndPort.getPort());
    }

    // Set the execution mode for data exchange.
    flinkBatchEnv.getConfig().setExecutionMode(options.getExecutionModeForBatch());

    // set the correct parallelism.
    if (options.getParallelism() != -1 && !(flinkBatchEnv instanceof CollectionEnvironment)) {
        flinkBatchEnv.setParallelism(options.getParallelism());
    }
    // Set the correct parallelism, required by UnboundedSourceWrapper to generate consistent splits.
    final int parallelism;
    if (flinkBatchEnv instanceof CollectionEnvironment) {
        parallelism = 1;
    } else {
        parallelism = determineParallelism(options.getParallelism(), flinkBatchEnv.getParallelism(),
                flinkConfiguration);
    }

    flinkBatchEnv.setParallelism(parallelism);
    // set parallelism in the options (required by some execution code)
    options.setParallelism(parallelism);

    if (options.getObjectReuse()) {
        flinkBatchEnv.getConfig().enableObjectReuse();
    } else {
        flinkBatchEnv.getConfig().disableObjectReuse();
    }

    applyLatencyTrackingInterval(flinkBatchEnv.getConfig(), options);

    return flinkBatchEnv;
}
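
Note that withDefaultPort supplies Flink's configured REST port only when masterUrl carries no explicit port; a port written into masterUrl always wins.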

From source file:io.druid.server.DruidNode.java

private void init(String serviceName, String host, Integer port) {
    Preconditions.checkNotNull(serviceName);
    this.serviceName = serviceName;

    if (host == null && port == null) {
        host = getDefaultHost();
        port = -1;
    } else {
        final HostAndPort hostAndPort;
        if (host != null) {
            hostAndPort = HostAndPort.fromString(host);
            if (port != null && hostAndPort.hasPort() && port != hostAndPort.getPort()) {
                throw new IAE("Conflicting host:port [%s] and port [%d] settings", host, port);
            }
        } else {
            hostAndPort = HostAndPort.fromParts(getDefaultHost(), port);
        }

        host = hostAndPort.getHostText();

        if (hostAndPort.hasPort()) {
            port = hostAndPort.getPort();
        }

        if (port == null) {
            port = SocketUtil.findOpenPort(8080);
        }
    }

    this.port = port;
    this.host = host;
}
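
This example uses fromString defensively: the host string may itself embed a port, so the parsed port is cross-checked against the separately configured one and conflicting values are rejected.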

From source file:com.spotify.heroic.cache.memcached.MemcachedCacheModule.java

@Provides
@CacheScope
public Managed<MemcacheClient<byte[]>> memcacheClient(final AsyncFramework async) {
    return async.managed(new ManagedSetup<MemcacheClient<byte[]>>() {
        @Override
        public AsyncFuture<MemcacheClient<byte[]>> construct() throws Exception {
            final List<HostAndPort> addresses = new ArrayList<>();

            for (final String address : MemcachedCacheModule.this.addresses) {
                addresses.add(HostAndPort.fromString(address));
            }

            final BinaryMemcacheClient<byte[]> client = MemcacheClientBuilder.newByteArrayClient()
                    .withAddresses(addresses).connectBinary();

            final ResolvableFuture<MemcacheClient<byte[]>> future = async.future();

            Futures.addCallback(ConnectFuture.connectFuture(client), new FutureCallback<Void>() {
                @Override
                public void onSuccess(@Nullable final Void result) {
                    future.resolve(client);
                }

                @Override
                public void onFailure(final Throwable cause) {
                    future.fail(cause);
                }
            });

            return future;
        }

        @Override
        public AsyncFuture<Void> destruct(final MemcacheClient<byte[]> value) throws Exception {
            return async.call(() -> {
                value.shutdown();
                return null;
            });
        }
    });
}

From source file:io.prestosql.benchmark.driver.BenchmarkDriverOptions.java

private static URI parseServer(String server) {
    server = server.toLowerCase(ENGLISH);
    if (server.startsWith("http://") || server.startsWith("https://")) {
        return URI.create(server);
    }

    HostAndPort host = HostAndPort.fromString(server);
    try {
        return new URI("http", null, host.getHost(), host.getPortOrDefault(80), null, null, null);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException(e);
    }
}
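
Here getPortOrDefault(80) lets the bare-host form fall back to HTTP's default port without a separate hasPort() branch.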

From source file:org.graylog2.logback.appender.Graylog2Plugin.java

public Graylog2Plugin(Application app) {
    final Configuration config = app.configuration();
    this.pluginEnabled = config.getBoolean("graylog2.enable.plugin", false);
    if (!this.pluginEnabled) {
        return;
    }
    accessLogEnabled = config.getBoolean("graylog2.appender.send-access-log", false);
    queueCapacity = config.getInt("graylog2.appender.queue-size", 512);
    reconnectInterval = config.getMilliseconds("graylog2.appender.reconnect-interval", 500L);
    connectTimeout = config.getMilliseconds("graylog2.appender.connect-timeout", 1000L);
    isTcpNoDelay = config.getBoolean("graylog2.appender.tcp-nodelay", false);
    sendBufferSize = config.getInt("graylog2.appender.sendbuffersize", 0); // causes the socket default to be used
    try {
        canonicalHostName = config.getString("graylog2.appender.sourcehost",
                InetAddress.getLocalHost().getCanonicalHostName());
    } catch (UnknownHostException e) {
        canonicalHostName = "localhost";
        log.error("Unable to resolve canonical localhost name. "
                + "Please set it manually via graylog2.appender.sourcehost or fix your lookup service, falling back to {}",
                canonicalHostName);
    }
    // TODO make this a list and dynamically accessible from the application
    final String hostString = config.getString("graylog2.appender.host", "127.0.0.1:12201");
    final String protocol = config.getString("graylog2.appender.protocol", "tcp");

    final HostAndPort hostAndPort = HostAndPort.fromString(hostString);

    final GelfTransports gelfTransport = GelfTransports.valueOf(protocol.toUpperCase());

    final GelfConfiguration gelfConfiguration = new GelfConfiguration(hostAndPort.getHostText(),
            hostAndPort.getPort()).transport(gelfTransport).reconnectDelay(reconnectInterval.intValue())
                    .queueSize(queueCapacity).connectTimeout(connectTimeout.intValue()).tcpNoDelay(isTcpNoDelay)
                    .sendBufferSize(sendBufferSize);

    this.transport = GelfTransports.create(gelfConfiguration);

    final LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
    rootLogger = lc.getLogger(Logger.ROOT_LOGGER_NAME);

    gelfAppender = new GelfclientAppender(transport, getLocalHostName());
    gelfAppender.setContext(lc);
}

From source file:org.apache.sentry.core.common.utils.ThriftUtil.java

/**
 * Utility function for parsing host and port strings. Expected form should be
 * (host:port). The hostname could be in ipv6 style. If port is not specified,
 * defaultPort will be used.
 */
public static HostAndPort parseAddress(String address, int defaultPort) {
    return HostAndPort.fromString(address).withDefaultPort(defaultPort);
}
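
A hypothetical caller, illustrating the default-port and IPv6 behavior the javadoc describes; the class name, address literals, and the 8038 default are assumptions for illustration only.

import com.google.common.net.HostAndPort;
import org.apache.sentry.core.common.utils.ThriftUtil;

public class ParseAddressDemo {
    public static void main(String[] args) {
        // Port present in the string: the default is ignored.
        System.out.println(ThriftUtil.parseAddress("10.0.0.5:9090", 8038));      // 10.0.0.5:9090

        // No port in the string: the defaultPort argument fills it in.
        System.out.println(ThriftUtil.parseAddress("sentry.example.com", 8038)); // sentry.example.com:8038

        // Bracketed IPv6 literal: parses cleanly, port taken from the default.
        System.out.println(ThriftUtil.parseAddress("[2001:db8::2]", 8038));      // [2001:db8::2]:8038
    }
}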

From source file:org.apache.druid.firehose.kafka.KafkaSimpleConsumer.java

public KafkaSimpleConsumer(String topic, int partitionId, String clientId, List<String> brokers,
        boolean earliest) {
    List<HostAndPort> brokerList = new ArrayList<>();
    for (String broker : brokers) {
        HostAndPort brokerHostAndPort = HostAndPort.fromString(broker);
        Preconditions.checkArgument(
                brokerHostAndPort.getHostText() != null && !brokerHostAndPort.getHostText().isEmpty()
                        && brokerHostAndPort.hasPort(),
                "kafka broker [%s] is not valid, must be <host>:<port>", broker);
        brokerList.add(brokerHostAndPort);
    }

    this.allBrokers = Collections.unmodifiableList(brokerList);
    this.topic = topic;
    this.partitionId = partitionId;
    this.clientId = StringUtils.format("%s_%d_%s", topic, partitionId, clientId);
    this.leaderLookupClientId = clientId + "leaderLookup";
    this.replicaBrokers = new ArrayList<>();
    this.replicaBrokers.addAll(this.allBrokers);
    this.earliest = earliest;
    log.info(
            "KafkaSimpleConsumer initialized with clientId [%s] for message consumption and clientId [%s] for leader lookup",
            this.clientId, this.leaderLookupClientId);
}