Example usage for com.google.common.net HostAndPort getPort

Introduction

On this page you can find example usages of com.google.common.net.HostAndPort.getPort().

Prototype

public int getPort() 

Document

Get the current port number, failing if no port is defined.
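
getPort() throws an IllegalStateException when no port was present in the parsed string, so callers either check hasPort() first, fall back with getPortOrDefault(int), or fill the port in up front with withDefaultPort(int). A minimal sketch of all three patterns (the host strings are illustrative):

import com.google.common.net.HostAndPort;

public class GetPortDemo {
    public static void main(String[] args) {
        HostAndPort withPort = HostAndPort.fromString("example.com:8080");
        System.out.println(withPort.getPort()); // 8080

        HostAndPort noPort = HostAndPort.fromString("example.com");
        // noPort.getPort() would throw IllegalStateException: no port is defined.

        int checked = noPort.hasPort() ? noPort.getPort() : 80;  // explicit check
        int orDefault = noPort.getPortOrDefault(80);             // built-in fallback
        int filledIn = noPort.withDefaultPort(80).getPort();     // fill in, then read
        System.out.println(checked + " " + orDefault + " " + filledIn); // 80 80 80
    }
}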

Usage

From source file:com.linecorp.armeria.client.Endpoint.java

/**
 * Parse the authority part of a URI. The authority part may have one of the following formats:
 * <ul>
 *   <li>{@code "group:<groupName>"} for a group endpoint</li>
 *   <li>{@code "<host>:<port>"} for a host endpoint</li>
 *   <li>{@code "<host>"} for a host endpoint with no port number specified</li>
 * </ul>
 */
public static Endpoint parse(String authority) {
    requireNonNull(authority, "authority");
    if (authority.startsWith("group:")) {
        return ofGroup(authority.substring(6));
    }

    final HostAndPort parsed = HostAndPort.fromString(authority).withDefaultPort(0);
    return new Endpoint(parsed.getHostText(), parsed.getPort(), 1000);
}
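
A quick call sketch covering the three authority formats the javadoc lists, assuming this Endpoint class is on the classpath (host and group names are illustrative):

Endpoint host  = Endpoint.parse("example.com:8080"); // host endpoint with port 8080
Endpoint bare  = Endpoint.parse("example.com");      // no port given, defaults to 0 via withDefaultPort(0)
Endpoint group = Endpoint.parse("group:backend");    // group endpoint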

From source file:io.bazel.rules.closure.webfiles.server.NetworkUtils.java

/** Turns {@code address} into a more human readable form. */
static HostAndPort createUrlAddress(HostAndPort address) {
    if (address.getHost().equals("::") || address.getHost().equals("0.0.0.0")) {
        return address.getPortOrDefault(80) == 80 ? HostAndPort.fromHost(getCanonicalHostName())
                : HostAndPort.fromParts(getCanonicalHostName(), address.getPort());
    } else {
        return address.getPortOrDefault(80) == 80 ? HostAndPort.fromHost(address.getHost()) : address;
    }
}
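
The method leans on getPortOrDefault(80) to decide whether an explicit port is worth showing: a port equal to the HTTP default is dropped from the display form, anything else is read back with getPort(). A small sketch of just that Guava behavior (addresses are illustrative):

HostAndPort plain = HostAndPort.fromString("0.0.0.0:80");
plain.getPortOrDefault(80);    // 80 -> rewritten as a host-only address

HostAndPort custom = HostAndPort.fromString("0.0.0.0:8080");
custom.getPortOrDefault(80);   // 8080 -> kept, read back via getPort()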

From source file:com.streamsets.pipeline.kafka.impl.KafkaValidationUtil08.java

private static TopicMetadata getTopicMetadata(List<HostAndPort> kafkaBrokers, String topic, int maxRetries,
        long backOffms) throws IOException {
    TopicMetadata topicMetadata = null;
    boolean connectionError = true;
    boolean retry = true;
    int retryCount = 0;
    while (retry && retryCount <= maxRetries) {
        for (HostAndPort broker : kafkaBrokers) {
            SimpleConsumer simpleConsumer = null;
            try {
                simpleConsumer = new SimpleConsumer(broker.getHostText(), broker.getPort(),
                        METADATA_READER_TIME_OUT, BUFFER_SIZE, METADATA_READER_CLIENT);

                List<String> topics = Collections.singletonList(topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = simpleConsumer.send(req);

                // No exception => no connection error
                connectionError = false;

                List<TopicMetadata> topicMetadataList = resp.topicsMetadata();
                if (topicMetadataList == null || topicMetadataList.isEmpty()) {
                    //This broker did not have any metadata. May not be in sync?
                    continue;
                }
                topicMetadata = topicMetadataList.iterator().next();
                if (topicMetadata != null && topicMetadata.errorCode() == 0) {
                    retry = false;
                }
            } catch (Exception e) {
                //could not connect to this broker, try others
            } finally {
                if (simpleConsumer != null) {
                    simpleConsumer.close();
                }
            }
        }
        if (retry) {
            LOG.warn(
                    "Unable to connect or cannot fetch topic metadata. Waiting for '{}' seconds before retrying",
                    backOffms / 1000);
            retryCount++;
            if (!ThreadUtil.sleep(backOffms)) {
                break;
            }
        }
    }
    if (connectionError) {
        //could not connect any broker even after retries. Fail with exception
        throw new IOException(Utils.format(KafkaErrors.KAFKA_67.getMessage(), getKafkaBrokers(kafkaBrokers)));
    }
    return topicMetadata;
}

From source file:brooklyn.entity.nosql.mongodb.MongoDBClientSupport.java

/**
 * Creates a {@link MongoDBClientSupport} instance in standalone mode.
 */
public static MongoDBClientSupport forServer(AbstractMongoDBServer standalone) throws UnknownHostException {
    HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(standalone,
            standalone.getAttribute(MongoDBServer.PORT));
    ServerAddress address = new ServerAddress(hostAndPort.getHostText(), hostAndPort.getPort());
    return new MongoDBClientSupport(address);
}

From source file:com.google.cloud.GrpcTransportOptions.java

/**
 * Returns a channel provider.
 */
public static ChannelProvider getChannelProvider(ServiceOptions<?, ?> serviceOptions) {
    HostAndPort hostAndPort = HostAndPort.fromString(serviceOptions.getHost());
    InstantiatingChannelProvider.Builder builder = InstantiatingChannelProvider.newBuilder()
            .setServiceAddress(hostAndPort.getHostText()).setPort(hostAndPort.getPort())
            .setClientLibHeader(serviceOptions.getGoogApiClientLibName(),
                    firstNonNull(serviceOptions.getLibraryVersion(), ""));
    Credentials scopedCredentials = serviceOptions.getScopedCredentials();
    if (scopedCredentials != null && scopedCredentials != NoCredentials.getInstance()) {
        builder.setCredentialsProvider(FixedCredentialsProvider.create(scopedCredentials));
    }
    return builder.build();
}

From source file:org.apache.beam.runners.flink.FlinkExecutionEnvironments.java

static ExecutionEnvironment createBatchExecutionEnvironment(FlinkPipelineOptions options,
        List<String> filesToStage, @Nullable String confDir) {

    LOG.info("Creating a Batch Execution Environment.");

    String masterUrl = options.getFlinkMaster();
    Configuration flinkConfiguration = getFlinkConfiguration(confDir);
    ExecutionEnvironment flinkBatchEnv;

    // depending on the master, create the right environment.
    if ("[local]".equals(masterUrl)) {
        flinkBatchEnv = ExecutionEnvironment.createLocalEnvironment(flinkConfiguration);
    } else if ("[collection]".equals(masterUrl)) {
        flinkBatchEnv = new CollectionEnvironment();
    } else if ("[auto]".equals(masterUrl)) {
        flinkBatchEnv = ExecutionEnvironment.getExecutionEnvironment();
    } else {
        int defaultPort = flinkConfiguration.getInteger(RestOptions.PORT);
        HostAndPort hostAndPort = HostAndPort.fromString(masterUrl).withDefaultPort(defaultPort);
        flinkConfiguration.setInteger(RestOptions.PORT, hostAndPort.getPort());
        flinkBatchEnv = ExecutionEnvironment.createRemoteEnvironment(hostAndPort.getHost(),
                hostAndPort.getPort(), flinkConfiguration,
                filesToStage.toArray(new String[filesToStage.size()]));
        LOG.info("Using Flink Master URL {}:{}.", hostAndPort.getHost(), hostAndPort.getPort());
    }

    // Set the execution mode for data exchange.
    flinkBatchEnv.getConfig().setExecutionMode(options.getExecutionModeForBatch());

    // set the correct parallelism.
    if (options.getParallelism() != -1 && !(flinkBatchEnv instanceof CollectionEnvironment)) {
        flinkBatchEnv.setParallelism(options.getParallelism());
    }
    // Set the correct parallelism, required by UnboundedSourceWrapper to generate consistent splits.
    final int parallelism;
    if (flinkBatchEnv instanceof CollectionEnvironment) {
        parallelism = 1;
    } else {
        parallelism = determineParallelism(options.getParallelism(), flinkBatchEnv.getParallelism(),
                flinkConfiguration);
    }

    flinkBatchEnv.setParallelism(parallelism);
    // set parallelism in the options (required by some execution code)
    options.setParallelism(parallelism);

    if (options.getObjectReuse()) {
        flinkBatchEnv.getConfig().enableObjectReuse();
    } else {
        flinkBatchEnv.getConfig().disableObjectReuse();
    }

    applyLatencyTrackingInterval(flinkBatchEnv.getConfig(), options);

    return flinkBatchEnv;
}

From source file:org.apache.brooklyn.entity.nosql.mongodb.MongoDBClientSupport.java

/**
 * Creates a {@link MongoDBClientSupport} instance in standalone mode.
 */
public static MongoDBClientSupport forServer(AbstractMongoDBServer standalone) throws UnknownHostException {
    HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(standalone,
            standalone.getAttribute(MongoDBServer.PORT));
    ServerAddress address = new ServerAddress(hostAndPort.getHostText(), hostAndPort.getPort());
    if (MongoDBAuthenticationUtils.usesAuthentication(standalone)) {
        return new MongoDBClientSupport(address,
                standalone.sensors().get(MongoDBAuthenticationMixins.ROOT_USERNAME),
                standalone.sensors().get(MongoDBAuthenticationMixins.ROOT_PASSWORD),
                standalone.sensors().get(MongoDBAuthenticationMixins.AUTHENTICATION_DATABASE));
    } else {
        return new MongoDBClientSupport(address);
    }
}

From source file:org.apache.beam.runners.flink.FlinkExecutionEnvironments.java

@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options,
        List<String> filesToStage, @Nullable String confDir) {

    LOG.info("Creating a Streaming Environment.");

    String masterUrl = options.getFlinkMaster();
    Configuration flinkConfig = getFlinkConfiguration(confDir);
    final StreamExecutionEnvironment flinkStreamEnv;

    // depending on the master, create the right environment.
    if ("[local]".equals(masterUrl)) {
        flinkStreamEnv = StreamExecutionEnvironment.createLocalEnvironment(getDefaultLocalParallelism(),
                flinkConfig);
    } else if ("[auto]".equals(masterUrl)) {
        flinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    } else {
        int defaultPort = flinkConfig.getInteger(RestOptions.PORT);
        HostAndPort hostAndPort = HostAndPort.fromString(masterUrl).withDefaultPort(defaultPort);
        flinkConfig.setInteger(RestOptions.PORT, hostAndPort.getPort());
        final SavepointRestoreSettings savepointRestoreSettings;
        if (options.getSavepointPath() != null) {
            savepointRestoreSettings = SavepointRestoreSettings.forPath(options.getSavepointPath(),
                    options.getAllowNonRestoredState());
        } else {
            savepointRestoreSettings = SavepointRestoreSettings.none();
        }
        flinkStreamEnv = new BeamFlinkRemoteStreamEnvironment(hostAndPort.getHost(), hostAndPort.getPort(),
                flinkConfig, savepointRestoreSettings, filesToStage.toArray(new String[filesToStage.size()]));
        LOG.info("Using Flink Master URL {}:{}.", hostAndPort.getHost(), hostAndPort.getPort());
    }

    // Set the parallelism, required by UnboundedSourceWrapper to generate consistent splits.
    final int parallelism = determineParallelism(options.getParallelism(), flinkStreamEnv.getParallelism(),
            flinkConfig);
    flinkStreamEnv.setParallelism(parallelism);
    if (options.getMaxParallelism() > 0) {
        flinkStreamEnv.setMaxParallelism(options.getMaxParallelism());
    }
    // set parallelism in the options (required by some execution code)
    options.setParallelism(parallelism);

    if (options.getObjectReuse()) {
        flinkStreamEnv.getConfig().enableObjectReuse();
    } else {
        flinkStreamEnv.getConfig().disableObjectReuse();
    }

    // default to event time
    flinkStreamEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // for the following 2 parameters, a value of -1 means that Flink will use
    // the default values as specified in the configuration.
    int numRetries = options.getNumberOfExecutionRetries();
    if (numRetries != -1) {
        flinkStreamEnv.setNumberOfExecutionRetries(numRetries);
    }
    long retryDelay = options.getExecutionRetryDelay();
    if (retryDelay != -1) {
        flinkStreamEnv.getConfig().setExecutionRetryDelay(retryDelay);
    }

    // A value of -1 corresponds to disabled checkpointing (see CheckpointConfig in Flink).
    // If the value is not -1, then the validity checks are applied.
    // By default, checkpointing is disabled.
    long checkpointInterval = options.getCheckpointingInterval();
    if (checkpointInterval != -1) {
        if (checkpointInterval < 1) {
            throw new IllegalArgumentException("The checkpoint interval must be positive");
        }
        flinkStreamEnv.enableCheckpointing(checkpointInterval, options.getCheckpointingMode());
        if (options.getCheckpointTimeoutMillis() != -1) {
            flinkStreamEnv.getCheckpointConfig().setCheckpointTimeout(options.getCheckpointTimeoutMillis());
        }
        boolean externalizedCheckpoint = options.isExternalizedCheckpointsEnabled();
        boolean retainOnCancellation = options.getRetainExternalizedCheckpointsOnCancellation();
        if (externalizedCheckpoint) {
            flinkStreamEnv.getCheckpointConfig().enableExternalizedCheckpoints(
                    retainOnCancellation ? ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
                            : ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION);
        }

        long minPauseBetweenCheckpoints = options.getMinPauseBetweenCheckpoints();
        if (minPauseBetweenCheckpoints != -1) {
            flinkStreamEnv.getCheckpointConfig().setMinPauseBetweenCheckpoints(minPauseBetweenCheckpoints);
        }
    } else {
        // https://issues.apache.org/jira/browse/FLINK-2491
        // Checkpointing is disabled, we can allow shutting down sources when they're done
        options.setShutdownSourcesOnFinalWatermark(true);
    }

    applyLatencyTrackingInterval(flinkStreamEnv.getConfig(), options);

    if (options.getAutoWatermarkInterval() != null) {
        flinkStreamEnv.getConfig().setAutoWatermarkInterval(options.getAutoWatermarkInterval());
    }

    // State backend
    final StateBackend stateBackend = options.getStateBackend();
    if (stateBackend != null) {
        flinkStreamEnv.setStateBackend(stateBackend);
    }

    return flinkStreamEnv;
}

From source file:brooklyn.util.net.Networking.java

public static boolean isReachable(HostAndPort endpoint) {
    try {
        Socket s = new Socket(endpoint.getHostText(), endpoint.getPort());
        try {
            s.close();
        } catch (Exception e) {
            log.debug("Error closing socket, opened temporarily to check reachability to " + endpoint
                    + " (continuing)", e);
        }
        return true;
    } catch (Exception e) {
        if (log.isTraceEnabled())
            log.trace("Error reaching " + endpoint + " during reachability check (return false)", e);
        return false;
    }
}
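
A short call sketch, assuming this Networking utility is on the classpath (the endpoint is illustrative):

boolean up = Networking.isReachable(HostAndPort.fromParts("example.com", 443));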

From source file:com.facebook.presto.jdbc.ext.PrestoConnection.java

private static URI createHttpUri(HostAndPort address) {
    return uriBuilder().scheme("http").host(address.getHostText()).port(address.getPort()).build();
}