Example usage for com.google.common.net HostAndPort fromParts

List of usage examples for com.google.common.net HostAndPort fromParts

Introduction

On this page you can find usage examples for com.google.common.net HostAndPort fromParts.

Prototype

public static HostAndPort fromParts(String host, int port) 

Document

Build a HostAndPort instance from separate host and port values.
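
Before the project examples below, here is a minimal, self-contained sketch (class and host names are illustrative) of what fromParts returns and how it validates its arguments:

import com.google.common.net.HostAndPort;

public class FromPartsDemo {
    public static void main(String[] args) {
        // Combine a separate host and port into one immutable value object.
        HostAndPort address = HostAndPort.fromParts("example.com", 8080);

        System.out.println(address.getPort());     // 8080
        System.out.println(address.hasPort());     // true
        System.out.println(address);               // example.com:8080

        // The host accessor is getHostText() in the Guava versions the
        // examples below are written against; newer releases renamed it
        // to getHost().
        System.out.println(address.getHostText()); // example.com

        // IPv6 literals are bracketed when rendered back to a string.
        System.out.println(HostAndPort.fromParts("::1", 80)); // [::1]:80

        // fromParts rejects a port outside 0-65535, and a host string that
        // already carries a port suffix, with an IllegalArgumentException:
        // HostAndPort.fromParts("example.com:80", 8080); // throws
    }
}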

Usage

From source file:com.streamsets.pipeline.kafka.impl.KafkaLowLevelConsumer08.java

private PartitionMetadata getPartitionMetadata(List<HostAndPort> brokers, String topic, int partition) {
    PartitionMetadata returnMetaData = null;
    for (HostAndPort broker : brokers) {
        SimpleConsumer simpleConsumer = null;
        try {
            LOG.info(
                    "Creating SimpleConsumer using the following configuration: host {}, port {}, max wait time {}, max "
                            + "fetch size {}, client columnName {}",
                    broker.getHostText(), broker.getPort(), METADATA_READER_TIME_OUT, BUFFER_SIZE,
                    METADATA_READER_CLIENT);
            simpleConsumer = new SimpleConsumer(broker.getHostText(), broker.getPort(),
                    METADATA_READER_TIME_OUT, BUFFER_SIZE, METADATA_READER_CLIENT);

            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            kafka.javaapi.TopicMetadataResponse resp = simpleConsumer.send(req);

            List<TopicMetadata> metaData = resp.topicsMetadata();
            for (TopicMetadata item : metaData) {
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    if (part.partitionId() == partition) {
                        returnMetaData = part;
                        break;
                    }
                }
            }
        } catch (Exception e) {
            LOG.error(KafkaErrors.KAFKA_25.getMessage(), broker.toString(), topic, partition, e.toString(), e);
        } finally {
            if (simpleConsumer != null) {
                simpleConsumer.close();
            }
        }
    }
    if (returnMetaData != null) {
        replicaBrokers.clear();
        for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
            replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
        }
    }
    return returnMetaData;
}

From source file:com.streamsets.pipeline.kafka.impl.KafkaLowLevelConsumer09.java

private PartitionMetadata getPartitionMetadata(List<HostAndPort> brokers, String topic, int partition) {
    PartitionMetadata returnMetaData = null;
    for (HostAndPort broker : brokers) {
        SimpleConsumer simpleConsumer = null;
        try {
            LOG.info(
                    "Creating SimpleConsumer using the following configuration: host {}, port {}, max wait time {}, max "
                            + "fetch size {}, client columnName {}",
                    broker.getHostText(), broker.getPort(), METADATA_READER_TIME_OUT, BUFFER_SIZE,
                    METADATA_READER_CLIENT);
            simpleConsumer = new SimpleConsumer(broker.getHostText(), broker.getPort(),
                    METADATA_READER_TIME_OUT, BUFFER_SIZE, METADATA_READER_CLIENT);

            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            kafka.javaapi.TopicMetadataResponse resp = simpleConsumer.send(req);

            List<TopicMetadata> metaData = resp.topicsMetadata();
            for (TopicMetadata item : metaData) {
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    if (part.partitionId() == partition) {
                        returnMetaData = part;
                        break;
                    }
                }
            }
        } catch (Exception e) {
            LOG.error(KafkaErrors.KAFKA_25.getMessage(), broker.toString(), topic, partition, e.toString(), e);
        } finally {
            if (simpleConsumer != null) {
                simpleConsumer.close();
            }
        }
    }
    if (returnMetaData != null) {
        replicaBrokers.clear();
        for (kafka.cluster.BrokerEndPoint replica : returnMetaData.replicas()) {
            replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
        }
    }
    return returnMetaData;
}

From source file:org.apache.accumulo.server.util.TServerUtils.java

public static ServerAddress createHsHaServer(HostAndPort address, TProcessor processor, final String serverName,
        String threadName, final int numThreads, long timeBetweenThreadChecks, long maxMessageSize)
        throws TTransportException {
    TNonblockingServerSocket transport = new TNonblockingServerSocket(
            new InetSocketAddress(address.getHostText(), address.getPort()));
    THsHaServer.Args options = new THsHaServer.Args(transport);
    options.protocolFactory(ThriftUtil.protocolFactory());
    options.transportFactory(ThriftUtil.transportFactory(maxMessageSize));
    options.maxReadBufferBytes = maxMessageSize;
    options.stopTimeoutVal(5);
    /*
     * Create our own very special thread pool.
     */
    final ThreadPoolExecutor pool = new SimpleThreadPool(numThreads, "ClientPool");
    // periodically adjust the number of threads we need by checking how busy our threads are
    SimpleTimer.getInstance().schedule(new Runnable() {
        @Override
        public void run() {
            if (pool.getCorePoolSize() <= pool.getActiveCount()) {
                int larger = pool.getCorePoolSize() + Math.min(pool.getQueue().size(), 2);
                log.info("Increasing server thread pool size on " + serverName + " to " + larger);
                pool.setMaximumPoolSize(larger);
                pool.setCorePoolSize(larger);
            } else {
                if (pool.getCorePoolSize() > pool.getActiveCount() + 3) {
                    int smaller = Math.max(numThreads, pool.getCorePoolSize() - 1);
                    if (smaller != pool.getCorePoolSize()) {
                        // there is a race condition here... the active count could be higher by the time
                        // we decrease the core pool size... so the active count could end up higher than
                        // the core pool size, in which case everything will be queued... the increase case
                        // should handle this and prevent deadlock
                        log.info("Decreasing server thread pool size on " + serverName + " to " + smaller);
                        pool.setCorePoolSize(smaller);
                    }
                }
            }
        }
    }, timeBetweenThreadChecks, timeBetweenThreadChecks);
    options.executorService(pool);
    options.processorFactory(new TProcessorFactory(processor));
    if (address.getPort() == 0) {
        address = HostAndPort.fromParts(address.getHostText(), transport.getPort());
    }
    return new ServerAddress(new THsHaServer(options), address);
}
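
The getPort() == 0 branch above shows a pattern that recurs with fromParts: request port 0 so the operating system assigns a free port, then rebuild the HostAndPort with the port that was actually bound. A minimal sketch of the same pattern with a plain ServerSocket (illustrative only, not part of the Accumulo code):

import com.google.common.net.HostAndPort;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortDemo {
    public static void main(String[] args) throws IOException {
        HostAndPort requested = HostAndPort.fromParts("localhost", 0);
        try (ServerSocket sock = new ServerSocket()) {
            sock.bind(new InetSocketAddress(requested.getHostText(), requested.getPort()));
            // Port 0 asked the OS for any free port; advertise the real one.
            HostAndPort actual = HostAndPort.fromParts(requested.getHostText(), sock.getLocalPort());
            System.out.println("Bound to " + actual); // e.g. localhost:53211
        }
    }
}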

From source file:io.druid.firehose.kafka.KafkaSimpleConsumer.java

private Broker findNewLeader(Broker oldLeader) throws InterruptedException {
    long retryCnt = 0;
    while (true) {
        PartitionMetadata metadata = findLeader();
        if (metadata != null) {
            replicaBrokers.clear();
            for (Broker replica : metadata.replicas()) {
                replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
            }

            log.debug("Got new Kafka leader metadata : [%s], previous leader : [%s]", metadata, oldLeader);
            Broker newLeader = metadata.leader();
            if (newLeader != null) {
                // We check the retryCnt here as well to make sure that we have slept a little bit
                // if we don't notice a change in leadership
                // just in case if Zookeeper doesn't get updated fast enough
                if (oldLeader == null || isValidNewLeader(newLeader) || retryCnt != 0) {
                    return newLeader;
                }
            }
        }

        Thread.sleep(RETRY_INTERVAL);
        retryCnt++;
        // if could not find the leader for current replicaBrokers, let's try to
        // find one via allBrokers
        if (retryCnt >= 3 && (retryCnt - 3) % 5 == 0) {
            log.warn("cannot find leader for [%s] - [%s] after [%s] retries", topic, partitionId, retryCnt);
            replicaBrokers.clear();
            replicaBrokers.addAll(allBrokers);
        }
    }
}

From source file:org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint.java

@Override
public void getRSGroupInfoOfServer(RpcController controller, GetRSGroupInfoOfServerRequest request,
        RpcCallback<GetRSGroupInfoOfServerResponse> done) {
    GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder();
    try {//from   w  w  w  . j a  v  a2s.c o  m
        HostAndPort hp = HostAndPort.fromParts(request.getServer().getHostName(),
                request.getServer().getPort());
        RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupOfServer(hp);
        if (RSGroupInfo != null) {
            builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    }
    done.run(builder.build());
}

From source file:alluxio.hadoop.AbstractFileSystem.java

@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len) throws IOException {
    if (file == null) {
        return null;
    }
    if (mStatistics != null) {
        mStatistics.incrementReadOps(1);
    }

    AlluxioURI path = new AlluxioURI(HadoopUtils.getPathWithoutScheme(file.getPath()));
    List<FileBlockInfo> blocks = getFileBlocks(path);
    List<BlockLocation> blockLocations = new ArrayList<>();
    for (FileBlockInfo fileBlockInfo : blocks) {
        long offset = fileBlockInfo.getOffset();
        long end = offset + fileBlockInfo.getBlockInfo().getLength();
        // Check if there is any overlapping between [start, start+len] and [offset, end]
        if (end >= start && offset <= start + len) {
            ArrayList<String> names = new ArrayList<>();
            ArrayList<String> hosts = new ArrayList<>();
            // add the existing in-memory block locations
            for (alluxio.wire.BlockLocation location : fileBlockInfo.getBlockInfo().getLocations()) {
                HostAndPort address = HostAndPort.fromParts(location.getWorkerAddress().getHost(),
                        location.getWorkerAddress().getDataPort());
                names.add(address.toString());
                hosts.add(address.getHostText());
            }
            // add under file system locations
            for (String location : fileBlockInfo.getUfsLocations()) {
                names.add(location);
                hosts.add(HostAndPort.fromString(location).getHostText());
            }
            blockLocations.add(new BlockLocation(CommonUtils.toStringArray(names),
                    CommonUtils.toStringArray(hosts), offset, fileBlockInfo.getBlockInfo().getLength()));
        }
    }

    BlockLocation[] ret = new BlockLocation[blockLocations.size()];
    blockLocations.toArray(ret);
    return ret;
}

From source file:org.apache.druid.firehose.kafka.KafkaSimpleConsumer.java

private Broker findNewLeader(Broker oldLeader) throws InterruptedException {
    long retryCnt = 0;
    while (true) {
        PartitionMetadata metadata = findLeader();
        if (metadata != null) {
            replicaBrokers.clear();
            for (Broker replica : metadata.replicas()) {
                replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
            }

            log.debug("Got new Kafka leader metadata : [%s], previous leader : [%s]", metadata, oldLeader);
            Broker newLeader = metadata.leader();
            if (newLeader != null) {
                // We check the retryCnt here as well to make sure that we have slept a little bit
                // if we don't notice a change in leadership
                // just in case if Zookeeper doesn't get updated fast enough
                if (oldLeader == null || isValidNewLeader(newLeader) || retryCnt != 0) {
                    return newLeader;
                }
            }
        }

        Thread.sleep(RETRY_INTERVAL_MILLIS);
        retryCnt++;
        // if could not find the leader for current replicaBrokers, let's try to
        // find one via allBrokers
        if (retryCnt >= 3 && (retryCnt - 3) % 5 == 0) {
            log.warn("cannot find leader for [%s] - [%s] after [%s] retries", topic, partitionId, retryCnt);
            replicaBrokers.clear();
            replicaBrokers.addAll(allBrokers);
        }
    }
}

From source file:org.jclouds.virtualbox.functions.MastersLoadingCache.java

private String getFilePathOrDownload(String httpUrl, String expectedMd5) throws ExecutionException {
    String fileName = httpUrl.substring(httpUrl.lastIndexOf('/') + 1, httpUrl.length());
    URI provider = providerSupplier.get();
    if (!socketTester.apply(HostAndPort.fromParts(provider.getHost(), provider.getPort()))) {
        throw new RuntimeException("could not connect to virtualbox");
    }
    File file = new File(isosDir, fileName);
    List<Statement> statements = new ImmutableList.Builder<Statement>()
            .add(Statements.saveHttpResponseTo(URI.create(httpUrl), isosDir, fileName)).build();
    StatementList statementList = new StatementList(statements);
    NodeMetadata hostNode = checkNotNull(hardcodedHostToHostNodeMetadata.apply(host.get()), "hostNode");
    ListenableFuture<ExecResponse> future = runScriptOnNodeFactory.submit(hostNode, statementList,
            runAsRoot(false));
    Futures.getUnchecked(future);

    if (expectedMd5 != null) {
        String filePath = isosDir + File.separator + fileName;
        ListenableFuture<ExecResponse> md5future = runScriptOnNodeFactory.submit(hostNode, new Md5(filePath),
                runAsRoot(false));

        ExecResponse responseMd5 = Futures.getUnchecked(md5future);
        assert responseMd5.getExitStatus() == 0 : hostNode.getId() + ": " + responseMd5;
        checkNotNull(responseMd5.getOutput(), "iso_md5 missing");
        String actualMd5 = responseMd5.getOutput().trim();
        checkState(actualMd5.equals(expectedMd5), "md5 of %s is %s but expected %s", filePath, actualMd5,
                expectedMd5);
    }
    return file.getAbsolutePath();
}
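
The socketTester used above is a jclouds predicate; a plain-JDK stand-in for the reachability check it performs might look like this (the timeout value is an assumption):

import com.google.common.net.HostAndPort;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class ReachabilityCheck {
    // True if a TCP connection to the endpoint succeeds within the timeout.
    static boolean canConnect(HostAndPort endpoint, int timeoutMillis) {
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress(endpoint.getHostText(), endpoint.getPort()), timeoutMillis);
            return true;
        } catch (IOException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(canConnect(HostAndPort.fromParts("localhost", 22), 2000));
    }
}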

From source file:brooklyn.networking.vclouddirector.PortForwarderVcloudDirector.java

/**
 * Deletes the NAT rule for the given port.
 *
 * Expects caller to call {@link PortForwardManager#forgetPortMapping(String, int)}
 */
public void closePortForwarding(EntityAndAttribute<Integer> privatePort, int publicPort) {
    Entity entity = privatePort.getEntity();
    Integer targetPort = privatePort.getValue();
    MachineLocation machine = Machines.findUniqueMachineLocation(entity.getLocations()).get();
    String targetIp = Iterables
            .getFirst(Iterables.concat(machine.getPrivateAddresses(), machine.getPublicAddresses()), null);
    if (targetIp == null) {
        throw new IllegalStateException("Failed to close port-forwarding for machine " + machine
                + " because its location has no target ip: " + machine);
    }
    HostAndPort targetSide = HostAndPort.fromParts(targetIp, targetPort);
    HostAndPort publicSide = HostAndPort.fromParts(subnetTier.getConfig(NETWORK_PUBLIC_IP), publicPort);

    closePortForwarding(targetSide, publicSide, Protocol.TCP);
}

From source file:org.apache.accumulo.server.util.TServerUtils.java

public static ServerAddress createThreadPoolServer(HostAndPort address, TProcessor processor, String serverName,
        String threadName, int numThreads) throws TTransportException {

    // if port is zero, then we must bind to get the port number
    ServerSocket sock;
    try {
        sock = ServerSocketChannel.open().socket();
        sock.setReuseAddress(true);
        sock.bind(new InetSocketAddress(address.getHostText(), address.getPort()));
        address = HostAndPort.fromParts(address.getHostText(), sock.getLocalPort());
    } catch (IOException ex) {
        throw new TTransportException(ex);
    }
    TServerTransport transport = new TBufferedServerSocket(sock, 32 * 1024);
    return new ServerAddress(createThreadPoolServer(transport, processor), address);
}