Example usage for java.net InetSocketAddress getHostName

Introduction

On this page you can find usage examples for java.net.InetSocketAddress.getHostName(), collected from open-source projects.

Prototype

public final String getHostName() 

Document

Gets the hostname. Note that this method may trigger a name service reverse lookup if the address was created with a literal IP address.
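
Before the project-level examples below, here is a minimal self-contained sketch (the class name, host names, and ports are arbitrary) of the method's behavior: for an address constructed from a host name, getHostName() simply returns that name; for an address created with createUnresolved, it returns the given name without performing any lookup.

import java.net.InetSocketAddress;

public class GetHostNameExample {
    public static void main(String[] args) {
        // Constructed from a host name: the name is kept and returned as-is.
        InetSocketAddress byName = new InetSocketAddress("localhost", 8080);
        System.out.println(byName.getHostName() + ":" + byName.getPort()); // localhost:8080

        // Unresolved address: no name service lookup is performed at creation,
        // and getHostName() returns the host name exactly as supplied.
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.com", 443);
        System.out.println(unresolved.getHostName() + ":" + unresolved.getPort()); // example.com:443
    }
}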

Usage

From source file:org.apache.tez.dag.api.client.DAGClientServer.java

private Server createServer(Class<?> pbProtocol, InetSocketAddress addr, Configuration conf, int numHandlers,
        BlockingService blockingService, String portRangeConfig) throws IOException {
    RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class);
    RPC.Server server = new RPC.Builder(conf).setProtocol(pbProtocol).setInstance(blockingService)
            .setBindAddress(addr.getHostName()).setPort(addr.getPort()).setNumHandlers(numHandlers)
            .setVerbose(false).setPortRangeConfig(portRangeConfig).build();
    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, pbProtocol, blockingService);
    return server;
}

From source file:org.apache.hadoop.hdfsproxy.TestHdfsProxy.java

/** verify hdfsproxy implements the hftp interface */
private void doTestHdfsProxyInterface() throws Exception {
    MiniDFSCluster cluster = null;
    HdfsProxy proxy = null;
    try {
        final UserGroupInformation CLIENT_UGI = UserGroupInformation.getCurrentUser();
        final String testUser = CLIENT_UGI.getShortUserName();
        final String testGroup = CLIENT_UGI.getGroupNames()[0];

        final Configuration dfsConf = new Configuration();
        dfsConf.set("hadoop.proxyuser." + testUser + ".groups", testGroup);
        dfsConf.set("hadoop.proxyuser." + testGroup + ".hosts", "127.0.0.1,localhost");
        dfsConf.set("hadoop.proxyuser." + testUser + ".hosts", "127.0.0.1,localhost");
        dfsConf.set("hadoop.security.authentication", "simple");

        //make sure server will look at the right config
        ProxyUsers.refreshSuperUserGroupsConfiguration(dfsConf);

        cluster = new MiniDFSCluster(dfsConf, 2, true, null);
        cluster.waitActive();

        final FileSystem localfs = FileSystem.get(LOCAL_FS, dfsConf);
        final FileSystem hdfs = cluster.getFileSystem();
        final Configuration proxyConf = new Configuration(false);
        proxyConf.set("hdfsproxy.dfs.namenode.address",
                hdfs.getUri().getHost() + ":" + hdfs.getUri().getPort());
        proxyConf.set("hdfsproxy.https.address", "localhost:0");
        final String namenode = hdfs.getUri().toString();
        if (namenode.startsWith("hdfs://")) {
            MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR + "/srcdat");

            hdfs.copyFromLocalFile(new Path("file:///" + TEST_ROOT_DIR + "/srcdat"),
                    new Path(namenode + "/destdat"));
            assertTrue("Source and destination directories do not match.", checkFiles(hdfs, "/destdat", files));

            proxyConf.set("proxy.http.test.listener.addr", "localhost:0");
            proxy = new HdfsProxy(proxyConf);
            proxy.start();
            InetSocketAddress proxyAddr = NetUtils.createSocketAddr("localhost:0");
            final String realProxyAddr = proxyAddr.getHostName() + ":" + proxy.getPort();
            final Path proxyUrl = new Path("hftp://" + realProxyAddr);
            final FileSystem hftp = proxyUrl.getFileSystem(dfsConf);

            FileUtil.copy(hftp, new Path(proxyUrl, "/destdat"), hdfs, new Path(namenode + "/copied1"), false,
                    true, proxyConf);

            assertTrue("Source and copied directories do not match.", checkFiles(hdfs, "/copied1", files));

            FileUtil.copy(hftp, new Path(proxyUrl, "/destdat"), localfs, new Path(TEST_ROOT_DIR + "/copied2"),
                    false, true, proxyConf);
            assertTrue("Source and copied directories do not match.",
                    checkFiles(localfs, TEST_ROOT_DIR + "/copied2", files));

            deldir(hdfs, "/destdat");
            deldir(hdfs, "/logs");
            deldir(hdfs, "/copied1");
            deldir(localfs, TEST_ROOT_DIR + "/srcdat");
            deldir(localfs, TEST_ROOT_DIR + "/copied2");
        }
        if (cluster != null) {
            cluster.shutdown();
        }
        if (proxy != null) {
            proxy.stop();
        }
    } catch (Exception t) {
        LOG.fatal("caught exception in test", t);
        if (cluster != null) {
            cluster.shutdown();
        }
        if (proxy != null) {
            proxy.stop();
        }
        throw t;
    }
}

From source file:org.apache.nifi.cluster.integration.Node.java

private String getClusterAddress() {
    final InetSocketAddress address = nodeProperties.getClusterNodeProtocolAddress();
    return address.getHostName() + ":" + address.getPort();
}

From source file:org.opcfoundation.ua.transport.https.HttpsClientPendingRequest.java

@Override
public void run() {
    try {
        // Abort exit branch
        if (abortCode != null) {
            result.setError(new ServiceResultException(abortCode));
            return;
        }

        // Http Post
        InetSocketAddress inetAddress = UriUtil.getSocketAddress(httpsClient.connectUrl);
        String host = inetAddress.getHostName();
        int port = inetAddress.getPort();
        String scheme = UriUtil.getTransportProtocol(httpsClient.connectUrl);
        HttpHost httpHost = new HttpHost(host, port, scheme);
        String url = httpsClient.transportChannelSettings.getDescription().getEndpointUrl();
        String endpointId = url == null ? "" : url; //UriUtil.getEndpointName(url);
        httpPost = new HttpPost(endpointId);
        httpPost.addHeader("OPCUA-SecurityPolicy", httpsClient.securityPolicyUri);
        httpPost.addHeader("Content-Type", "application/octet-stream");

        // Calculate message length
        EncoderCalc calc = new EncoderCalc();
        calc.setEncoderContext(httpsClient.encoderCtx);
        calc.putMessage(requestMessage);
        int len = calc.getLength();

        // Assert max size is not exceeded
        int maxLen = httpsClient.encoderCtx.getMaxMessageSize();
        if (maxLen != 0 && len > maxLen) {
            final EncodingException encodingException = new EncodingException(
                    StatusCodes.Bad_EncodingLimitsExceeded, "MaxStringLength " + maxLen + " < " + len);
            logger.warn("run: failed", encodingException);
            throw encodingException;
        }

        // Encode message
        byte[] data = new byte[len];
        BinaryEncoder enc = new BinaryEncoder(data);
        enc.setEncoderContext(httpsClient.encoderCtx);
        enc.setEncoderMode(EncoderMode.NonStrict);
        enc.putMessage(requestMessage);
        httpPost.setEntity(new NByteArrayEntity(data));

        // Abort exit branch
        if (abortCode != null) {
            result.setError(new ServiceResultException(abortCode));
            return;
        }

        // Execute Post

        HttpResponse httpResponse;
        try {
            httpResponse = httpsClient.httpclient.execute(httpHost, httpPost);
        } catch (SSLPeerUnverifiedException e) {
            // Currently, TLS_1_2 is not supported by JSSE implementations, for some odd reason
            // and it will give this exception when used.
            // Also, if the server certificate is rejected, we will get this error
            result.setError(new ServiceResultException(StatusCodes.Bad_SecurityPolicyRejected, e,
                    "Could not negotiate a TLS security cipher or the server did not provide a valid certificate."));
            return;
        }
        HttpEntity entity = httpResponse.getEntity();

        // Error response
        int statusCode = httpResponse.getStatusLine().getStatusCode();
        if (statusCode != 200) {
            UnsignedInteger uacode = StatusCodes.Bad_UnknownResponse;
            if (statusCode == 501)
                uacode = StatusCodes.Bad_ServiceUnsupported;
            String msg = EntityUtils.toString(entity);
            result.setError(new ServiceResultException(uacode, statusCode + ": " + msg));
            return;
        }

        // Abort exit branch
        if (abortCode != null) {
            result.setError(new ServiceResultException(abortCode));
            return;
        }

        // Decode Message
        data = EntityUtils.toByteArray(entity);

        BinaryDecoder dec = new BinaryDecoder(data);
        dec.setEncoderContext(httpsClient.encoderCtx);
        IEncodeable response = dec.getMessage();

        // Client sent an error
        if (response instanceof ErrorMessage) {
            ErrorMessage error = (ErrorMessage) response;
            ServiceResultException errorResult = new ServiceResultException(new StatusCode(error.getError()),
                    error.getReason());
            result.setError(errorResult);
            return;
        }

        try {
            // Client sent a valid message
            result.setResult((ServiceResponse) response);
        } catch (ClassCastException e) {
            result.setError(new ServiceResultException(e));
            logger.error("Cannot cast response to ServiceResponse, response=" + response.getClass(), e);
        }
    } catch (EncodingException e) {
        // Internal Error
        result.setError(new ServiceResultException(StatusCodes.Bad_EncodingError, e));
    } catch (ClientProtocolException e) {
        result.setError(new ServiceResultException(StatusCodes.Bad_CommunicationError, e));
    } catch (IOException e) {
        if (abortCode != null) {
            result.setError(new ServiceResultException(abortCode, e));
        } else {
            result.setError(new ServiceResultException(StatusCodes.Bad_CommunicationError, e));
        }
    } catch (DecodingException e) {
        result.setError(new ServiceResultException(StatusCodes.Bad_DecodingError, e));
    } catch (ServiceResultException e) {
        result.setError(e);
    } catch (RuntimeException rte) {
        // http-client seems to be throwing these, IllegalArgumentException for one
        result.setError(new ServiceResultException(rte));
    } finally {
        httpsClient.requests.remove(requestId);
    }
}

From source file:org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup.java

public synchronized void startDataNodes(Configuration conf, int numDataNodes, boolean manageDfsDirs,
        StartupOption operation, String[] racks, String[] nodeGroups, String[] hosts,
        long[] simulatedCapacities) throws IOException {
    conf.set("slave.host.name", "127.0.0.1");

    int curDatanodesNum = dataNodes.size();
    // for mincluster's the default initialDelay for BRs is 0
    if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
    if (nameNode != null) { // set conf from the name node
        InetSocketAddress nnAddr = nameNode.getNameNodeAddress();
        int nameNodePort = nnAddr.getPort();
        FileSystem.setDefaultUri(conf, "hdfs://" + nnAddr.getHostName() + ":" + Integer.toString(nameNodePort));
    }
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    if (nodeGroups != null && numDataNodes > nodeGroups.length) {
        throw new IllegalArgumentException("The length of nodeGroups [" + nodeGroups.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    //Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }

    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    // Set up the right ports for the datanodes
    conf.set("dfs.datanode.address", "127.0.0.1:0");
    conf.set("dfs.datanode.http.address", "127.0.0.1:0");
    conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");

    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null
            : new String[] { operation.getName() };

    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new Configuration(conf);

        if (manageDfsDirs) {
            File dir1 = new File(data_dir, "data" + (2 * i + 1));
            File dir2 = new File(data_dir, "data" + (2 * i + 2));
            dir1.mkdirs();
            dir2.mkdirs();
            if (!dir1.isDirectory() || !dir2.isDirectory()) {
                throw new IOException(
                        "Mkdirs failed to create directory for DataNode " + i + ": " + dir1 + " or " + dir2);
            }
            dnConf.set(DataNode.DATA_DIR_KEY, dir1.getPath() + "," + dir2.getPath());
        }
        if (simulatedCapacities != null) {
            dnConf.setBoolean("dfs.datanode.simulateddatastorage", true);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
                    simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set("slave.host.name", hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: " + dnConf.get("slave.host.name"));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            if (nodeGroups == null) {
                LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
            } else {
                LOG.info("Adding node with hostname : " + name + " to serverGroup "
                        + nodeGroups[i - curDatanodesNum] + " and rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum] + nodeGroups[i - curDatanodesNum]);
            }
        }
        Configuration newconf = new Configuration(dnConf); // save config
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }
        DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
        if (dn == null)
            throw new IOException(
                    "Cannot start DataNode in " + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        //since the HDFS does things based on IP:port, we need to add the mapping
        //for IP:port to rackId
        String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
        if (racks != null) {
            int port = dn.getSelfAddr().getPort();
            if (nodeGroups == null) {
                LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack "
                        + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i - curDatanodesNum]);
            } else {
                LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup "
                        + nodeGroups[i - curDatanodesNum] + " and rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(ipAddr + ":" + port,
                        racks[i - curDatanodesNum] + nodeGroups[i - curDatanodesNum]);
            }
        }
        DataNode.runDatanodeDaemon(dn);
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
}

From source file:org.apache.hama.bsp.TaskRunner.java

private List<String> buildJvmArgs(BSPJob jobConf, String classPath, Class<?> child) {
    // Build exec child JVM args.
    List<String> vargs = new ArrayList<String>();
    File jvm = // use same jvm as parent
            new File(new File(System.getProperty("java.home"), "bin"), "java");
    vargs.add(jvm.toString());

    // bsp.child.java.opts
    String javaOpts = jobConf.getConfiguration().get("bsp.child.java.opts", "-Xmx200m");
    javaOpts = javaOpts.replace("@taskid@", task.getTaskID().toString());

    String[] javaOptsSplit = javaOpts.split(" ");
    Collections.addAll(vargs, javaOptsSplit);

    // Add classpath.
    vargs.add("-classpath");
    vargs.add(classPath);
    // Add main class and its arguments
    LOG.debug("Executing child Process " + child.getName());
    vargs.add(child.getName()); // bsp class name

    if (GroomServer.BSPPeerChild.class.equals(child)) {
        InetSocketAddress addr = groomServer.getTaskTrackerReportAddress();
        vargs.add(addr.getHostName());
        vargs.add(Integer.toString(addr.getPort()));
        vargs.add(task.getTaskID().toString());
        vargs.add(groomServer.groomHostName);
        vargs.add(Long.toString(groomServer.getStartSuperstep(task.getTaskID())));
        TaskStatus status = groomServer.getTaskStatus(task.getTaskID());

        if (status != null && TaskStatus.State.RECOVERING.equals(status.getRunState())) {
            vargs.add(TaskStatus.State.RECOVERING.name());
        } else {
            vargs.add(TaskStatus.State.RUNNING.name());
        }

    }
    return vargs;
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer.java

private void doSecureLogin(Configuration conf) throws IOException {
    InetSocketAddress socAddr = getBindAddress(conf);
    SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
            YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, socAddr.getHostName());
}

From source file:org.jolokia.jvmagent.JolokiaHttpHandler.java

@SuppressWarnings({ "PMD.AvoidCatchingThrowable", "PMD.AvoidInstanceofChecksInCatchClause" })
public void doHandle(HttpExchange pExchange) throws IOException {
    if (requestHandler == null) {
        throw new IllegalStateException("Handler not yet started");
    }

    JSONAware json = null;
    URI uri = pExchange.getRequestURI();
    ParsedUri parsedUri = new ParsedUri(uri, context);
    try {
        // Check access policy
        InetSocketAddress address = pExchange.getRemoteAddress();
        requestHandler.checkAccess(address.getHostName(), address.getAddress().getHostAddress(),
                extractOriginOrReferer(pExchange));
        String method = pExchange.getRequestMethod();

        // Dispatch for the proper HTTP request method
        if ("GET".equalsIgnoreCase(method)) {
            setHeaders(pExchange);
            json = executeGetRequest(parsedUri);
        } else if ("POST".equalsIgnoreCase(method)) {
            setHeaders(pExchange);
            json = executePostRequest(pExchange, parsedUri);
        } else if ("OPTIONS".equalsIgnoreCase(method)) {
            performCorsPreflightCheck(pExchange);
        } else {
            throw new IllegalArgumentException("HTTP Method " + method + " is not supported.");
        }
    } catch (Throwable exp) {
        json = requestHandler.handleThrowable(
                exp instanceof RuntimeMBeanException ? ((RuntimeMBeanException) exp).getTargetException()
                        : exp);
    } finally {
        sendResponse(pExchange, parsedUri, json);
    }
}

From source file:co.rsk.net.discovery.PeerExplorer.java

public PingPeerMessage sendPing(InetSocketAddress nodeAddress, int attempt, Node node) {
    PingPeerMessage nodeMessage = checkPendingPeerToAddress(nodeAddress);
    if (nodeMessage != null) {
        return nodeMessage;
    }
    InetSocketAddress localAddress = this.localNode.getAddress();
    String id = UUID.randomUUID().toString();
    nodeMessage = PingPeerMessage.create(localAddress.getHostName(), localAddress.getPort(), id, this.key);
    udpChannel.write(new DiscoveryEvent(nodeMessage, nodeAddress));

    PeerDiscoveryRequest request = PeerDiscoveryRequestBuilder.builder().messageId(id).message(nodeMessage)
            .address(nodeAddress).expectedResponse(DiscoveryMessageType.PONG).relatedNode(node)
            .expirationPeriod(requestTimeout).attemptNumber(attempt).build();

    pendingPingRequests.put(nodeMessage.getMessageId(), request);
    return nodeMessage;
}

From source file:com.yahoo.gondola.tsunami.Tsunami.java

void setup() throws Exception {
    // Create gondola instances
    for (int i = 0; i < hostIds.size(); i++) {
        String[] split = hostIds.get(i).split(":");
        String hostId = split[0];
        String cliPort = split[1];
        InetSocketAddress addr = config.getAddressForHost(hostId);

        // Initialize the instance
        agents[i] = new AgentClient(hostId, addr.getHostName(), 1200,
                new CliClient(addr.getHostName(), Integer.parseInt(cliPort), 60000)); // 1m timeout
        //agents[i].createInstance();
    }
}