Example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_IPC_ADDRESS_KEY

List of usage examples for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_IPC_ADDRESS_KEY

Introduction

On this page you can find example usage of org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_IPC_ADDRESS_KEY.

Prototype

String DFS_DATANODE_IPC_ADDRESS_KEY

To view the source code for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_IPC_ADDRESS_KEY, click the source link below.

Click Source Link

Usage

From source file:com.uber.hoodie.common.minicluster.HdfsTestService.java

License:Apache License

/**
 * Prepares the supplied Hadoop configuration for launching an HDFS mini cluster.
 *
 * @param config           Pre-built Hadoop configuration to augment with HDFS settings
 * @param localDFSLocation Local-filesystem directory backing the mini cluster's data
 * @param bindIP           IP address the namenode and datanode are forced to bind to
 * @param namenodeRpcPort  RPC port of the namenode
 * @param namenodeHttpPort HTTP port of the namenode (not referenced in this method)
 * @param datanodePort     Data-transfer port of the datanode
 * @param datanodeIpcPort  IPC port of the datanode
 * @param datanodeHttpPort HTTP port of the datanode
 * @return The same Configuration instance, updated in place.
 */
private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP,
        int namenodeRpcPort, int namenodeHttpPort, int datanodePort, int datanodeIpcPort,
        int datanodeHttpPort) {

    logger.info("HDFS force binding to ip: " + bindIP);

    // Point the default filesystem and every datanode endpoint at the forced bind address.
    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":" + datanodePort);
    config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":" + datanodeIpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, bindIP + ":" + datanodeHttpPort);

    // On registration the namenode performs a reverse-DNS hostname check of the
    // datanode, which fails on OpenShift for internal IP addresses. Disabling the
    // check lets the datanode connect regardless.
    config.setBoolean("dfs.namenode.datanode.registration.ip-hostname-check", false);
    config.set("hdfs.minidfs.basedir", localDFSLocation);

    // Allow the current OS user to impersonate any user from any host.
    String currentUser = System.getProperty("user.name");
    config.set("hadoop.proxyuser." + currentUser + ".groups", "*");
    config.set("hadoop.proxyuser." + currentUser + ".hosts", "*");
    return config;
}

From source file:org.kitesdk.minicluster.HdfsService.java

License:Apache License

/**
 * Configure the DFS Cluster before launching it.
 * /*from  ww  w  . j  a v a 2s  . co  m*/
 * @param config
 *          The already created Hadoop configuration we'll further configure
 *          for HDFS
 * @param localDFSLocation
 *          The location on the local filesystem where cluster data is stored
 * @param bindIP
 *          An IP address we want to force the datanode and namenode to bind
 *          to.
 * @param namenodeRpcPort
 * @param namenodeHttpPort
 * @param datanodePort
 * @param datanodeIpcPort
 * @param datanodeHttpPort
 * @return The updated Configuration object.
 */
private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP,
        int namenodeRpcPort, int namenodeHttpPort, int datanodePort, int datanodeIpcPort,
        int datanodeHttpPort) {

    logger.info("HDFS force binding to ip: " + bindIP);
    config = new KiteCompatibleConfiguration(config, bindIP, namenodeRpcPort, namenodeHttpPort);
    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":" + datanodePort);
    config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":" + datanodeIpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, bindIP + ":" + datanodeHttpPort);
    // When a datanode registers with the namenode, the Namenode do a hostname
    // check of the datanode which will fail on OpenShift due to reverse DNS
    // issues with the internal IP addresses. This config disables that check,
    // and will allow a datanode to connect regardless.
    config.setBoolean("dfs.namenode.datanode.registration.ip-hostname-check", false);
    config.set("hdfs.minidfs.basedir", localDFSLocation);
    // allow current user to impersonate others
    String user = System.getProperty("user.name");
    config.set("hadoop.proxyuser." + user + ".groups", "*");
    config.set("hadoop.proxyuser." + user + ".hosts", "*");
    return config;
}