Example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_HTTP_ADDRESS_KEY

List of usage examples for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_HTTP_ADDRESS_KEY

Introduction

In this page you can find the example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_HTTP_ADDRESS_KEY.

Prototype

String DFS_DATANODE_HTTP_ADDRESS_KEY

To view the source code for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_HTTP_ADDRESS_KEY, click the Source Link below.

Click Source Link

Usage

From source file:com.cloudera.impala.service.JniFrontend.java

License:Apache License

/**
 *  Checks the data node's server side configuration by reading the CONF from the data
 *  node./*from w ww . j  ava2s . com*/
 *  This appends error messages to errorCause prefixed by prefix if data node
 *  configuration is not properly set.
 */
private void cdh41ShortCircuitReadDatanodeCheck(StringBuilder errorCause, String prefix) {
    String dnWebUiAddr = CONF.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT);
    URL dnWebUiUrl = null;
    try {
        dnWebUiUrl = new URL("http://" + dnWebUiAddr + "/conf");
    } catch (Exception e) {
        LOG.info(e.toString());
    }
    Configuration dnConf = new Configuration(false);
    dnConf.addResource(dnWebUiUrl);

    // dfs.datanode.data.dir.perm should be at least 750
    int permissionInt = 0;
    try {
        String permission = dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT);
        permissionInt = Integer.parseInt(permission);
    } catch (Exception e) {
    }
    if (permissionInt < 750) {
        errorCause.append(prefix);
        errorCause.append("Data node configuration ");
        errorCause.append(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
        errorCause.append(" is not properly set. It should be set to 750.\n");
    }

    // dfs.block.local-path-access.user should contain the user account impala is running
    // under
    String accessUser = dnConf.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
    if (accessUser == null || !accessUser.contains(System.getProperty("user.name"))) {
        errorCause.append(prefix);
        errorCause.append("Data node configuration ");
        errorCause.append(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
        errorCause.append(" is not properly set. It should contain ");
        errorCause.append(System.getProperty("user.name"));
        errorCause.append("\n");
    }
}

From source file:com.uber.hoodie.common.minicluster.HdfsTestService.java

License:Apache License

/**
 * Configure the DFS cluster before launching it.
 *
 * @param config           the already created Hadoop configuration to further configure for HDFS
 * @param localDFSLocation directory on the local filesystem where cluster data is stored
 * @param bindIP           IP address the datanode and namenode are forced to bind to
 * @return the updated Configuration object
 */
private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP,
        int namenodeRpcPort, int namenodeHttpPort, int datanodePort, int datanodeIpcPort,
        int datanodeHttpPort) {

    logger.info("HDFS force binding to ip: " + bindIP);

    String datanodeAddress = bindIP + ":" + datanodePort;
    String datanodeIpcAddress = bindIP + ":" + datanodeIpcPort;
    String datanodeHttpAddress = bindIP + ":" + datanodeHttpPort;

    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, datanodeAddress);
    config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, datanodeIpcAddress);
    config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, datanodeHttpAddress);

    // The namenode normally performs a hostname check when a datanode registers.
    // Reverse DNS for the internal IP addresses fails on OpenShift, so disable
    // the check and allow datanodes to connect regardless.
    config.setBoolean("dfs.namenode.datanode.registration.ip-hostname-check", false);
    config.set("hdfs.minidfs.basedir", localDFSLocation);

    // Allow the user running the minicluster to impersonate any other user.
    String currentUser = System.getProperty("user.name");
    config.set("hadoop.proxyuser." + currentUser + ".groups", "*");
    config.set("hadoop.proxyuser." + currentUser + ".hosts", "*");
    return config;
}

From source file:org.kitesdk.minicluster.HdfsService.java

License:Apache License

/**
 * Configure the DFS Cluster before launching it.
 * /*  ww w  . ja v a  2  s.  c om*/
 * @param config
 *          The already created Hadoop configuration we'll further configure
 *          for HDFS
 * @param localDFSLocation
 *          The location on the local filesystem where cluster data is stored
 * @param bindIP
 *          An IP address we want to force the datanode and namenode to bind
 *          to.
 * @param namenodeRpcPort
 * @param namenodeHttpPort
 * @param datanodePort
 * @param datanodeIpcPort
 * @param datanodeHttpPort
 * @return The updated Configuration object.
 */
private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP,
        int namenodeRpcPort, int namenodeHttpPort, int datanodePort, int datanodeIpcPort,
        int datanodeHttpPort) {

    logger.info("HDFS force binding to ip: " + bindIP);
    config = new KiteCompatibleConfiguration(config, bindIP, namenodeRpcPort, namenodeHttpPort);
    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":" + datanodePort);
    config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":" + datanodeIpcPort);
    config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, bindIP + ":" + datanodeHttpPort);
    // When a datanode registers with the namenode, the Namenode do a hostname
    // check of the datanode which will fail on OpenShift due to reverse DNS
    // issues with the internal IP addresses. This config disables that check,
    // and will allow a datanode to connect regardless.
    config.setBoolean("dfs.namenode.datanode.registration.ip-hostname-check", false);
    config.set("hdfs.minidfs.basedir", localDFSLocation);
    // allow current user to impersonate others
    String user = System.getProperty("user.name");
    config.set("hadoop.proxyuser." + user + ".groups", "*");
    config.set("hadoop.proxyuser." + user + ".hosts", "*");
    return config;
}