List of usage examples for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_ADDRESS_KEY
String DFS_DATANODE_ADDRESS_KEY
To view the source code for org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, click the Source link below.
From source file:com.uber.hoodie.common.minicluster.HdfsTestService.java
License:Apache License
/** * Configure the DFS Cluster before launching it. * * @param config The already created Hadoop configuration we'll further configure for HDFS * @param localDFSLocation The location on the local filesystem where cluster data is stored * @param bindIP An IP address we want to force the datanode and namenode to bind to. * @return The updated Configuration object. *//*from w w w.j a va2s .co m*/ private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP, int namenodeRpcPort, int namenodeHttpPort, int datanodePort, int datanodeIpcPort, int datanodeHttpPort) { logger.info("HDFS force binding to ip: " + bindIP); config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort); config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":" + datanodePort); config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":" + datanodeIpcPort); config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, bindIP + ":" + datanodeHttpPort); // When a datanode registers with the namenode, the Namenode do a hostname // check of the datanode which will fail on OpenShift due to reverse DNS // issues with the internal IP addresses. This config disables that check, // and will allow a datanode to connect regardless. config.setBoolean("dfs.namenode.datanode.registration.ip-hostname-check", false); config.set("hdfs.minidfs.basedir", localDFSLocation); // allow current user to impersonate others String user = System.getProperty("user.name"); config.set("hadoop.proxyuser." + user + ".groups", "*"); config.set("hadoop.proxyuser." + user + ".hosts", "*"); return config; }
From source file:org.kitesdk.minicluster.HdfsService.java
License:Apache License
/** * Configure the DFS Cluster before launching it. * //from w w w . ja va 2 s . c om * @param config * The already created Hadoop configuration we'll further configure * for HDFS * @param localDFSLocation * The location on the local filesystem where cluster data is stored * @param bindIP * An IP address we want to force the datanode and namenode to bind * to. * @param namenodeRpcPort * @param namenodeHttpPort * @param datanodePort * @param datanodeIpcPort * @param datanodeHttpPort * @return The updated Configuration object. */ private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP, int namenodeRpcPort, int namenodeHttpPort, int datanodePort, int datanodeIpcPort, int datanodeHttpPort) { logger.info("HDFS force binding to ip: " + bindIP); config = new KiteCompatibleConfiguration(config, bindIP, namenodeRpcPort, namenodeHttpPort); config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort); config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":" + datanodePort); config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":" + datanodeIpcPort); config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, bindIP + ":" + datanodeHttpPort); // When a datanode registers with the namenode, the Namenode do a hostname // check of the datanode which will fail on OpenShift due to reverse DNS // issues with the internal IP addresses. This config disables that check, // and will allow a datanode to connect regardless. config.setBoolean("dfs.namenode.datanode.registration.ip-hostname-check", false); config.set("hdfs.minidfs.basedir", localDFSLocation); // allow current user to impersonate others String user = System.getProperty("user.name"); config.set("hadoop.proxyuser." + user + ".groups", "*"); config.set("hadoop.proxyuser." + user + ".hosts", "*"); return config; }