List of usage examples for org.apache.hadoop.hdfs DFSConfigKeys DFS_DATANODE_DATA_DIR_PERMISSION_KEY
String DFS_DATANODE_DATA_DIR_PERMISSION_KEY
To view the source code for org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, click the Source link.
From source file:com.cloudera.impala.service.JniFrontend.java
License:Apache License
/** * Checks the data node's server side configuration by reading the CONF from the data * node./*w w w. ja v a2s. c om*/ * This appends error messages to errorCause prefixed by prefix if data node * configuration is not properly set. */ private void cdh41ShortCircuitReadDatanodeCheck(StringBuilder errorCause, String prefix) { String dnWebUiAddr = CONF.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT); URL dnWebUiUrl = null; try { dnWebUiUrl = new URL("http://" + dnWebUiAddr + "/conf"); } catch (Exception e) { LOG.info(e.toString()); } Configuration dnConf = new Configuration(false); dnConf.addResource(dnWebUiUrl); // dfs.datanode.data.dir.perm should be at least 750 int permissionInt = 0; try { String permission = dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT); permissionInt = Integer.parseInt(permission); } catch (Exception e) { } if (permissionInt < 750) { errorCause.append(prefix); errorCause.append("Data node configuration "); errorCause.append(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY); errorCause.append(" is not properly set. It should be set to 750.\n"); } // dfs.block.local-path-access.user should contain the user account impala is running // under String accessUser = dnConf.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY); if (accessUser == null || !accessUser.contains(System.getProperty("user.name"))) { errorCause.append(prefix); errorCause.append("Data node configuration "); errorCause.append(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY); errorCause.append(" is not properly set. It should contain "); errorCause.append(System.getProperty("user.name")); errorCause.append("\n"); } }
From source file:common.DataNode.java
License:Apache License
/**
 * Make an instance of DataNode after ensuring that at least one of the given data
 * directories (and their parent directories, if necessary) can be created.
 *
 * @param dataDirs List of directories, where the new DataNode instance should
 *                 keep its files.
 * @param conf Configuration instance to use.
 * @return DataNode instance for given list of data dirs and conf, or null if
 *         no directory from this directory list can be created.
 * @throws IOException
 */
static DataNode makeInstance(Collection<URI> dataDirs, Configuration conf) throws IOException {
    LocalFileSystem localFs = FileSystem.getLocal(conf);
    // Permission to apply to every data directory, taken from the configuration with
    // the standard default as fallback.
    FsPermission dataDirPermission = new FsPermission(
            conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
                    DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
    ArrayList<File> usableDirs = getDataDirsFromURIs(dataDirs, localFs, dataDirPermission);
    // Guard clause: without at least one usable directory no DataNode can be built.
    if (usableDirs.isEmpty()) {
        LOG.error("All directories in " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + " are invalid.");
        return null;
    }
    return new DataNode(conf, usableDirs);
}
From source file:hdfs.MiniHDFS.java
License:Apache License
public static void main(String[] args) throws Exception { if (args.length != 1) { throw new IllegalArgumentException("MiniHDFS <baseDirectory>"); }/*from ww w . j av a 2 s .co m*/ // configure Paths Path baseDir = Paths.get(args[0]); // hadoop-home/, so logs will not complain if (System.getenv("HADOOP_HOME") == null) { Path hadoopHome = baseDir.resolve("hadoop-home"); Files.createDirectories(hadoopHome); System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString()); } // hdfs-data/, where any data is going Path hdfsHome = baseDir.resolve("hdfs-data"); // start cluster Configuration cfg = new Configuration(); cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString()); // lower default permission: TODO: needed? cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766"); // TODO: remove hardcoded port! MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(9999).build(); // write our PID file Path tmp = Files.createTempFile(baseDir, null, null); String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0]; Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8)); Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE); // write our port file tmp = Files.createTempFile(baseDir, null, null); Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8)); Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE); }