Usage examples for org.apache.hadoop.fs.FileSystem.setPermission
public void setPermission(Path p, FsPermission permission) throws IOException
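Before the project examples below, here is a minimal, self-contained sketch of the call itself. The path and permission bits are hypothetical, chosen only to illustrate the three ways FsPermission is constructed in the examples that follow: from an octal short literal, from an octal digit string, and from a symbolic mode string via FsPermission.valueOf.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/tmp/example"); // hypothetical path
    fs.mkdirs(dir);
    // The three FsPermission constructions used in the examples below:
    fs.setPermission(dir, new FsPermission((short) 0755));     // octal short literal
    fs.setPermission(dir, new FsPermission("755"));            // octal digit string
    fs.setPermission(dir, FsPermission.valueOf("-rwxr-xr-x")); // symbolic form, as printed by ls
  }
}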
From source file: org.apache.sentry.hdfs.TestSentryINodeAttributesProvider.java
License: Apache License
@Test
public void testProvider() throws Exception {
  admin.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      String sysUser = UserGroupInformation.getCurrentUser().getShortUserName();
      FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));

      List<AclEntry> baseAclList = new ArrayList<AclEntry>();
      AclEntry.Builder builder = new AclEntry.Builder();
      baseAclList.add(builder.setType(AclEntryType.USER).setScope(AclEntryScope.ACCESS).build());
      baseAclList.add(builder.setType(AclEntryType.GROUP).setScope(AclEntryScope.ACCESS).build());
      baseAclList.add(builder.setType(AclEntryType.OTHER).setScope(AclEntryScope.ACCESS).build());
      Path path1 = new Path("/user/authz/obj/xxx");
      fs.mkdirs(path1);
      fs.setAcl(path1, baseAclList);
      fs.mkdirs(new Path("/user/authz/xxx"));
      fs.mkdirs(new Path("/user/xxx"));

      // root
      Path path = new Path("/");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir before prefixes
      path = new Path("/user");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // prefix dir
      path = new Path("/user/authz");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir inside of prefix, no obj
      path = new Path("/user/authz/xxx");
      FileStatus status = fs.getFileStatus(path);
      Assert.assertEquals(sysUser, status.getOwner());
      Assert.assertEquals("supergroup", status.getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir inside of prefix, obj
      path = new Path("/user/authz/obj");
      Assert.assertEquals("hive", fs.getFileStatus(path).getOwner());
      Assert.assertEquals("hive", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(path).getPermission());
      Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty());
      List<AclEntry> acls = new ArrayList<AclEntry>();
      acls.add(new AclEntry.Builder().setName(sysUser).setType(AclEntryType.USER)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.ALL).build());
      acls.add(new AclEntry.Builder().setName("supergroup").setType(AclEntryType.GROUP)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.READ_EXECUTE).build());
      acls.add(new AclEntry.Builder().setName("user-authz").setType(AclEntryType.USER)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.ALL).build());
      Assert.assertEquals(new LinkedHashSet<AclEntry>(acls),
          new LinkedHashSet<AclEntry>(fs.getAclStatus(path).getEntries()));

      // dir inside of prefix, inside of obj
      path = new Path("/user/authz/obj/xxx");
      Assert.assertEquals("hive", fs.getFileStatus(path).getOwner());
      Assert.assertEquals("hive", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(path).getPermission());
      Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty());

      Path path2 = new Path("/user/authz/obj/path2");
      fs.mkdirs(path2);
      fs.setAcl(path2, baseAclList);

      // dir outside of prefix
      path = new Path("/user/xxx");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // stale and dir inside of prefix, obj
      System.setProperty("test.stale", "true");
      path = new Path("/user/authz/xxx");
      status = fs.getFileStatus(path);
      Assert.assertEquals(sysUser, status.getOwner());
      Assert.assertEquals("supergroup", status.getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // setPermission sets the permission for dir outside of prefix.
      // setUser/setGroup sets the user/group for dir outside of prefix.
      Path pathOutside = new Path("/user/xxx");
      fs.setPermission(pathOutside, new FsPermission((short) 0000));
      Assert.assertEquals(new FsPermission((short) 0000), fs.getFileStatus(pathOutside).getPermission());
      fs.setOwner(pathOutside, sysUser, "supergroup");
      Assert.assertEquals(sysUser, fs.getFileStatus(pathOutside).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(pathOutside).getGroup());

      // removeAcl removes the ACL entries for dir outside of prefix.
      List<AclEntry> aclsOutside = new ArrayList<AclEntry>(baseAclList);
      List<AclEntry> acl = new ArrayList<AclEntry>();
      acl.add(new AclEntry.Builder().setName("supergroup").setType(AclEntryType.GROUP)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.READ_EXECUTE).build());
      aclsOutside.addAll(acl);
      fs.setAcl(pathOutside, aclsOutside);
      fs.removeAclEntries(pathOutside, acl);
      Assert.assertFalse(fs.getAclStatus(pathOutside).getEntries().containsAll(acl));

      // setPermission sets the permission for dir inside of prefix but not a hive obj.
      // setUser/setGroup sets the user/group for dir inside of prefix but not a hive obj.
      Path pathInside = new Path("/user/authz/xxx");
      fs.setPermission(pathInside, new FsPermission((short) 0000));
      Assert.assertEquals(new FsPermission((short) 0000), fs.getFileStatus(pathInside).getPermission());
      fs.setOwner(pathInside, sysUser, "supergroup");
      Assert.assertEquals(sysUser, fs.getFileStatus(pathInside).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(pathInside).getGroup());

      // removeAcl is a no op for dir inside of prefix.
      Assert.assertTrue(fs.getAclStatus(pathInside).getEntries().isEmpty());
      fs.removeAclEntries(pathInside, acl);
      Assert.assertTrue(fs.getAclStatus(pathInside).getEntries().isEmpty());

      // setPermission/setUser/setGroup is a no op for dir inside of prefix, and is a hive obj.
      Path pathInsideAndHive = new Path("/user/authz/obj");
      fs.setPermission(pathInsideAndHive, new FsPermission((short) 0000));
      Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(pathInsideAndHive).getPermission());
      fs.setOwner(pathInsideAndHive, sysUser, "supergroup");
      Assert.assertEquals("hive", fs.getFileStatus(pathInsideAndHive).getOwner());
      Assert.assertEquals("hive", fs.getFileStatus(pathInsideAndHive).getGroup());
      return null;
    }
  });
}
From source file: org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory.java
License: Apache License
public static HiveServer create(HiveServer2Type type, Map<String, String> properties, File baseDir,
    File confDir, File logDir, String policyFile, FileSystem fileSystem) throws Exception {
  if (type.equals(HiveServer2Type.UnmanagedHiveServer2)) {
    LOGGER.info("Creating UnmanagedHiveServer");
    return new UnmanagedHiveServer();
  }
  if (!properties.containsKey(WAREHOUSE_DIR)) {
    LOGGER.info("fileSystem " + fileSystem.getClass().getSimpleName());
    if (fileSystem instanceof DistributedFileSystem) {
      @SuppressWarnings("static-access")
      String dfsUri = fileSystem.getDefaultUri(fileSystem.getConf()).toString();
      LOGGER.info("dfsUri " + dfsUri);
      properties.put(WAREHOUSE_DIR, dfsUri + "/data");
      fileSystem.mkdirs(new Path("/data/"), new FsPermission((short) 0777));
    } else {
      properties.put(WAREHOUSE_DIR, new File(baseDir, "warehouse").getPath());
      fileSystem.mkdirs(new Path("/", "warehouse"), new FsPermission((short) 0777));
    }
  }
  Boolean policyOnHDFS = Boolean.valueOf(System.getProperty("sentry.e2etest.policyonhdfs", "false"));
  if (policyOnHDFS) {
    // Initialize "hive.exec.scratchdir"; according to the description of
    // "hive.exec.scratchdir", the permission should be (733).
    // <description>HDFS root scratch dir for Hive jobs which gets created with write
    // all (733) permission. For each connecting user, an HDFS scratch dir:
    // ${hive.exec.scratchdir}/<username> is created,
    // with ${hive.scratch.dir.permission}.</description>
    fileSystem.mkdirs(new Path("/tmp/hive/"));
    fileSystem.setPermission(new Path("/tmp/hive/"), new FsPermission((short) 0733));
  } else {
    LOGGER.info("Setting a readable path to hive.exec.scratchdir");
    properties.put("hive.exec.scratchdir", new File(baseDir, "scratchdir").getPath());
  }
  if (!properties.containsKey(METASTORE_CONNECTION_URL)) {
    properties.put(METASTORE_CONNECTION_URL,
        String.format("jdbc:derby:;databaseName=%s;create=true;createDatabaseIfNotExist=true",
            new File(baseDir, "metastore").getPath()));
    properties.put("datanucleus.schema.autoCreateTables", "true");
  }
  if (!properties.containsKey(ACCESS_TESTING_MODE)) {
    properties.put(ACCESS_TESTING_MODE, "true");
  }
  if (!properties.containsKey(AUTHZ_PROVIDER_RESOURCE)) {
    LOGGER.info("Policy File location: " + policyFile);
    properties.put(AUTHZ_PROVIDER_RESOURCE, policyFile);
  }
  if (!properties.containsKey(AUTHZ_PROVIDER)) {
    properties.put(AUTHZ_PROVIDER, LocalGroupResourceAuthorizationProvider.class.getName());
  }
  if (!properties.containsKey(AUTHZ_SERVER_NAME)) {
    properties.put(AUTHZ_SERVER_NAME, DEFAULT_AUTHZ_SERVER_NAME);
  }
  if (!properties.containsKey(HS2_PORT)) {
    properties.put(HS2_PORT, String.valueOf(findPort()));
  }
  if (!properties.containsKey(SUPPORT_CONCURRENCY)) {
    properties.put(SUPPORT_CONCURRENCY, "false");
  }
  if (!properties.containsKey(HADOOPBIN)) {
    properties.put(HADOOPBIN, "./target/test-classes/hadoop");
  }
  // Modify the test resource to have executable permission
  java.nio.file.Path hadoopPath = FileSystems.getDefault().getPath("target/test-classes", "hadoop");
  if (hadoopPath != null) {
    hadoopPath.toFile().setExecutable(true);
  }
  if (HiveServer2Type.InternalMetastore.equals(type)) {
    // The configuration sentry.metastore.service.users is for the user who
    // has all access to get the metadata.
    properties.put(METASTORE_BYPASS, "accessAllMetaUser");
    if (!properties.containsKey(METASTORE_URI)) {
      properties.put(METASTORE_URI, "thrift://localhost:" + String.valueOf(findPort()));
    }
    if (!properties.containsKey(METASTORE_HOOK)) {
      properties.put(METASTORE_HOOK, "org.apache.sentry.binding.metastore.MetastoreAuthzBinding");
    }
    properties.put(ConfVars.METASTORESERVERMINTHREADS.varname, "5");
  }
  properties.put(ConfVars.HIVE_AUTHORIZATION_ENABLED.varname, "true");
  properties.put(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname, SentryHiveAuthorizerFactory.class.getName());
  // CBO has a bug on Hive 2.0.0 with VIEWS because ReadIdentity objects are sent without
  // parent information for partitioned columns
  properties.put(ConfVars.HIVE_CBO_ENABLED.varname, "false");
  // Hive 2.x sets the following configuration to TRUE by default, and it causes test issues on
  // Sentry because we're trying to change columns with different column types
  properties.put(ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES.varname, "false");
  // This configuration will avoid starting the HS2 WebUI that was causing test failures when
  // HS2 is configured for concurrency
  properties.put(ConfVars.HIVE_IN_TEST.varname, "true");
  // This configuration is used by SentryHiveAuthorizerFactory to change the client type
  // to HIVESERVER2 if we're using the authorization V2 in test mode.
  properties.put(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE.varname, "true");
  // Sets the hadoop temporary directory specified by java.io.tmpdir (already set to the
  // maven build directory to avoid writing to /tmp directly)
  String hadoopTempDir = System.getProperty("java.io.tmpdir") + File.separator + "hadoop-tmp";
  properties.put("hadoop.tmp.dir", hadoopTempDir);
  // This configuration will avoid the HMS failing if the metastore schema has no version
  // information. For some reason, HMS does not set a version initially on our tests.
  properties.put(ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "false");
  // Disable join cartesian checks to allow Sentry tests to pass
  properties.put(ConfVars.HIVE_STRICT_CHECKS_CARTESIAN.varname, "false");
  // Disable capability checks (these checks do not work when Hive is in testing mode)
  properties.put(ConfVars.METASTORE_CAPABILITY_CHECK.varname, "false");
  if (!properties.containsKey(METASTORE_BYPASS)) {
    properties.put(METASTORE_BYPASS, "hive,impala," + System.getProperty("user.name", ""));
  } else {
    String tempByPass = properties.get(METASTORE_BYPASS);
    tempByPass = "hive,impala," + System.getProperty("user.name", "") + "," + tempByPass;
    properties.put(METASTORE_BYPASS, tempByPass);
  }
  if (!properties.containsKey(HiveAuthzConf.AuthzConfVars.AUTHZ_SERVER_NAME.getVar())) {
    properties.put(HiveAuthzConf.AuthzConfVars.AUTHZ_SERVER_NAME.getVar(), "server1");
  }
  properties.put(METASTORE_SETUGI, "true");
  properties.put(METASTORE_CLIENT_TIMEOUT, "100");
  properties.put(ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "true");
  properties.put(ConfVars.HIVESTATSAUTOGATHER.varname, "false");
  properties.put(ConfVars.HIVE_STATS_COLLECT_SCANCOLS.varname, "true");
  String hadoopBinPath = properties.get(HADOOPBIN);
  Assert.assertNotNull(hadoopBinPath, "Hadoop Bin");
  File hadoopBin = new File(hadoopBinPath);
  if (!hadoopBin.isFile()) {
    Assert.fail("Path to hadoop bin " + hadoopBin.getPath() + " is invalid. "
        + "Perhaps you missed the download-hadoop profile.");
  }
  /*
   * This hack, setting the hiveSiteURL field, removes a previous hack involving
   * setting of system properties for each property. Although both are hacks,
   * I prefer this hack because once the system properties are set they can
   * affect later tests unless those tests clear them. This hack allows for
   * a clean switch to a new set of defaults when a new HiveConf object is created.
   */
  Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(null);
  HiveConf hiveConf = new HiveConf();
  HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml"));
  for (Map.Entry<String, String> entry : properties.entrySet()) {
    LOGGER.info(entry.getKey() + " => " + entry.getValue());
    hiveConf.set(entry.getKey(), entry.getValue());
    authzConf.set(entry.getKey(), entry.getValue());
  }
  File hiveSite = new File(confDir, "hive-site.xml");
  File accessSite = new File(confDir, HiveAuthzConf.AUTHZ_SITE_FILE);
  OutputStream out = new FileOutputStream(accessSite);
  authzConf.writeXml(out);
  out.close();
  // points hive-site.xml at access-site.xml
  hiveConf.set(HiveAuthzConf.HIVE_SENTRY_CONF_URL, "file:///" + accessSite.getPath());
  if (!properties.containsKey(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname)) {
    hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK.varname,
        "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
  }
  hiveConf.set(HIVESERVER2_IMPERSONATION, "false");
  out = new FileOutputStream(hiveSite);
  hiveConf.writeXml(out);
  out.close();
  Reflection.staticField("hiveSiteURL").ofType(URL.class).in(HiveConf.class).set(hiveSite.toURI().toURL());
  switch (type) {
  case EmbeddedHiveServer2:
    LOGGER.info("Creating EmbeddedHiveServer");
    return new EmbeddedHiveServer();
  case InternalHiveServer2:
    LOGGER.info("Creating InternalHiveServer");
    return new InternalHiveServer(hiveConf);
  case InternalMetastore:
    LOGGER.info("Creating InternalMetastoreServer");
    return new InternalMetastoreServer(hiveConf);
  case ExternalHiveServer2:
    LOGGER.info("Creating ExternalHiveServer");
    return new ExternalHiveServer(hiveConf, confDir, logDir);
  default:
    throw new UnsupportedOperationException(type.name());
  }
}
From source file: org.apache.solr.hadoop.hack.MiniMRClientClusterFactory.java
License: Apache License
public static MiniMRClientCluster create(Class<?> caller, String identifier, int noOfNMs,
    Configuration conf, File testWorkDir) throws IOException {
  if (conf == null) {
    conf = new Configuration();
  }
  FileSystem fs = FileSystem.get(conf);
  Path testRootDir = new Path(testWorkDir.getPath(), identifier + "-tmpDir").makeQualified(fs);
  Path appJar = new Path(testRootDir, "MRAppJar.jar");

  // Copy MRAppJar and make it private.
  Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR);
  fs.copyFromLocalFile(appMasterJar, appJar);
  fs.setPermission(appJar, new FsPermission("744"));

  Job job = Job.getInstance(conf);
  job.addFileToClassPath(appJar);

  Path callerJar = new Path(JarFinder.getJar(caller));
  Path remoteCallerJar = new Path(testRootDir, callerJar.getName());
  fs.copyFromLocalFile(callerJar, remoteCallerJar);
  fs.setPermission(remoteCallerJar, new FsPermission("744"));
  job.addFileToClassPath(remoteCallerJar);

  MiniMRYarnCluster miniMRYarnCluster;
  try {
    miniMRYarnCluster = new MiniMRYarnCluster(identifier, noOfNMs, testWorkDir);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  job.getConfiguration().set("minimrclientcluster.caller.name", identifier);
  job.getConfiguration().setInt("minimrclientcluster.nodemanagers.number", noOfNMs);
  miniMRYarnCluster.init(job.getConfiguration());
  miniMRYarnCluster.start();
  return new MiniMRYarnClusterAdapter(miniMRYarnCluster, testWorkDir);
}
From source file: org.apache.solr.hadoop.MorphlineBasicMiniMRTest.java
License: Apache License
@BeforeClass
public static void setupClass() throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  // String proxyUser = System.getProperty("user.name");
  // String proxyGroup = "g";
  // StringBuilder sb = new StringBuilder();
  // sb.append("127.0.0.1,localhost");
  // for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
  //   sb.append(",").append(i.getCanonicalHostName());
  // }
  System.setProperty("solr.hdfs.blockcache.enabled", "false");

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));

  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
From source file: org.apache.solr.hadoop.MorphlineGoLiveMiniMRTest.java
License: Apache License
@BeforeClass
public static void setupClass() throws Exception {
  // if (isYarn()) {
  //   org.junit.Assume.assumeTrue(false); // ignore test on Yarn until CDH-10420 is fixed
  // }
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  System.setProperty("solr.hdfs.blockcache.enabled", "false");

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  createTempDir();
  System.setProperty("test.build.data", dataDir + File.separator + "hdfs" + File.separator + "build");
  System.setProperty("test.cache.data", dataDir + File.separator + "hdfs" + File.separator + "cache");

  dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));

  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
From source file: org.apache.sqoop.mapreduce.HBaseBulkImportJob.java
License: Apache License
/**
 * Set the file permission of the path of the given fileStatus. If the path
 * is a directory, apply permission recursively to all subdirectories and
 * files.
 *
 * @param fs the filesystem
 * @param fileStatus containing the path
 * @param permission the permission
 * @throws java.io.IOException
 */
private void setPermission(FileSystem fs, FileStatus fileStatus, FsPermission permission) throws IOException {
  if (fileStatus.isDir()) {
    for (FileStatus file : fs.listStatus(fileStatus.getPath())) {
      setPermission(fs, file, permission);
    }
  }
  fs.setPermission(fileStatus.getPath(), permission);
}
From source file: org.apache.tajo.master.querymaster.QueryMasterTask.java
License: Apache License
/**
 * It initializes the final output and staging directory and sets
 * them to variables.
 */
public static Path initStagingDir(TajoConf conf, String queryId, QueryContext context) throws IOException {
  String realUser;
  String currentUser;
  UserGroupInformation ugi;
  ugi = UserGroupInformation.getLoginUser();
  realUser = ugi.getShortUserName();
  currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
  FileSystem fs;
  Path stagingDir;

  ////////////////////////////////////////////
  // Create Output Directory
  ////////////////////////////////////////////

  String outputPath = context.get(QueryVars.OUTPUT_TABLE_PATH, "");
  if (context.isCreateTable() || context.isInsert()) {
    if (outputPath == null || outputPath.isEmpty()) {
      // hbase
      stagingDir = new Path(TajoConf.getDefaultRootStagingDir(conf), queryId);
    } else {
      stagingDir = StorageUtil.concatPath(context.getOutputPath(), TMP_STAGING_DIR_PREFIX, queryId);
    }
  } else {
    stagingDir = new Path(TajoConf.getDefaultRootStagingDir(conf), queryId);
  }

  // initialize
  fs = stagingDir.getFileSystem(conf);
  if (fs.exists(stagingDir)) {
    throw new IOException("The staging directory '" + stagingDir + "' already exists");
  }
  fs.mkdirs(stagingDir, new FsPermission(STAGING_DIR_PERMISSION));
  FileStatus fsStatus = fs.getFileStatus(stagingDir);
  String owner = fsStatus.getOwner();
  if (!owner.isEmpty() && !(owner.equals(currentUser) || owner.equals(realUser))) {
    throw new IOException("The ownership on the user's query " + "directory " + stagingDir
        + " is not as expected. " + "It is owned by " + owner + ". The directory must "
        + "be owned by the submitter " + currentUser + " or " + "by " + realUser);
  }
  if (!fsStatus.getPermission().equals(STAGING_DIR_PERMISSION)) {
    LOG.info("Permissions on staging directory " + stagingDir + " are " + "incorrect: "
        + fsStatus.getPermission() + ". Fixing permissions " + "to correct value "
        + STAGING_DIR_PERMISSION);
    fs.setPermission(stagingDir, new FsPermission(STAGING_DIR_PERMISSION));
  }

  Path stagingResultDir = new Path(stagingDir, TajoConstants.RESULT_DIR_NAME);
  fs.mkdirs(stagingResultDir);
  return stagingDir;
}
From source file: org.apache.tajo.master.QueryMaster.java
License: Apache License
/**
 * It initializes the final output and staging directory and sets
 * them to variables.
 */
private void initStagingDir() throws IOException {
  QueryConf conf = getContext().getConf();
  String realUser;
  String currentUser;
  UserGroupInformation ugi;
  ugi = UserGroupInformation.getLoginUser();
  realUser = ugi.getShortUserName();
  currentUser = UserGroupInformation.getCurrentUser().getShortUserName();

  String givenOutputTableName = conf.getOutputTable();
  Path stagingDir;

  // If the final output directory is not given by a user,
  // we use the query id as an output directory.
  if (givenOutputTableName.equals("")) {
    this.isCreateTableStmt = false;
    FileSystem defaultFS = FileSystem.get(conf);

    Path homeDirectory = defaultFS.getHomeDirectory();
    if (!defaultFS.exists(homeDirectory)) {
      defaultFS.mkdirs(homeDirectory, new FsPermission(USER_DIR_PERMISSION));
    }

    Path userQueryDir = new Path(homeDirectory, TajoConstants.USER_QUERYDIR_PREFIX);

    if (defaultFS.exists(userQueryDir)) {
      FileStatus fsStatus = defaultFS.getFileStatus(userQueryDir);
      String owner = fsStatus.getOwner();
      if (!(owner.equals(currentUser) || owner.equals(realUser))) {
        throw new IOException("The ownership on the user's query " + "directory " + userQueryDir
            + " is not as expected. " + "It is owned by " + owner + ". The directory must "
            + "be owned by the submitter " + currentUser + " or " + "by " + realUser);
      }
      if (!fsStatus.getPermission().equals(USER_DIR_PERMISSION)) {
        LOG.info("Permissions on staging directory " + userQueryDir + " are " + "incorrect: "
            + fsStatus.getPermission() + ". Fixing permissions " + "to correct value "
            + USER_DIR_PERMISSION);
        defaultFS.setPermission(userQueryDir, new FsPermission(USER_DIR_PERMISSION));
      }
    } else {
      defaultFS.mkdirs(userQueryDir, new FsPermission(USER_DIR_PERMISSION));
    }

    stagingDir = StorageUtil.concatPath(userQueryDir, queryId.toString());

    if (defaultFS.exists(stagingDir)) {
      throw new IOException("The staging directory " + stagingDir
          + " already exists. The directory must be unique to each query");
    } else {
      defaultFS.mkdirs(stagingDir, new FsPermission(USER_DIR_PERMISSION));
    }

    // Set the query id to the output table name
    conf.setOutputTable(queryId.toString());

  } else {
    this.isCreateTableStmt = true;
    Path warehouseDir = new Path(conf.getVar(TajoConf.ConfVars.ROOT_DIR), TajoConstants.WAREHOUSE_DIR);
    stagingDir = new Path(warehouseDir, conf.getOutputTable());

    FileSystem fs = warehouseDir.getFileSystem(conf);
    if (fs.exists(stagingDir)) {
      throw new IOException("The staging directory " + stagingDir
          + " already exists. The directory must be unique to each query");
    } else {
      // TODO - should have appropriate permission
      fs.mkdirs(stagingDir, new FsPermission(USER_DIR_PERMISSION));
    }
  }

  conf.setOutputPath(stagingDir);
  outputPath = stagingDir;
  LOG.info("Initialized Query Staging Dir: " + outputPath);
}
From source file: org.apache.tez.client.TezClientUtils.java
License: Apache License
/**
 * Verify or create the Staging area directory on the configured Filesystem
 * @param stagingArea Staging area directory path
 * @return the FileSystem for the staging area directory
 * @throws IOException
 */
public static FileSystem ensureStagingDirExists(Configuration conf, Path stagingArea) throws IOException {
  FileSystem fs = stagingArea.getFileSystem(conf);
  String realUser;
  String currentUser;
  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  realUser = ugi.getShortUserName();
  currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
  if (fs.exists(stagingArea)) {
    FileStatus fsStatus = fs.getFileStatus(stagingArea);
    String owner = fsStatus.getOwner();
    if (!(owner.equals(currentUser) || owner.equals(realUser))) {
      throw new IOException("The ownership on the staging directory " + stagingArea
          + " is not as expected. " + "It is owned by " + owner + ". The directory must "
          + "be owned by the submitter " + currentUser + " or " + "by " + realUser);
    }
    if (!fsStatus.getPermission().equals(TezCommonUtils.TEZ_AM_DIR_PERMISSION)) {
      LOG.info("Permissions on staging directory " + stagingArea + " are " + "incorrect: "
          + fsStatus.getPermission() + ". Fixing permissions " + "to correct value "
          + TezCommonUtils.TEZ_AM_DIR_PERMISSION);
      fs.setPermission(stagingArea, TezCommonUtils.TEZ_AM_DIR_PERMISSION);
    }
  } else {
    TezCommonUtils.mkDirForAM(fs, stagingArea);
  }
  return fs;
}
From source file: org.apache.tez.test.MiniTezCluster.java
License: Apache License
@Override
public void serviceInit(Configuration conf) throws Exception {
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_TEZ_FRAMEWORK_NAME);
  // Use libs from cluster since no build is available
  conf.setBoolean(TezConfiguration.TEZ_USE_CLUSTER_HADOOP_LIBS, true);
  // blacklisting disabled to prevent scheduling issues
  conf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
  if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
    conf.set(MRJobConfig.MR_AM_STAGING_DIR,
        new File(getTestWorkDir(), "apps_staging_dir" + Path.SEPARATOR).getAbsolutePath());
  }
  if (conf.get(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC) == null) {
    // nothing defined. set quick delete value
    conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0l);
  }
  File appJarLocalFile = new File(MiniTezCluster.APPJAR);
  if (!appJarLocalFile.exists()) {
    String message = "TezAppJar " + MiniTezCluster.APPJAR + " not found. Exiting.";
    LOG.info(message);
    throw new TezUncheckedException(message);
  } else {
    LOG.info("Using Tez AppJar: " + appJarLocalFile.getAbsolutePath());
  }
  FileSystem fs = FileSystem.get(conf);
  Path testRootDir = fs.makeQualified(new Path("target", getName() + "-tmpDir"));
  Path appRemoteJar = new Path(testRootDir, "TezAppJar.jar");
  // Copy AppJar and make it public.
  Path appMasterJar = new Path(MiniTezCluster.APPJAR);
  fs.copyFromLocalFile(appMasterJar, appRemoteJar);
  fs.setPermission(appRemoteJar, new FsPermission("777"));
  conf.set(TezConfiguration.TEZ_LIB_URIS, appRemoteJar.toUri().toString());
  LOG.info("Set TEZ-LIB-URI to: " + conf.get(TezConfiguration.TEZ_LIB_URIS));
  // VMEM monitoring disabled, PMEM monitoring enabled.
  conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
  conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
  try {
    Path stagingPath = FileContext.getFileContext(conf)
        .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
    /*
     * Re-configure the staging path on Windows if the file system is localFs.
     * We need to use an absolute path that contains the drive letter. The unit
     * test could run on a different drive than the AM. We can run into the
     * issue that job files are localized to the drive where the test runs on,
     * while the AM starts on a different drive and fails to find the job
     * metafiles. Using an absolute path can avoid this ambiguity.
     */
    if (Path.WINDOWS) {
      if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
            new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
      }
    }
    FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
    if (fc.util().exists(stagingPath)) {
      LOG.info(stagingPath + " exists! deleting...");
      fc.delete(stagingPath, true);
    }
    LOG.info("mkdir: " + stagingPath);
    fc.mkdir(stagingPath, null, true);

    // mkdir done directory as well
    String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
    Path doneDirPath = fc.makeQualified(new Path(doneDir));
    fc.mkdir(doneDirPath, null, true);
  } catch (IOException e) {
    throw new TezUncheckedException("Could not create staging directory. ", e);
  }
  conf.set(MRConfig.MASTER_ADDRESS, "test");

  // configure the shuffle service in NM
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
      new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
  conf.setClass(
      String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID),
      ShuffleHandler.class, Service.class);
  // Non-standard shuffle port
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
  conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class,
      ContainerExecutor.class);
  // TestMRJobs is for testing non-uberized operation only; see TestUberAM
  // for corresponding uberized tests.
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  super.serviceInit(conf);
}