List of usage examples for org.apache.hadoop.fs.FileSystem#setAcl
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException
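FileSystem.setAcl replaces the entire ACL of the given path with the supplied entries, so the aclSpec must describe a complete ACL (owner, group, and other entries included); it is typically built with AclEntry.parseAclSpec or AclEntry.Builder, as the examples below show. A minimal standalone sketch of a call, assuming an HDFS cluster with dfs.namenode.acls.enabled=true; the path and the named user are illustrative placeholders:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;

public class SetAclExample {
    public static void main(String[] args) throws Exception {
        // Placeholder configuration; fs.defaultFS is expected to point at an HDFS namenode.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path target = new Path("/tmp/acl-demo");
        fs.mkdirs(target);

        // Parse a full ACL spec (owner, named user, group, other) and replace the path's ACL with it.
        List<AclEntry> aclSpec = AclEntry.parseAclSpec("user::rwx,user:alice:rw-,group::r-x,other::---", true);
        fs.setAcl(target, aclSpec);

        // Read the ACL back to verify what was applied.
        System.out.println(fs.getAclStatus(target));
    }
}

File systems that do not support ACLs throw UnsupportedOperationException from setAcl, which the Alluxio example below catches explicitly.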
From source file:alluxio.underfs.hdfs.acl.SupportedHdfsAclProvider.java
License:Apache License
@Override
public void setAclEntries(FileSystem hdfs, String path,
    List<alluxio.security.authorization.AclEntry> aclEntries) throws IOException {
  // convert AccessControlList into hdfsAcl
  List<AclEntry> aclSpecs = new ArrayList<>();
  for (alluxio.security.authorization.AclEntry entry : aclEntries) {
    AclEntry hdfsAclEntry = getHdfsAclEntry(entry);
    aclSpecs.add(hdfsAclEntry);
  }
  // set hdfsAcl
  try {
    hdfs.setAcl(new Path(path), aclSpecs);
  } catch (UnsupportedOperationException e) {
    // noop if hdfs does not support acl
  }
}
From source file:com.mellanox.r4h.DistributedFileSystem.java
License:Apache License
/**
 * {@inheritDoc}
 */
@Override
public void setAcl(Path path, final List<AclEntry> aclSpec) throws IOException {
  Path absF = fixRelativePart(path);
  new FileSystemLinkResolver<Void>() {
    @Override
    public Void doCall(final Path p) throws IOException {
      dfs.setAcl(getPathName(p), aclSpec);
      return null;
    }

    @Override
    public Void next(final FileSystem fs, final Path p) throws IOException {
      fs.setAcl(p, aclSpec);
      return null;
    }
  }.resolve(this, absF);
}
From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutor.java
License:Apache License
@Override
public void write(Batch batch) throws StageException {
  final ELVars variables = getContext().createELVars();
  final FileSystem fs = hdfsConnection.getFs();

  Iterator<Record> it = batch.getRecords();
  while (it.hasNext()) {
    Record record = it.next();
    RecordEL.setRecordInContext(variables, record);

    // Execute all configured HDFS metadata operations as target user
    try {
      hdfsConnection.getUGI().doAs((PrivilegedExceptionAction<Void>) () -> {
        Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
        LOG.info("Working on file: " + workingFile);

        // Create empty file if configured
        if (actions.taskType == TaskType.CREATE_EMPTY_FILE) {
          ensureDirectoryExists(fs, workingFile.getParent());
          if (!fs.createNewFile(workingFile)) {
            throw new IOException("Can't create file (probably already exists): " + workingFile);
          }
        }

        if (actions.taskType == TaskType.CHANGE_EXISTING_FILE
            && (actions.shouldMoveFile || actions.shouldRename)) {
          Path newPath = workingFile.getParent();
          String newName = workingFile.getName();
          if (actions.shouldMoveFile) {
            newPath = new Path(evaluate(variables, "newLocation", actions.newLocation));
          }
          if (actions.shouldRename) {
            newName = evaluate(variables, "newName", actions.newName);
          }

          Path destinationFile = new Path(newPath, newName);
          ensureDirectoryExists(fs, newPath);

          LOG.debug("Renaming to: {}", destinationFile);
          if (!fs.rename(workingFile, destinationFile)) {
            throw new IOException(Utils.format("Can't rename '{}' to '{}''", workingFile, destinationFile));
          }
          workingFile = destinationFile;
        }

        if (actions.taskType.isOneOf(TaskType.CHANGE_EXISTING_FILE, TaskType.CREATE_EMPTY_FILE)) {
          if (actions.shouldChangeOwnership) {
            String newOwner = evaluate(variables, "newOwner", actions.newOwner);
            String newGroup = evaluate(variables, "newGroup", actions.newGroup);
            LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
            fs.setOwner(workingFile, newOwner, newGroup);
          }

          if (actions.shouldSetPermissions) {
            String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
            FsPermission fsPerms = HdfsUtils.parseFsPermission(stringPerms);
            LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
            fs.setPermission(workingFile, fsPerms);
          }

          if (actions.shouldSetAcls) {
            String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
            List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
            LOG.debug("Applying ACLs: {}", stringAcls);
            fs.setAcl(workingFile, acls);
          }
        }

        if (actions.taskType == TaskType.REMOVE_FILE) {
          fs.delete(workingFile, true);
        }

        // Issue event with the final file name (e.g. the renamed one if applicable)
        actions.taskType.getEventCreator().create(getContext())
            .with("filepath", workingFile.toString())
            .with("filename", workingFile.getName())
            .createAndSend();

        LOG.debug("Done changing metadata on file: {}", workingFile);
        return null;
      });
    } catch (Throwable e) {
      // Hadoop libraries will wrap any non InterruptedException, RuntimeException, Error or IOException
      // to UndeclaredThrowableException, so we manually unwrap it here and properly propagate it to user.
      if (e instanceof UndeclaredThrowableException) {
        e = e.getCause();
      }
      LOG.error("Failure when applying metadata changes to HDFS", e);
      errorRecordHandler.onError(
          new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
    }
  }
}
From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataxecutor.HdfsMetadataExecutor.java
License:Apache License
@Override
public void write(Batch batch) throws StageException {
  final ELVars variables = getContext().createELVars();
  final FileSystem fs = hdfsConnection.getFs();

  Iterator<Record> it = batch.getRecords();
  while (it.hasNext()) {
    Record record = it.next();
    RecordEL.setRecordInContext(variables, record);

    // Execute all configured HDFS metadata operations as target user
    try {
      hdfsConnection.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
          LOG.info("Working on file: " + workingFile);

          if (actions.shouldMoveFile) {
            Path destinationFile = new Path(evaluate(variables, "newLocation", actions.newLocation));

            Path destinationParent = destinationFile.getParent();
            if (!fs.exists(destinationParent)) {
              LOG.debug("Creating parent directory for destination file: {}", destinationParent);
              if (!fs.mkdirs(destinationParent)) {
                throw new IOException("Can't create directory: " + destinationParent);
              }
            }

            LOG.debug("Renaming to: {}", destinationFile);
            if (!fs.rename(workingFile, destinationFile)) {
              throw new IOException("Can't rename file to: " + destinationFile);
            }
            workingFile = destinationFile;
          }

          if (actions.shouldChangeOwnership) {
            String newOwner = evaluate(variables, "newOwner", actions.newOwner);
            String newGroup = evaluate(variables, "newGroup", actions.newGroup);
            LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
            fs.setOwner(workingFile, newOwner, newGroup);
          }

          if (actions.shouldSetPermissions) {
            String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
            FsPermission fsPerms = new FsPermission(stringPerms);
            LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
            fs.setPermission(workingFile, fsPerms);
          }

          if (actions.shouldSetAcls) {
            String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
            List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
            LOG.debug("Applying ACLs: {}", stringAcls);
            fs.setAcl(workingFile, acls);
          }

          // Issue event with the final file name (e.g. the renamed one if applicable)
          EventRecord event = getContext().createEventRecord("file-changed", 1);
          event.set(Field.create(Field.Type.MAP, new ImmutableMap.Builder<String, Field>()
              .put("filepath", Field.create(Field.Type.STRING, workingFile.toString()))
              .build()));
          getContext().toEvent(event);

          LOG.debug("Done changing metadata on file: {}", workingFile);
          return null;
        }
      });
    } catch (Exception e) {
      LOG.error("Failure when applying metadata changes to HDFS", e);
      errorRecordHandler.onError(
          new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
    }
  }
}
From source file:org.apache.oozie.action.hadoop.TestGitActionExecutor.java
License:Apache License
public void testAccessKeyACLsSecure() throws Exception {
  final GitActionExecutor ae = new GitActionExecutor();

  final Path testKey = new Path(getAppPath().toString() + "/test_key");
  createTestFile(testKey);

  // set file permissions to be secure -- allowing only the owner to read
  final FileSystem fs = getFileSystem();
  fs.setPermission(testKey, FsPermission.valueOf("-r--------"));
  fs.setAcl(testKey, AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));

  final String repoUrl = "https://github.com/apache/oozie";
  final String keyUrl = testKey.toString();
  final String destDir = "repoDir";
  final String branch = "myBranch";

  final Element actionXml = XmlUtils.parseXml("<git>"
      + "<resource-manager>" + getJobTrackerUri() + "</resource-manager>"
      + "<name-node>" + getNameNodeUri() + "</name-node>"
      + "<git-uri>" + repoUrl + "</git-uri>"
      + "<branch>" + branch + "</branch>"
      + "<key-path>" + keyUrl + "</key-path>"
      + "<destination-uri>" + destDir + "</destination-uri>"
      + "</git>");

  final XConfiguration protoConf = new XConfiguration();
  protoConf.set(WorkflowAppService.HADOOP_USER, getTestUser());

  final WorkflowJobBean wf = createBaseWorkflow(protoConf, GitActionExecutor.GIT_ACTION_TYPE + "-action");
  final WorkflowActionBean action = (WorkflowActionBean) wf.getActions().get(0);
  action.setType(ae.getType());

  final Context context = new Context(wf, action);
  final Configuration conf = ae.createBaseHadoopConf(context, actionXml);

  try {
    ae.setupActionConf(conf, context, actionXml, getFsTestCaseDir());
  } catch (final ActionExecutorException e) {
    fail("Unexpected exception, could not check ACLs: " + e.getMessage());
  }
}
From source file:org.apache.oozie.action.hadoop.TestGitActionExecutor.java
License:Apache License
public void testAccessKeyACLsInsecure() throws Exception {
  final GitActionExecutor ae = new GitActionExecutor();

  final Path testKey = new Path(getAppPath().toString() + "/test_key");
  createTestFile(testKey);

  // set file permissions to be secure -- allowing only the owner to read
  final FileSystem fs = getFileSystem();
  fs.setPermission(testKey, FsPermission.valueOf("-r--------"));
  fs.setAcl(testKey, AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::r--", true));

  final String repoUrl = "https://github.com/apache/oozie";
  final String keyUrl = testKey.toString();
  final String destDir = "repoDir";
  final String branch = "myBranch";

  final Element actionXml = XmlUtils.parseXml("<git>"
      + "<resource-manager>" + getJobTrackerUri() + "</resource-manager>"
      + "<name-node>" + getNameNodeUri() + "</name-node>"
      + "<git-uri>" + repoUrl + "</git-uri>"
      + "<branch>" + branch + "</branch>"
      + "<key-path>" + keyUrl + "</key-path>"
      + "<destination-uri>" + destDir + "</destination-uri>"
      + "</git>");

  final XConfiguration protoConf = new XConfiguration();
  protoConf.set(WorkflowAppService.HADOOP_USER, getTestUser());

  final WorkflowJobBean wf = createBaseWorkflow(protoConf, GitActionExecutor.GIT_ACTION_TYPE + "-action");
  final WorkflowActionBean action = (WorkflowActionBean) wf.getActions().get(0);
  action.setType(ae.getType());

  final Context context = new Context(wf, action);

  try {
    ae.createBaseHadoopConf(context, actionXml);
  } catch (final Exception e) {
    fail("Unexpected exception, could not create Hadoop configuration with insecure setup: " + e.getMessage());
  }
}
From source file:org.apache.sentry.hdfs.TestSentryAuthorizationProvider.java
License:Apache License
@Test
public void testProvider() throws Exception {
  admin.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      String sysUser = UserGroupInformation.getCurrentUser().getShortUserName();
      FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));

      List<AclEntry> baseAclList = new ArrayList<AclEntry>();
      AclEntry.Builder builder = new AclEntry.Builder();
      baseAclList.add(builder.setType(AclEntryType.USER).setScope(AclEntryScope.ACCESS).build());
      baseAclList.add(builder.setType(AclEntryType.GROUP).setScope(AclEntryScope.ACCESS).build());
      baseAclList.add(builder.setType(AclEntryType.OTHER).setScope(AclEntryScope.ACCESS).build());

      Path path1 = new Path("/user/authz/obj/xxx");
      fs.mkdirs(path1);
      fs.setAcl(path1, baseAclList);
      fs.mkdirs(new Path("/user/authz/xxx"));
      fs.mkdirs(new Path("/user/xxx"));

      // root
      Path path = new Path("/");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir before prefixes
      path = new Path("/user");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // prefix dir
      path = new Path("/user/authz");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir inside of prefix, no obj
      path = new Path("/user/authz/xxx");
      FileStatus status = fs.getFileStatus(path);
      Assert.assertEquals(sysUser, status.getOwner());
      Assert.assertEquals("supergroup", status.getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir inside of prefix, obj
      path = new Path("/user/authz/obj");
      Assert.assertEquals("hive", fs.getFileStatus(path).getOwner());
      Assert.assertEquals("hive", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0770), fs.getFileStatus(path).getPermission());
      Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty());

      List<AclEntry> acls = new ArrayList<AclEntry>();
      acls.add(new AclEntry.Builder().setName(sysUser).setType(AclEntryType.USER)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.ALL).build());
      acls.add(new AclEntry.Builder().setName("supergroup").setType(AclEntryType.GROUP)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.READ_EXECUTE).build());
      acls.add(new AclEntry.Builder().setName("user-authz").setType(AclEntryType.USER)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.ALL).build());
      Assert.assertEquals(new LinkedHashSet<AclEntry>(acls),
          new LinkedHashSet<AclEntry>(fs.getAclStatus(path).getEntries()));

      // dir inside of prefix, inside of obj
      path = new Path("/user/authz/obj/xxx");
      Assert.assertEquals("hive", fs.getFileStatus(path).getOwner());
      Assert.assertEquals("hive", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0770), fs.getFileStatus(path).getPermission());
      Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty());

      Path path2 = new Path("/user/authz/obj/path2");
      fs.mkdirs(path2);
      fs.setAcl(path2, baseAclList);

      // dir outside of prefix
      path = new Path("/user/xxx");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      return null;
    }
  });
}
From source file:org.apache.sentry.hdfs.TestSentryINodeAttributesProvider.java
License:Apache License
@Test
public void testProvider() throws Exception {
  admin.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      String sysUser = UserGroupInformation.getCurrentUser().getShortUserName();
      FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));

      List<AclEntry> baseAclList = new ArrayList<AclEntry>();
      AclEntry.Builder builder = new AclEntry.Builder();
      baseAclList.add(builder.setType(AclEntryType.USER).setScope(AclEntryScope.ACCESS).build());
      baseAclList.add(builder.setType(AclEntryType.GROUP).setScope(AclEntryScope.ACCESS).build());
      baseAclList.add(builder.setType(AclEntryType.OTHER).setScope(AclEntryScope.ACCESS).build());

      Path path1 = new Path("/user/authz/obj/xxx");
      fs.mkdirs(path1);
      fs.setAcl(path1, baseAclList);
      fs.mkdirs(new Path("/user/authz/xxx"));
      fs.mkdirs(new Path("/user/xxx"));

      // root
      Path path = new Path("/");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir before prefixes
      path = new Path("/user");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // prefix dir
      path = new Path("/user/authz");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir inside of prefix, no obj
      path = new Path("/user/authz/xxx");
      FileStatus status = fs.getFileStatus(path);
      Assert.assertEquals(sysUser, status.getOwner());
      Assert.assertEquals("supergroup", status.getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // dir inside of prefix, obj
      path = new Path("/user/authz/obj");
      Assert.assertEquals("hive", fs.getFileStatus(path).getOwner());
      Assert.assertEquals("hive", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(path).getPermission());
      Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty());

      List<AclEntry> acls = new ArrayList<AclEntry>();
      acls.add(new AclEntry.Builder().setName(sysUser).setType(AclEntryType.USER)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.ALL).build());
      acls.add(new AclEntry.Builder().setName("supergroup").setType(AclEntryType.GROUP)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.READ_EXECUTE).build());
      acls.add(new AclEntry.Builder().setName("user-authz").setType(AclEntryType.USER)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.ALL).build());
      Assert.assertEquals(new LinkedHashSet<AclEntry>(acls),
          new LinkedHashSet<AclEntry>(fs.getAclStatus(path).getEntries()));

      // dir inside of prefix, inside of obj
      path = new Path("/user/authz/obj/xxx");
      Assert.assertEquals("hive", fs.getFileStatus(path).getOwner());
      Assert.assertEquals("hive", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(path).getPermission());
      Assert.assertFalse(fs.getAclStatus(path).getEntries().isEmpty());

      Path path2 = new Path("/user/authz/obj/path2");
      fs.mkdirs(path2);
      fs.setAcl(path2, baseAclList);

      // dir outside of prefix
      path = new Path("/user/xxx");
      Assert.assertEquals(sysUser, fs.getFileStatus(path).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(path).getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), fs.getFileStatus(path).getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // stale and dir inside of prefix, obj
      System.setProperty("test.stale", "true");
      path = new Path("/user/authz/xxx");
      status = fs.getFileStatus(path);
      Assert.assertEquals(sysUser, status.getOwner());
      Assert.assertEquals("supergroup", status.getGroup());
      Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
      Assert.assertTrue(fs.getAclStatus(path).getEntries().isEmpty());

      // setPermission sets the permission for dir outside of prefix.
      // setUser/setGroup sets the user/group for dir outside of prefix.
      Path pathOutside = new Path("/user/xxx");
      fs.setPermission(pathOutside, new FsPermission((short) 0000));
      Assert.assertEquals(new FsPermission((short) 0000), fs.getFileStatus(pathOutside).getPermission());
      fs.setOwner(pathOutside, sysUser, "supergroup");
      Assert.assertEquals(sysUser, fs.getFileStatus(pathOutside).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(pathOutside).getGroup());

      // removeAcl removes the ACL entries for dir outside of prefix.
      List<AclEntry> aclsOutside = new ArrayList<AclEntry>(baseAclList);
      List<AclEntry> acl = new ArrayList<AclEntry>();
      acl.add(new AclEntry.Builder().setName("supergroup").setType(AclEntryType.GROUP)
          .setScope(AclEntryScope.ACCESS).setPermission(FsAction.READ_EXECUTE).build());
      aclsOutside.addAll(acl);
      fs.setAcl(pathOutside, aclsOutside);
      fs.removeAclEntries(pathOutside, acl);
      Assert.assertFalse(fs.getAclStatus(pathOutside).getEntries().containsAll(acl));

      // setPermission sets the permission for dir inside of prefix but not a hive obj.
      // setUser/setGroup sets the user/group for dir inside of prefix but not a hive obj.
      Path pathInside = new Path("/user/authz/xxx");
      fs.setPermission(pathInside, new FsPermission((short) 0000));
      Assert.assertEquals(new FsPermission((short) 0000), fs.getFileStatus(pathInside).getPermission());
      fs.setOwner(pathInside, sysUser, "supergroup");
      Assert.assertEquals(sysUser, fs.getFileStatus(pathInside).getOwner());
      Assert.assertEquals("supergroup", fs.getFileStatus(pathInside).getGroup());

      // removeAcl is a no op for dir inside of prefix.
      Assert.assertTrue(fs.getAclStatus(pathInside).getEntries().isEmpty());
      fs.removeAclEntries(pathInside, acl);
      Assert.assertTrue(fs.getAclStatus(pathInside).getEntries().isEmpty());

      // setPermission/setUser/setGroup is a no op for dir inside of prefix, and is a hive obj.
      Path pathInsideAndHive = new Path("/user/authz/obj");
      fs.setPermission(pathInsideAndHive, new FsPermission((short) 0000));
      Assert.assertEquals(new FsPermission((short) 0771), fs.getFileStatus(pathInsideAndHive).getPermission());
      fs.setOwner(pathInsideAndHive, sysUser, "supergroup");
      Assert.assertEquals("hive", fs.getFileStatus(pathInsideAndHive).getOwner());
      Assert.assertEquals("hive", fs.getFileStatus(pathInsideAndHive).getGroup());

      return null;
    }
  });
}