List of usage examples for org.apache.hadoop.fs FileSystem setOwner
public void setOwner(Path p, String username, String groupname) throws IOException
From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java
License:Open Source License
/** * Change Ownership of a Path/* w w w. j ava2 s .c o m*/ * * @param fs * @param path * @param owner * @param group * @param recursive * @return Error Message */ protected String changeOwnership(FileSystem fs, Path path, String owner, String group, boolean recursive) { String error = null; try { FileStatus stat = fs.getFileStatus(path); if (stat.getOwner().equals(System.getProperty("user.name"))) { if (recursive) { FileStatus[] fsA = fs.listStatus(path); for (int i = 0; i < fsA.length && error == null; ++i) { error = changeOwnership(fs, fsA[i].getPath(), owner, group, recursive); } } if (error == null) { fs.setOwner(path, owner, group); } } else { error = LanguageManagerWF.getText("HdfsInterface.changeprop.ownererror", new Object[] { path.toString() }); } // fs.close(); } catch (IOException e) { logger.error("Cannot operate on the file or directory: " + path.toString()); logger.error(e.getMessage()); error = LanguageManagerWF.getText("HdfsInterface.changeprop.fileaccess", new Object[] { path }); } if (error != null) { logger.debug(error); } return error; }
From source file:com.revolutionanalytics.hadoop.hdfs.FileUtils.java
License:Apache License
/**
 * Applies owner/group changes to a list of paths given as parallel arrays.
 *
 * An empty string in {@code ow} or {@code gp} means "leave that field
 * unchanged" (passed to HDFS as {@code null}); entries where both owner and
 * group are empty are skipped entirely.
 *
 * @param fs the file system to operate on
 * @param p  path strings, one per entry
 * @param ow new owner per entry ("" = keep current owner)
 * @param gp new group per entry ("" = keep current group)
 * @throws IOException if any ownership change fails
 */
public static void setOwner(FileSystem fs, String[] p, String[] ow, String[] gp) throws IOException {
    for (int i = 0; i < p.length; i++) {
        String owner = ow[i].isEmpty() ? null : ow[i];
        String group = gp[i].isEmpty() ? null : gp[i];
        if (owner == null && group == null) {
            continue; // nothing to change for this entry
        }
        fs.setOwner(new Path(p[i]), owner, group);
    }
}
From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutor.java
License:Apache License
/**
 * Processes a batch of records, applying the configured HDFS metadata task
 * (create empty file, move/rename, chown/chmod/ACL, or delete) for each
 * record. All HDFS calls run as the target user via UGI.doAs(); any failure
 * is routed to the stage's error-record handler rather than aborting the
 * batch.
 */
@Override
public void write(Batch batch) throws StageException {
    final ELVars variables = getContext().createELVars();
    final FileSystem fs = hdfsConnection.getFs();
    Iterator<Record> it = batch.getRecords();
    while (it.hasNext()) {
        Record record = it.next();
        RecordEL.setRecordInContext(variables, record);
        // Execute all configured HDFS metadata operations as target user
        try {
            hdfsConnection.getUGI().doAs((PrivilegedExceptionAction<Void>) () -> {
                // Target path is resolved per record via EL evaluation.
                Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
                LOG.info("Working on file: " + workingFile);
                // Create empty file if configured
                if (actions.taskType == TaskType.CREATE_EMPTY_FILE) {
                    ensureDirectoryExists(fs, workingFile.getParent());
                    if (!fs.createNewFile(workingFile)) {
                        throw new IOException("Can't create file (probably already exists): " + workingFile);
                    }
                }
                // Move and/or rename combine into a single fs.rename() call:
                // the destination is (new location or old parent) + (new name or old name).
                if (actions.taskType == TaskType.CHANGE_EXISTING_FILE
                        && (actions.shouldMoveFile || actions.shouldRename)) {
                    Path newPath = workingFile.getParent();
                    String newName = workingFile.getName();
                    if (actions.shouldMoveFile) {
                        newPath = new Path(evaluate(variables, "newLocation", actions.newLocation));
                    }
                    if (actions.shouldRename) {
                        newName = evaluate(variables, "newName", actions.newName);
                    }
                    Path destinationFile = new Path(newPath, newName);
                    ensureDirectoryExists(fs, newPath);
                    LOG.debug("Renaming to: {}", destinationFile);
                    if (!fs.rename(workingFile, destinationFile)) {
                        throw new IOException(
                                Utils.format("Can't rename '{}' to '{}''", workingFile, destinationFile));
                    }
                    // Subsequent operations (and the emitted event) target the new path.
                    workingFile = destinationFile;
                }
                // Ownership / permission / ACL changes apply to both freshly
                // created files and existing ones.
                if (actions.taskType.isOneOf(TaskType.CHANGE_EXISTING_FILE, TaskType.CREATE_EMPTY_FILE)) {
                    if (actions.shouldChangeOwnership) {
                        String newOwner = evaluate(variables, "newOwner", actions.newOwner);
                        String newGroup = evaluate(variables, "newGroup", actions.newGroup);
                        LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
                        fs.setOwner(workingFile, newOwner, newGroup);
                    }
                    if (actions.shouldSetPermissions) {
                        String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
                        FsPermission fsPerms = HdfsUtils.parseFsPermission(stringPerms);
                        LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
                        fs.setPermission(workingFile, fsPerms);
                    }
                    if (actions.shouldSetAcls) {
                        String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
                        List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
                        LOG.debug("Applying ACLs: {}", stringAcls);
                        fs.setAcl(workingFile, acls);
                    }
                }
                if (actions.taskType == TaskType.REMOVE_FILE) {
                    // Recursive delete; return value intentionally ignored
                    // (missing file is not treated as an error here).
                    fs.delete(workingFile, true);
                }
                // Issue event with the final file name (e.g. the renamed one if applicable)
                actions.taskType.getEventCreator().create(getContext()).with("filepath", workingFile.toString())
                        .with("filename", workingFile.getName()).createAndSend();
                LOG.debug("Done changing metadata on file: {}", workingFile);
                return null;
            });
        } catch (Throwable e) {
            // Hadoop libraries will wrap any non InterruptedException, RuntimeException, Error or IOException
            // to UndeclaredThrowableException, so we manually unwrap it here and properly propagate it to user.
            if (e instanceof UndeclaredThrowableException) {
                e = e.getCause();
            }
            LOG.error("Failure when applying metadata changes to HDFS", e);
            // Per-record error handling: the rest of the batch continues.
            errorRecordHandler.onError(
                    new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
        }
    }
}
From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataxecutor.HdfsMetadataExecutor.java
License:Apache License
/**
 * Processes a batch of records, applying the configured HDFS metadata
 * changes (optional move, then ownership, permissions, and ACLs) to the
 * per-record target file. All HDFS calls run as the target user via
 * UGI.doAs(); a failure sends the record to the error handler instead of
 * aborting the batch.
 */
@Override
public void write(Batch batch) throws StageException {
    final ELVars variables = getContext().createELVars();
    final FileSystem fs = hdfsConnection.getFs();
    Iterator<Record> it = batch.getRecords();
    while (it.hasNext()) {
        Record record = it.next();
        RecordEL.setRecordInContext(variables, record);
        // Execute all configured HDFS metadata operations as target user
        try {
            hdfsConnection.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    // Target path is resolved per record via EL evaluation.
                    Path workingFile = new Path(evaluate(variables, "filePath", actions.filePath));
                    LOG.info("Working on file: " + workingFile);
                    if (actions.shouldMoveFile) {
                        Path destinationFile = new Path(
                                evaluate(variables, "newLocation", actions.newLocation));
                        // Create the destination's parent directory on demand.
                        Path destinationParent = destinationFile.getParent();
                        if (!fs.exists(destinationParent)) {
                            LOG.debug("Creating parent directory for destination file: {}", destinationParent);
                            if (!fs.mkdirs(destinationParent)) {
                                throw new IOException("Can't create directory: " + destinationParent);
                            }
                        }
                        LOG.debug("Renaming to: {}", destinationFile);
                        if (!fs.rename(workingFile, destinationFile)) {
                            throw new IOException("Can't rename file to: " + destinationFile);
                        }
                        // Subsequent metadata changes target the moved file.
                        workingFile = destinationFile;
                    }
                    if (actions.shouldChangeOwnership) {
                        String newOwner = evaluate(variables, "newOwner", actions.newOwner);
                        String newGroup = evaluate(variables, "newGroup", actions.newGroup);
                        LOG.debug("Applying ownership: user={} and group={}", newOwner, newGroup);
                        fs.setOwner(workingFile, newOwner, newGroup);
                    }
                    if (actions.shouldSetPermissions) {
                        String stringPerms = evaluate(variables, "newPermissions", actions.newPermissions);
                        // NOTE(review): FsPermission(String) expects a
                        // symbolic spec; confirm configured values match.
                        FsPermission fsPerms = new FsPermission(stringPerms);
                        LOG.debug("Applying permissions: {} loaded from value '{}'", fsPerms, stringPerms);
                        fs.setPermission(workingFile, fsPerms);
                    }
                    if (actions.shouldSetAcls) {
                        String stringAcls = evaluate(variables, "newAcls", actions.newAcls);
                        List<AclEntry> acls = AclEntry.parseAclSpec(stringAcls, true);
                        LOG.debug("Applying ACLs: {}", stringAcls);
                        fs.setAcl(workingFile, acls);
                    }
                    // Issue event with the final file name (e.g. the renamed one if applicable)
                    EventRecord event = getContext().createEventRecord("file-changed", 1);
                    event.set(Field.create(Field.Type.MAP, new ImmutableMap.Builder<String, Field>()
                            .put("filepath", Field.create(Field.Type.STRING, workingFile.toString())).build()));
                    getContext().toEvent(event);
                    LOG.debug("Done changing metadata on file: {}", workingFile);
                    return null;
                }
            });
        } catch (Exception e) {
            LOG.error("Failure when applying metadata changes to HDFS", e);
            // Per-record error handling: the rest of the batch continues.
            errorRecordHandler.onError(
                    new OnRecordErrorException(record, HdfsMetadataErrors.HDFS_METADATA_000, e.getMessage()));
        }
    }
}
From source file:com.trendmicro.hdfs.webdav.test.TestCopySimple.java
License:Apache License
/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and seeds /test with rw, ro, and public directories plus a
 * sample file, all owned by the test owner user.
 */
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    // Allow the current user to proxy for members of "users" from localhost.
    String proxy = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set("hadoop.proxyuser." + proxy + ".groups", "users");
    conf.set("hadoop.proxyuser." + proxy + ".hosts", "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);
    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());
    // Zero umask so the permissions below are applied exactly as written.
    FsPermission.setUMask(conf, new FsPermission((short) 0));
    FileSystem fs = minicluster.getTestFileSystem();
    Path testRoot = new Path("/test");
    FsPermission worldAll = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    assertTrue(fs.mkdirs(testRoot, worldAll));
    fs.setOwner(testRoot, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);
    // Remaining fixture content is created as the owner user.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem ownerFs = minicluster.getTestFileSystem();
            FsPermission rwPerm = new FsPermission(FsAction.ALL, FsAction.WRITE_EXECUTE, FsAction.NONE);
            FsPermission roPerm = new FsPermission(FsAction.READ_EXECUTE, FsAction.NONE, FsAction.NONE);
            FsPermission pubPerm = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
            assertTrue(ownerFs.mkdirs(new Path("/test/rw"), rwPerm));
            assertTrue(ownerFs.mkdirs(new Path("/test/ro"), roPerm));
            assertTrue(ownerFs.mkdirs(new Path("/test/public"), pubPerm));
            FsPermission filePerm = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE);
            FSDataOutputStream out = ownerFs.create(new Path("/test/rw/file1"), filePerm, true, 4096,
                    (short) 1, 65536, null);
            assertNotNull(out);
            out.write(testData.getBytes());
            out.close();
            return null;
        }
    });
}
From source file:com.trendmicro.hdfs.webdav.test.TestDeleteSimple.java
License:Apache License
/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and seeds /test with a private and a public directory, each
 * holding two sample files, all owned by the test owner user.
 */
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    // Allow the current user to proxy for members of "users" from localhost.
    String proxy = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set("hadoop.proxyuser." + proxy + ".groups", "users");
    conf.set("hadoop.proxyuser." + proxy + ".hosts", "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);
    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());
    // Zero umask so the permissions below are applied exactly as written.
    FsPermission.setUMask(conf, new FsPermission((short) 0));
    FileSystem fs = minicluster.getTestFileSystem();
    Path testRoot = new Path("/test");
    assertTrue(fs.mkdirs(testRoot, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(testRoot, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);
    // Remaining fixture content is created as the owner user.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem ownerFs = minicluster.getTestFileSystem();
            assertTrue(ownerFs.mkdirs(new Path("/test/private"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(ownerFs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FsPermission privatePerm = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE);
            FsPermission publicPerm = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.READ);
            FSDataOutputStream out;
            out = ownerFs.create(new Path("/test/private/file1"), privatePerm, true, 4096, (short) 1,
                    65536, null);
            assertNotNull(out);
            out.write(testData.getBytes());
            out.close();
            out = ownerFs.create(new Path("/test/private/file2"), privatePerm, true, 4096, (short) 1,
                    65536, null);
            assertNotNull(out);
            out.write(testData.getBytes());
            out.close();
            out = ownerFs.create(new Path("/test/public/file3"), publicPerm, true, 4096, (short) 1,
                    65536, null);
            assertNotNull(out);
            out.write(testData.getBytes());
            out.close();
            out = ownerFs.create(new Path("/test/public/file4"), publicPerm, true, 4096, (short) 1,
                    65536, null);
            assertNotNull(out);
            out.write(testData.getBytes());
            out.close();
            return null;
        }
    });
}
From source file:com.trendmicro.hdfs.webdav.test.TestGetSimple.java
License:Apache License
/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and seeds /test with one publicly readable and one private
 * data file, both owned by the test owner user.
 */
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    // Allow the current user to proxy for members of "users" from localhost.
    String proxy = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set("hadoop.proxyuser." + proxy + ".groups", "users");
    conf.set("hadoop.proxyuser." + proxy + ".hosts", "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);
    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());
    // Zero umask so the permissions below are applied exactly as written.
    FsPermission.setUMask(conf, new FsPermission((short) 0));
    FileSystem fs = minicluster.getTestFileSystem();
    Path testRoot = new Path("/test");
    assertTrue(fs.mkdirs(testRoot, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(testRoot, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);
    // Remaining fixture content is created as the owner user.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem ownerFs = minicluster.getTestFileSystem();
            FSDataOutputStream out;
            out = ownerFs.create(new Path("/test/pubdata"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(out);
            out.write(testPublicData.getBytes());
            out.close();
            out = ownerFs.create(new Path("/test/privdata"),
                    new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(out);
            out.write(testPrivateData.getBytes());
            out.close();
            return null;
        }
    });
}
From source file:com.trendmicro.hdfs.webdav.test.TestMkcolSimple.java
License:Apache License
@BeforeClass public static void setup() throws Exception { Configuration conf = minicluster.getConfiguration(); conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups", "users"); conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts", "localhost"); conf.set("hadoop.webdav.authentication.type", "simple"); conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true); minicluster.startMiniCluster(gatewayUser); LOG.info("Gateway started on port " + minicluster.getGatewayPort()); FsPermission.setUMask(conf, new FsPermission((short) 0)); FileSystem fs = minicluster.getTestFileSystem(); Path path = new Path("/test"); assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL))); fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]); ownerUser.doAs(new PrivilegedExceptionAction<Void>() { public Void run() throws Exception { FileSystem fs = minicluster.getTestFileSystem(); assertTrue(fs.mkdirs(new Path("/test/private"), new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE))); assertTrue(fs.mkdirs(new Path("/test/public"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL))); return null; }//from w w w . j a va 2 s . c o m }); }
From source file:com.trendmicro.hdfs.webdav.test.TestMoveSimple.java
License:Apache License
@BeforeClass public static void setup() throws Exception { Configuration conf = minicluster.getConfiguration(); conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups", "users"); conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts", "localhost"); conf.set("hadoop.webdav.authentication.type", "simple"); conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true); minicluster.startMiniCluster(gatewayUser); LOG.info("Gateway started on port " + minicluster.getGatewayPort()); FsPermission.setUMask(conf, new FsPermission((short) 0)); FileSystem fs = minicluster.getTestFileSystem(); Path path = new Path("/test"); assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL))); fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]); ownerUser.doAs(new PrivilegedExceptionAction<Void>() { public Void run() throws Exception { FileSystem fs = minicluster.getTestFileSystem(); assertTrue(fs.mkdirs(new Path("/test/owner"), new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE))); assertTrue(fs.mkdirs(new Path("/test/public"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL))); FSDataOutputStream os = fs.create(new Path("/test/owner/file1"), new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536, null);//from w w w . ja v a2 s . c o m assertNotNull(os); os.write(testData.getBytes()); os.close(); os = fs.create(new Path("/test/public/file1"), new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536, null); assertNotNull(os); os.write(testData.getBytes()); os.close(); return null; } }); }
From source file:com.trendmicro.hdfs.webdav.test.TestPropfindSimple.java
License:Apache License
@BeforeClass public static void setup() throws Exception { Configuration conf = minicluster.getConfiguration(); conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups", "users"); conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts", "localhost"); conf.set("hadoop.webdav.authentication.type", "simple"); conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true); minicluster.startMiniCluster(gatewayUser); LOG.info("Gateway started on port " + minicluster.getGatewayPort()); FsPermission.setUMask(conf, new FsPermission((short) 0)); FileSystem fs = minicluster.getTestFileSystem(); Path path = new Path("/test"); assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL))); fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]); ownerUser.doAs(new PrivilegedExceptionAction<Void>() { public Void run() throws Exception { FileSystem fs = minicluster.getTestFileSystem(); for (Path dir : publicDirPaths) { assertTrue(//from w w w . j av a 2 s . c o m fs.mkdirs(dir, new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE))); } for (Path dir : privateDirPaths) { assertTrue(fs.mkdirs(dir, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE))); } for (Path path : publicFilePaths) { FSDataOutputStream os = fs.create(path, new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536, null); assertNotNull(os); os.write(testPublicData.getBytes()); os.close(); } for (Path path : privateFilePaths) { FSDataOutputStream os = fs.create(path, new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536, null); assertNotNull(os); os.write(testPrivateData.getBytes()); os.close(); } return null; } }); }