List of usage examples for the method `org.apache.hadoop.fs.FileStatus#getPermission()`
public FsPermission getPermission()
From source file:TestFuseDFS.java
License:Apache License
/** * use shell to create a dir and then use filesys to see it exists. *///from w ww. j av a 2 s . c o m public void testChmod() throws IOException, InterruptedException, Exception { try { // First create a new directory with mkdirs Path path = new Path("/foo"); Runtime r = Runtime.getRuntime(); String cmd = "mkdir -p " + mpoint + path.toString(); Process p = r.exec(cmd); assertTrue(p.waitFor() == 0); // check it is there assertTrue(fileSys.getFileStatus(path).isDir()); cmd = "chmod 777 " + mpoint + path.toString(); p = r.exec(cmd); assertTrue(p.waitFor() == 0); FileStatus foo = fileSys.getFileStatus(path); FsPermission perm = foo.getPermission(); assertTrue(perm.toShort() == 0777); } catch (Exception e) { e.printStackTrace(); throw e; } }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/**
 * Verifies that a permission passed to {@code create()} is echoed back by
 * {@code getFileStatus()} for a spread of permission bit patterns.
 */
@Test
public void createFileWithPermission() throws Exception {
    int[] modes = {0111, 0222, 0333, 0444, 0555, 0666, 0777, 0755, 0733, 0644, 0533, 0511};
    for (int mode : modes) {
        Path file = new Path("/createfile" + mode);
        FsPermission requested = FsPermission.createImmutable((short) mode);
        // Only the path and permission matter; the remaining create()
        // arguments are ignored by this FileSystem implementation.
        FSDataOutputStream out = sTFS.create(file, requested, false /* ignored */, 10 /* ignored */,
            (short) 1 /* ignored */, 512 /* ignored */, null /* ignored */);
        out.writeBytes("Test Bytes");
        out.close();
        Assert.assertEquals(requested, sTFS.getFileStatus(file).getPermission());
    }
}
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/**
 * Verifies that a permission passed to {@code mkdirs()} is echoed back by
 * {@code getFileStatus()} for a spread of permission bit patterns.
 */
@Test
public void mkdirsWithPermission() throws Exception {
    int[] modes = {0111, 0222, 0333, 0444, 0555, 0666, 0777, 0755, 0733, 0644, 0533, 0511};
    for (int mode : modes) {
        Path dir = new Path("/createDir" + mode);
        FsPermission requested = FsPermission.createImmutable((short) mode);
        sTFS.mkdirs(dir, requested);
        Assert.assertEquals(requested, sTFS.getFileStatus(dir).getPermission());
    }
}
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Test for {@link FileSystem#setPermission(Path, org.apache.hadoop.fs.permission.FsPermission)}. * It will test changing the permission of file using TFS. *///from www . ja v a2 s . com @Test public void chmod() throws Exception { Path fileA = new Path("/chmodfileA"); create(sTFS, fileA); FileStatus fs = sTFS.getFileStatus(fileA); Assert.assertTrue(sUfs.isFile(PathUtils.concatPath(sUfsRoot, fileA))); if (UnderFileSystemUtils.isHdfs(sUfs) && HadoopClientTestUtils.isHadoop1x()) { // If the UFS is hadoop 1.0, the org.apache.hadoop.fs.FileSystem.create uses default // permission option 0777. Assert.assertEquals((short) 0777, fs.getPermission().toShort()); } else { // Default permission should be 0644. Assert.assertEquals((short) 0644, fs.getPermission().toShort()); } sTFS.setPermission(fileA, FsPermission.createImmutable((short) 0755)); Assert.assertEquals((short) 0755, sTFS.getFileStatus(fileA).getPermission().toShort()); }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Tests the directory permission propagation to UFS. */// w w w . jav a 2 s .com @Test public void directoryPermissionForUfs() throws IOException { // Skip non-local and non-HDFS UFSs. Assume.assumeTrue(UnderFileSystemUtils.isLocal(sUfs) || UnderFileSystemUtils.isHdfs(sUfs)); Path dir = new Path("/root/directoryPermissionForUfsDir"); sTFS.mkdirs(dir); FileStatus fs = sTFS.getFileStatus(dir); String defaultOwner = fs.getOwner(); Short dirMode = fs.getPermission().toShort(); FileStatus parentFs = sTFS.getFileStatus(dir.getParent()); Short parentMode = parentFs.getPermission().toShort(); UfsStatus ufsStatus = sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir)); Assert.assertEquals(defaultOwner, ufsStatus.getOwner()); Assert.assertEquals((int) dirMode, (int) ufsStatus.getMode()); Assert.assertEquals((int) parentMode, (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir.getParent())).getMode()); short newMode = (short) 0755; FsPermission newPermission = new FsPermission(newMode); sTFS.setPermission(dir, newPermission); Assert.assertEquals((int) newMode, (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir)).getMode()); }
From source file:alluxio.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Test for {@link FileSystem#setPermission(Path, org.apache.hadoop.fs.permission.FsPermission)}. * It will test changing the permission of file using TFS. *//* www .ja va 2s. c o m*/ @Test public void chmod() throws Exception { Path fileA = new Path("/chmodfileA"); create(sTFS, fileA); FileStatus fs = sTFS.getFileStatus(fileA); Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, fileA))); // Default permission should be 0644 Assert.assertEquals((short) 0644, fs.getPermission().toShort()); if (CommonUtils.isUfsObjectStorage(sUfsRoot)) { // For object storage ufs, setMode is not supported. mThrown.expect(IOException.class); mThrown.expectMessage("setOwner/setMode is not supported to object storage UFS via Alluxio."); sTFS.setPermission(fileA, FsPermission.createImmutable((short) 0755)); return; } sTFS.setPermission(fileA, FsPermission.createImmutable((short) 0755)); Assert.assertEquals((short) 0755, sTFS.getFileStatus(fileA).getPermission().toShort()); }
From source file:alluxio.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Tests the directory permission propagation to UFS. *///from ww w .ja v a2s . co m @Test public void directoryPermissionForUfs() throws IOException { if (!(sUfs instanceof LocalUnderFileSystem) && !(sUfs instanceof HdfsUnderFileSystem)) { // Skip non-local and non-HDFS UFSs. return; } Path dir = new Path("/root/dir/"); sTFS.mkdirs(dir); FileStatus fs = sTFS.getFileStatus(dir); String defaultOwner = fs.getOwner(); Short dirMode = fs.getPermission().toShort(); FileStatus parentFs = sTFS.getFileStatus(dir.getParent()); Short parentMode = parentFs.getPermission().toShort(); Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, dir))); Assert.assertEquals((int) dirMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir))); Assert.assertEquals((int) parentMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir.getParent()))); short newMode = (short) 0755; FsPermission newPermission = new FsPermission(newMode); sTFS.setPermission(dir, newPermission); Assert.assertEquals((int) newMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir))); }
From source file:alluxio.hadoop.HadoopUtils.java
License:Apache License
/** * Returns a string representation of a Hadoop {@link FileStatus}. * * @param fs Hadoop {@link FileStatus}/*from w ww . j av a2 s.c o m*/ * @return its string representation */ public static String toStringHadoopFileStatus(FileStatus fs) { StringBuilder sb = new StringBuilder(); sb.append("HadoopFileStatus: Path: ").append(fs.getPath()); sb.append(" , Length: ").append(fs.getLen()); // Use isDir instead of isDirectory for compatibility with hadoop 1. sb.append(" , IsDir: ").append(fs.isDir()); sb.append(" , BlockReplication: ").append(fs.getReplication()); sb.append(" , BlockSize: ").append(fs.getBlockSize()); sb.append(" , ModificationTime: ").append(fs.getModificationTime()); sb.append(" , AccessTime: ").append(fs.getAccessTime()); sb.append(" , Permission: ").append(fs.getPermission()); sb.append(" , Owner: ").append(fs.getOwner()); sb.append(" , Group: ").append(fs.getGroup()); return sb.toString(); }
From source file:alluxio.underfs.hdfs.HdfsUnderFileSystem.java
License:Apache License
/**
 * Changes the permission of the given path on the underlying HDFS.
 *
 * @param path the path whose mode to change
 * @param mode the new POSIX mode bits
 * @throws IOException if the status lookup or the permission change fails
 */
@Override
public void setMode(String path, short mode) throws IOException {
    try {
        FileStatus fileStatus = mFileSystem.getFileStatus(new Path(path));
        FsPermission newPermission = new FsPermission(mode);
        // Log both sides as FsPermission: the original logged the raw short,
        // yielding mixed "from rwxr-xr-x to 493" style messages.
        LOG.info("Changing file '{}' permissions from: {} to {}", fileStatus.getPath(),
            fileStatus.getPermission(), newPermission);
        mFileSystem.setPermission(fileStatus.getPath(), newPermission);
    } catch (IOException e) {
        LOG.error("Fail to set permission for {} with perm {}", path, mode, e);
        throw e;
    }
}
From source file:azkaban.crypto.Decryptions.java
License:Open Source License
public String decrypt(final String cipheredText, final String passphrasePath, final FileSystem fs) throws IOException { Preconditions.checkNotNull(cipheredText); Preconditions.checkNotNull(passphrasePath); final Path path = new Path(passphrasePath); Preconditions.checkArgument(fs.exists(path), "File does not exist at " + passphrasePath); Preconditions.checkArgument(fs.isFile(path), "Passphrase path is not a file. " + passphrasePath); final FileStatus fileStatus = fs.getFileStatus(path); Preconditions.checkArgument(USER_READ_PERMISSION_ONLY.equals(fileStatus.getPermission()), "Passphrase file should only have read only permission on only user. " + passphrasePath); final Crypto crypto = new Crypto(); try (BufferedReader br = new BufferedReader( new InputStreamReader(fs.open(path), Charset.defaultCharset()))) { final String passphrase = br.readLine(); final String decrypted = crypto.decrypt(cipheredText, passphrase); Preconditions.checkNotNull(decrypted, "Was not able to decrypt"); return decrypted; }//from w w w . j av a2s .c o m }