List of usage examples for the org.apache.hadoop.fs.Path constructor
public Path(URI aUri)
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Test for {@link FileSystem#setOwner(Path, String, String)} with HDFS UFS. It will test * changing both owner and group of file using TFS and propagate the change to UFS. *//*from ww w. j av a 2 s .c om*/ @Test public void changeNonexistentOwnerAndGroupForHdfs() throws Exception { // Skip non-HDFS UFSs. Assume.assumeTrue(UnderFileSystemUtils.isHdfs(sUfs)); Path fileC = new Path("/chownfileC-hdfs"); final String testOwner = "test-user1"; final String testGroup = "test-group1"; create(sTFS, fileC); FileStatus fs = sTFS.getFileStatus(fileC); String defaultOwner = fs.getOwner(); String defaultGroup = fs.getGroup(); Assert.assertEquals(defaultOwner, sUfs.getFileStatus(PathUtils.concatPath(sUfsRoot, fileC)).getOwner()); // Group can different because HDFS user to group mapping can be different from that in Alluxio. Assert.assertNotEquals(defaultOwner, testOwner); Assert.assertNotEquals(defaultGroup, testGroup); sTFS.setOwner(fileC, testOwner, testGroup); fs = sTFS.getFileStatus(fileC); Assert.assertEquals(testOwner, fs.getOwner()); Assert.assertEquals(testGroup, fs.getGroup()); UfsStatus ufsStatus = sUfs.getFileStatus(PathUtils.concatPath(sUfsRoot, fileC)); Assert.assertEquals(testOwner, ufsStatus.getOwner()); Assert.assertEquals(testGroup, ufsStatus.getGroup()); }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Test for {@link FileSystem#setOwner(Path, String, String)}. It will test both owner and group * are null./*w w w . j a va2 s .c o m*/ */ @Test public void checkNullOwnerAndGroup() throws Exception { Path fileD = new Path("/chownfileD"); create(sTFS, fileD); FileStatus fs = sTFS.getFileStatus(fileD); String defaultOwner = fs.getOwner(); String defaultGroup = fs.getGroup(); sTFS.setOwner(fileD, null, null); fs = sTFS.getFileStatus(fileD); Assert.assertEquals(defaultOwner, fs.getOwner()); Assert.assertEquals(defaultGroup, fs.getGroup()); }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Tests the directory permission propagation to UFS. *//* w w w .ja va 2s .com*/ @Test public void directoryPermissionForUfs() throws IOException { // Skip non-local and non-HDFS UFSs. Assume.assumeTrue(UnderFileSystemUtils.isLocal(sUfs) || UnderFileSystemUtils.isHdfs(sUfs)); Path dir = new Path("/root/directoryPermissionForUfsDir"); sTFS.mkdirs(dir); FileStatus fs = sTFS.getFileStatus(dir); String defaultOwner = fs.getOwner(); Short dirMode = fs.getPermission().toShort(); FileStatus parentFs = sTFS.getFileStatus(dir.getParent()); Short parentMode = parentFs.getPermission().toShort(); UfsStatus ufsStatus = sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir)); Assert.assertEquals(defaultOwner, ufsStatus.getOwner()); Assert.assertEquals((int) dirMode, (int) ufsStatus.getMode()); Assert.assertEquals((int) parentMode, (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir.getParent())).getMode()); short newMode = (short) 0755; FsPermission newPermission = new FsPermission(newMode); sTFS.setPermission(dir, newPermission); Assert.assertEquals((int) newMode, (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dir)).getMode()); }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Tests the parent directory permission when mkdirs recursively. */// www .ja va 2 s .co m @Test public void parentDirectoryPermissionForUfs() throws IOException { // Skip non-local and non-HDFS UFSs. Assume.assumeTrue(UnderFileSystemUtils.isLocal(sUfs) || UnderFileSystemUtils.isHdfs(sUfs)); String path = "/root/parentDirectoryPermissionForUfsDir/parentDirectoryPermissionForUfsFile"; Path fileA = new Path(path); Path dirA = fileA.getParent(); sTFS.mkdirs(dirA); short parentMode = (short) 0700; FsPermission newPermission = new FsPermission(parentMode); sTFS.setPermission(dirA, newPermission); create(sTFS, fileA); Assert.assertEquals((int) parentMode, (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, dirA)).getMode()); // Rename from dirA to dirB, file and its parent permission should be in sync with the source // dirA. Path fileB = new Path("/root/dirB/fileB"); Path dirB = fileB.getParent(); sTFS.rename(dirA, dirB); Assert.assertEquals((int) parentMode, (int) sUfs.getDirectoryStatus(PathUtils.concatPath(sUfsRoot, fileB.getParent())).getMode()); }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Tests the loaded file metadata from UFS having the same mode as that in the UFS. *///w w w. j a va2 s.c om @Test public void loadFileMetadataMode() throws Exception { // Skip non-local and non-HDFS-2 UFSs. Assume.assumeTrue(UnderFileSystemUtils.isLocal(sUfs) || (UnderFileSystemUtils.isHdfs(sUfs) && HadoopClientTestUtils.isHadoop2x())); List<Integer> permissionValues = Lists.newArrayList(0111, 0222, 0333, 0444, 0555, 0666, 0777, 0755, 0733, 0644, 0533, 0511); for (int value : permissionValues) { Path file = new Path("/loadFileMetadataMode" + value); sTFS.delete(file, false); // Create a file directly in UFS and set the corresponding mode. String ufsPath = PathUtils.concatPath(sUfsRoot, file); sUfs.create(ufsPath, CreateOptions.defaults(ServerConfiguration.global()).setOwner("testuser") .setGroup("testgroup").setMode(new Mode((short) value))).close(); Assert.assertTrue(sUfs.isFile(PathUtils.concatPath(sUfsRoot, file))); // Check the mode is consistent in Alluxio namespace once it's loaded from UFS to Alluxio. Assert.assertEquals(new Mode((short) value).toString(), new Mode(sTFS.getFileStatus(file).getPermission().toShort()).toString()); } }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
/** * Tests the loaded directory metadata from UFS having the same mode as that in the UFS. *//*w ww. j ava2 s . c o m*/ @Test public void loadDirMetadataMode() throws Exception { // Skip non-local and non-HDFS UFSs. Assume.assumeTrue(UnderFileSystemUtils.isLocal(sUfs) || UnderFileSystemUtils.isHdfs(sUfs)); List<Integer> permissionValues = Lists.newArrayList(0111, 0222, 0333, 0444, 0555, 0666, 0777, 0755, 0733, 0644, 0533, 0511); for (int value : permissionValues) { Path dir = new Path("/loadDirMetadataMode" + value + "/"); sTFS.delete(dir, true); // Create a directory directly in UFS and set the corresponding mode. String ufsPath = PathUtils.concatPath(sUfsRoot, dir); sUfs.mkdirs(ufsPath, MkdirsOptions.defaults(ServerConfiguration.global()).setCreateParent(false) .setOwner("testuser").setGroup("testgroup").setMode(new Mode((short) value))); Assert.assertTrue(sUfs.isDirectory(PathUtils.concatPath(sUfsRoot, dir))); // Check the mode is consistent in Alluxio namespace once it's loaded from UFS to Alluxio. Assert.assertEquals(new Mode((short) value).toString(), new Mode(sTFS.getFileStatus(dir).getPermission().toShort()).toString()); } }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
@Test public void s3GetPermission() throws Exception { Assume.assumeTrue(UnderFileSystemUtils.isS3(sUfs)); ServerConfiguration.unset(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING); Path fileA = new Path("/s3GetPermissionFile"); create(sTFS, fileA);//from ww w. j a v a 2s . c o m Assert.assertTrue(sUfs.isFile(PathUtils.concatPath(sUfsRoot, fileA))); // Without providing "alluxio.underfs.s3.canonical.owner.id.to.username.mapping", the default // display name of the S3 owner account is NOT empty. UfsStatus ufsStatus = sUfs.getFileStatus(PathUtils.concatPath(sUfsRoot, fileA)); Assert.assertNotEquals("", ufsStatus.getOwner()); Assert.assertNotEquals("", ufsStatus.getGroup()); Assert.assertEquals((short) 0700, ufsStatus.getMode()); }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
@Test public void gcsGetPermission() throws Exception { Assume.assumeTrue(UnderFileSystemUtils.isGcs(sUfs)); ServerConfiguration.unset(PropertyKey.UNDERFS_GCS_OWNER_ID_TO_USERNAME_MAPPING); Path fileA = new Path("/gcsGetPermissionFile"); create(sTFS, fileA);/* w w w. jav a 2 s.co m*/ Assert.assertTrue(sUfs.isFile(PathUtils.concatPath(sUfsRoot, fileA))); // Without providing "alluxio.underfs.gcs.owner.id.to.username.mapping", the default // display name of the GCS owner account is empty. The owner will be the GCS account id, which // is not empty. UfsStatus ufsStatus = sUfs.getFileStatus(PathUtils.concatPath(sUfsRoot, fileA)); Assert.assertNotEquals("", ufsStatus.getOwner()); Assert.assertNotEquals("", ufsStatus.getGroup()); Assert.assertEquals((short) 0700, ufsStatus.getMode()); }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
@Test public void swiftGetPermission() throws Exception { Assume.assumeTrue(UnderFileSystemUtils.isSwift(sUfs)); Path fileA = new Path("/swiftGetPermissionFile"); create(sTFS, fileA);//from ww w. j a v a2 s .c o m Assert.assertTrue(sUfs.isFile(PathUtils.concatPath(sUfsRoot, fileA))); UfsStatus ufsStatus = sUfs.getFileStatus(PathUtils.concatPath(sUfsRoot, fileA)); Assert.assertNotEquals("", ufsStatus.getOwner()); Assert.assertNotEquals("", ufsStatus.getGroup()); Assert.assertEquals((short) 0700, ufsStatus.getMode()); }
From source file:alluxio.client.hadoop.FileSystemAclIntegrationTest.java
License:Apache License
@Test public void ossGetPermission() throws Exception { Assume.assumeTrue(UnderFileSystemUtils.isOss(sUfs)); Path fileA = new Path("/objectfileA"); create(sTFS, fileA);/* ww w. ja va 2 s . c o m*/ Assert.assertTrue(sUfs.isFile(PathUtils.concatPath(sUfsRoot, fileA))); // Verify the owner, group and permission of OSS UFS is not supported and thus returns default // values. UfsStatus ufsStatus = sUfs.getFileStatus(PathUtils.concatPath(sUfsRoot, fileA)); Assert.assertNotEquals("", ufsStatus.getOwner()); Assert.assertNotEquals("", ufsStatus.getGroup()); Assert.assertEquals(Constants.DEFAULT_FILE_SYSTEM_MODE, ufsStatus.getMode()); }