Example usage for org.apache.hadoop.fs Path Path

List of usage examples for org.apache.hadoop.fs Path Path

Introduction

This page lists usage examples for the org.apache.hadoop.fs.Path constructor Path(URI).

Prototype

public Path(URI aUri) 

Document

Construct a path from a URI
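
Note that the usage examples below pass string literals to the Path(String) overload, while the prototype documented here takes a java.net.URI. The following is a minimal, self-contained sketch of the URI form; the HDFS host, port, and file name are illustrative only and not taken from the examples.

import java.net.URI;
import org.apache.hadoop.fs.Path;

public class PathFromUriExample {
    public static void main(String[] args) {
        // Construct a Hadoop Path directly from a java.net.URI.
        URI uri = URI.create("hdfs://localhost:9000/user/alice/data.txt");
        Path path = new Path(uri);
        System.out.println(path.toUri());   // hdfs://localhost:9000/user/alice/data.txt
        System.out.println(path.getName()); // data.txt
    }
}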

Usage

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Tests the parent directory permission when calling mkdirs recursively.
 */
@Test
public void parentDirectoryPermissionForUfs() throws IOException {
    if (!(sUfs instanceof LocalUnderFileSystem) && !(sUfs instanceof HdfsUnderFileSystem)) {
        // Skip non-local and non-HDFS UFSs.
        return;
    }
    Path fileA = new Path("/root/dirA/fileA");
    Path dirA = fileA.getParent();
    sTFS.mkdirs(dirA);
    short parentMode = (short) 0700;
    FsPermission newPermission = new FsPermission(parentMode);
    sTFS.setPermission(dirA, newPermission);

    create(sTFS, fileA);

    Assert.assertEquals((int) parentMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dirA)));

    // Rename from dirA to dirB, file and its parent permission should be in sync with the source
    // dirA.
    Path fileB = new Path("/root/dirB/fileB");
    Path dirB = fileB.getParent();
    sTFS.rename(dirA, dirB);
    Assert.assertEquals((int) parentMode,
            (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, fileB.getParent())));
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

@Test
public void s3GetPermission() throws Exception {
    Assume.assumeTrue((sUfs instanceof S3UnderFileSystem) || (sUfs instanceof S3AUnderFileSystem));

    alluxio.Configuration.set(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING, "");
    Path fileA = new Path("/objectfileA");
    create(sTFS, fileA);
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, fileA)));

    // Without providing "alluxio.underfs.s3.canonical.owner.id.to.username.mapping", the default
    // display name of the S3 owner account is NOT empty.
    Assert.assertNotEquals("", sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertNotEquals("", sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertEquals((short) 0700, sUfs.getMode(PathUtils.concatPath(sUfsRoot, fileA)));
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

@Test
public void gcsGetPermission() throws Exception {
    Assume.assumeTrue(sUfs instanceof GCSUnderFileSystem);

    alluxio.Configuration.set(PropertyKey.UNDERFS_GCS_OWNER_ID_TO_USERNAME_MAPPING, "");
    Path fileA = new Path("/objectfileA");
    create(sTFS, fileA);
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, fileA)));

    // Without providing "alluxio.underfs.gcs.owner.id.to.username.mapping", the default
    // display name of the GCS owner account is empty. The owner will be the GCS account id, which
    // is not empty.
    Assert.assertNotEquals("", sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertNotEquals("", sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertEquals((short) 0700, sUfs.getMode(PathUtils.concatPath(sUfsRoot, fileA)));
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

@Test
public void swiftGetPermission() throws Exception {
    // TODO(chaomin): update Swift permission integration test once the Swift implementation is done
    Assume.assumeTrue(sUfs instanceof SwiftUnderFileSystem);

    Path fileA = new Path("/objectfileA");
    create(sTFS, fileA);
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, fileA)));

    // Verify that owner, group, and permission are not supported by the Swift UFS and therefore
    // return default values.
    Assert.assertEquals("", sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertEquals("", sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertEquals(Constants.DEFAULT_FILE_SYSTEM_MODE,
            sUfs.getMode(PathUtils.concatPath(sUfsRoot, fileA)));
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

@Test
public void ossGetPermission() throws Exception {
    Assume.assumeTrue(sUfs instanceof OSSUnderFileSystem);

    Path fileA = new Path("/objectfileA");
    create(sTFS, fileA);
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, fileA)));

    // Verify that owner, group, and permission are not supported by the OSS UFS and therefore
    // return default values.
    Assert.assertEquals("", sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertEquals("", sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertEquals(Constants.DEFAULT_FILE_SYSTEM_MODE,
            sUfs.getMode(PathUtils.concatPath(sUfsRoot, fileA)));
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

@Test
public void objectStoreSetOwner() throws Exception {
    Assume.assumeTrue(CommonUtils.isUfsObjectStorage(sUfsRoot));

    Path fileA = new Path("/objectfileA");
    final String newOwner = "new-user1";
    final String newGroup = "new-group1";
    create(sTFS, fileA);

    // chown on an Alluxio file that is persisted in OSS is not allowed.
    mThrown.expect(IOException.class);
    mThrown.expectMessage("setOwner/setMode is not supported to object storage UFS via Alluxio.");
    sTFS.setOwner(fileA, newOwner, newGroup);
}

From source file: alluxio.hadoop.FileSystemRenameIntegrationTest.java

License: Apache License

@Test
public void basicRenameTest1() throws Exception {
    // Rename /fileA to /fileB
    Path fileA = new Path("/fileA");
    Path fileB = new Path("/fileB");

    create(sTFS, fileA);

    Assert.assertTrue(sTFS.rename(fileA, fileB));

    Assert.assertFalse(sTFS.exists(fileA));
    Assert.assertTrue(sTFS.exists(fileB));
    Assert.assertFalse(sUfs.exists(PathUtils.concatPath(sUfsRoot, "fileA")));
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, "fileB")));

    cleanup(sTFS);

    Assert.assertFalse(sTFS.exists(fileB));
    Assert.assertFalse(sUfs.exists(PathUtils.concatPath(sUfsRoot, "fileB")));
}

From source file: alluxio.hadoop.FileSystemRenameIntegrationTest.java

License: Apache License

@Test
public void basicRenameTest2() throws Exception {
    // Rename /fileA to /dirA/fileA
    Path fileA = new Path("/fileA");
    Path dirA = new Path("/dirA");
    Path finalDst = new Path("/dirA/fileA");

    create(sTFS, fileA);
    sTFS.mkdirs(dirA);

    Assert.assertTrue(sTFS.rename(fileA, finalDst));

    Assert.assertFalse(sTFS.exists(fileA));
    Assert.assertTrue(sTFS.exists(dirA));
    Assert.assertTrue(sTFS.exists(finalDst));
    Assert.assertFalse(sUfs.exists(PathUtils.concatPath(sUfsRoot, "fileA")));
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, "dirA")));
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, "dirA", "fileA")));

    cleanup(sTFS);

    Assert.assertFalse(sUfs.exists(PathUtils.concatPath(sUfsRoot, "dirA")));
}

From source file: alluxio.hadoop.FileSystemRenameIntegrationTest.java

License: Apache License

@Test
public void basicRenameTest3() throws Exception {
    // Rename /fileA to /dirA/fileA without specifying the full path
    Path fileA = new Path("/fileA");
    Path dirA = new Path("/dirA");
    Path finalDst = new Path("/dirA/fileA");

    create(sTFS, fileA);
    sTFS.mkdirs(dirA);

    Assert.assertTrue(sTFS.rename(fileA, dirA));

    Assert.assertFalse(sTFS.exists(fileA));
    Assert.assertTrue(sTFS.exists(dirA));
    Assert.assertTrue(sTFS.exists(finalDst));
    Assert.assertFalse(sUfs.exists(PathUtils.concatPath(sUfsRoot, "fileA")));
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, "dirA")));
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, "dirA", "fileA")));

    cleanup(sTFS);

    Assert.assertFalse(sUfs.exists(PathUtils.concatPath(sUfsRoot, "dirA")));
}

From source file: alluxio.hadoop.FileSystemRenameIntegrationTest.java

License: Apache License

@Test
public void basicRenameTest4() throws Exception {
    // Rename /fileA to /fileA
    Path fileA = new Path("/fileA");

    create(sTFS, fileA);

    Assert.assertTrue(sTFS.rename(fileA, fileA));

    Assert.assertTrue(sTFS.exists(fileA));
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, "fileA")));

    cleanup(sTFS);

    Assert.assertFalse(sTFS.exists(fileA));
    Assert.assertFalse(sUfs.exists(PathUtils.concatPath(sUfsRoot, "fileA")));
}