Example usage for org.apache.hadoop.fs FileStatus getOwner

List of usage examples for org.apache.hadoop.fs FileStatus getOwner

Introduction

On this page you can find example usages for org.apache.hadoop.fs FileStatus getOwner.

Prototype

public String getOwner() 

Document

Get the owner of the file.
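
As a minimal, self-contained sketch (the path below is a hypothetical placeholder), getOwner() is typically called on a FileStatus obtained from FileSystem#getFileStatus:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetOwnerExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical path; replace with a file that exists on your cluster.
        Path path = new Path("/tmp/example.txt");
        try (FileSystem fs = FileSystem.get(conf)) {
            FileStatus status = fs.getFileStatus(path);
            // getOwner() returns the owner of the file as a String.
            System.out.println("Owner: " + status.getOwner());
            System.out.println("Group: " + status.getGroup());
        }
    }
}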

Usage

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link FileSystem#setOwner(Path, String, String)} with local UFS. It tests changing
 * only the owner of a file through TFS and propagating the change to the UFS. Since the
 * arbitrary owner does not exist in the local UFS, the operation should fail.
 */
@Test
public void changeNonexistentOwnerForLocal() throws Exception {
    if (!(sUfs instanceof LocalUnderFileSystem)) {
        // Skip non-local UFSs.
        return;
    }
    Path fileA = new Path("/chownfileA-local");
    final String nonexistentOwner = "nonexistent-user1";
    final String nonexistentGroup = "nonexistent-group1";

    create(sTFS, fileA);

    FileStatus fs = sTFS.getFileStatus(fileA);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertEquals(defaultGroup, sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileA)));

    Assert.assertNotEquals(defaultOwner, nonexistentOwner);
    Assert.assertNotEquals(defaultGroup, nonexistentGroup);

    // Expect an IOException because setOwner on the UFS fails for a nonexistent owner name.
    mThrown.expect(IOException.class);
    mThrown.expectMessage("Could not setOwner for UFS file");
    sTFS.setOwner(fileA, nonexistentOwner, null);
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link FileSystem#setOwner(Path, String, String)} with local UFS. It tests changing
 * only the group of a file through TFS and propagating the change to the UFS. Since the
 * arbitrary group does not exist in the local UFS, the operation should fail.
 */
@Test
public void changeNonexistentGroupForLocal() throws Exception {
    if (!(sUfs instanceof LocalUnderFileSystem)) {
        // Skip non-local UFSs.
        return;
    }
    Path fileB = new Path("/chownfileB-local");
    final String nonexistentOwner = "nonexistent-user1";
    final String nonexistentGroup = "nonexistent-group1";

    create(sTFS, fileB);

    FileStatus fs = sTFS.getFileStatus(fileB);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileB)));
    Assert.assertEquals(defaultGroup, sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileB)));

    Assert.assertNotEquals(defaultOwner, nonexistentOwner);
    Assert.assertNotEquals(defaultGroup, nonexistentGroup);

    // Expect an IOException because setOwner on the UFS fails for a nonexistent group name.
    mThrown.expect(IOException.class);
    mThrown.expectMessage("Could not setOwner for UFS file");
    sTFS.setOwner(fileB, null, nonexistentGroup);
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link FileSystem#setOwner(Path, String, String)} with local UFS. It tests changing
 * both the owner and the group of a file through TFS and propagating the change to the UFS.
 * Since the arbitrary owner and group do not exist in the local UFS, the operation should fail.
 */
@Test
public void changeNonexistentOwnerAndGroupForLocal() throws Exception {
    if (!(sUfs instanceof LocalUnderFileSystem)) {
        // Skip non-local UFSs.
        return;
    }
    Path fileC = new Path("/chownfileC-local");
    final String nonexistentOwner = "nonexistent-user1";
    final String nonexistentGroup = "nonexistent-group1";

    create(sTFS, fileC);

    FileStatus fs = sTFS.getFileStatus(fileC);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileC)));
    Assert.assertEquals(defaultGroup, sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileC)));

    Assert.assertNotEquals(defaultOwner, nonexistentOwner);
    Assert.assertNotEquals(defaultGroup, nonexistentGroup);

    mThrown.expect(IOException.class);
    mThrown.expectMessage("Could not setOwner for UFS file");
    sTFS.setOwner(fileC, nonexistentOwner, nonexistentGroup);
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link FileSystem#setOwner(Path, String, String)} with HDFS UFS. It tests changing
 * only the owner of a file through TFS and propagating the change to the UFS.
 */
@Test
public void changeNonexistentOwnerForHdfs() throws Exception {
    if (!(sUfs instanceof HdfsUnderFileSystem)) {
        // Skip non-HDFS UFSs.
        return;
    }
    Path fileA = new Path("/chownfileA-hdfs");
    final String testOwner = "test-user1";
    final String testGroup = "test-group1";

    create(sTFS, fileA);

    FileStatus fs = sTFS.getFileStatus(fileA);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileA)));
    // The group can differ because the HDFS user-to-group mapping can be different from that in Alluxio.

    Assert.assertNotEquals(defaultOwner, testOwner);
    Assert.assertNotEquals(defaultGroup, testGroup);

    // Unlike the local UFS case, this call is expected to succeed and propagate to HDFS.
    sTFS.setOwner(fileA, testOwner, null);

    fs = sTFS.getFileStatus(fileA);
    Assert.assertEquals(testOwner, fs.getOwner());
    Assert.assertEquals(defaultGroup, fs.getGroup());
    Assert.assertEquals(testOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileA)));
    Assert.assertEquals(defaultGroup, sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileA)));
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link FileSystem#setOwner(Path, String, String)} with HDFS UFS. It tests changing
 * only the group of a file through TFS and propagating the change to the UFS.
 */
@Test
public void changeNonexistentGroupForHdfs() throws Exception {
    if (!(sUfs instanceof HdfsUnderFileSystem)) {
        // Skip non-HDFS UFSs.
        return;
    }
    Path fileB = new Path("/chownfileB-hdfs");
    final String testOwner = "test-user1";
    final String testGroup = "test-group1";

    create(sTFS, fileB);

    FileStatus fs = sTFS.getFileStatus(fileB);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileB)));
    // The group can differ because the HDFS user-to-group mapping can be different from that in Alluxio.

    Assert.assertNotEquals(defaultOwner, testOwner);
    Assert.assertNotEquals(defaultGroup, testGroup);

    sTFS.setOwner(fileB, null, testGroup);
    fs = sTFS.getFileStatus(fileB);
    Assert.assertEquals(defaultOwner, fs.getOwner());
    Assert.assertEquals(testGroup, fs.getGroup());
    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileB)));
    Assert.assertEquals(testGroup, sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileB)));
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link FileSystem#setOwner(Path, String, String)} with HDFS UFS. It tests changing
 * both the owner and the group of a file through TFS and propagating the change to the UFS.
 */
@Test
public void changeNonexistentOwnerAndGroupForHdfs() throws Exception {
    if (!(sUfs instanceof HdfsUnderFileSystem)) {
        // Skip non-HDFS UFSs.
        return;
    }
    Path fileC = new Path("/chownfileC-hdfs");
    final String testOwner = "test-user1";
    final String testGroup = "test-group1";

    create(sTFS, fileC);

    FileStatus fs = sTFS.getFileStatus(fileC);
    String defaultOwner = fs.getOwner();
    String defaultGroup = fs.getGroup();

    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileC)));
    // The group can differ because the HDFS user-to-group mapping can be different from that in Alluxio.

    Assert.assertNotEquals(defaultOwner, testOwner);
    Assert.assertNotEquals(defaultGroup, testGroup);

    sTFS.setOwner(fileC, testOwner, testGroup);
    fs = sTFS.getFileStatus(fileC);
    Assert.assertEquals(testOwner, fs.getOwner());
    Assert.assertEquals(testGroup, fs.getGroup());
    Assert.assertEquals(testOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, fileC)));
    Assert.assertEquals(testGroup, sUfs.getGroup(PathUtils.concatPath(sUfsRoot, fileC)));
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Tests the directory permission propagation to UFS.
 */
@Test
public void directoryPermissionForUfs() throws IOException {
    if (!(sUfs instanceof LocalUnderFileSystem) && !(sUfs instanceof HdfsUnderFileSystem)) {
        // Skip non-local and non-HDFS UFSs.
        return;
    }
    Path dir = new Path("/root/dir/");
    sTFS.mkdirs(dir);

    FileStatus fs = sTFS.getFileStatus(dir);
    String defaultOwner = fs.getOwner();
    Short dirMode = fs.getPermission().toShort();
    FileStatus parentFs = sTFS.getFileStatus(dir.getParent());
    Short parentMode = parentFs.getPermission().toShort();

    Assert.assertEquals(defaultOwner, sUfs.getOwner(PathUtils.concatPath(sUfsRoot, dir)));
    Assert.assertEquals((int) dirMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir)));
    Assert.assertEquals((int) parentMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir.getParent())));

    short newMode = (short) 0755;
    FsPermission newPermission = new FsPermission(newMode);
    sTFS.setPermission(dir, newPermission);

    Assert.assertEquals((int) newMode, (int) sUfs.getMode(PathUtils.concatPath(sUfsRoot, dir)));
}

From source file: alluxio.hadoop.HadoopUtils.java

License: Apache License

/**
 * Returns a string representation of a Hadoop {@link FileStatus}.
 *
 * @param fs Hadoop {@link FileStatus}
 * @return its string representation
 */
public static String toStringHadoopFileStatus(FileStatus fs) {
    StringBuilder sb = new StringBuilder();
    sb.append("HadoopFileStatus: Path: ").append(fs.getPath());
    sb.append(" , Length: ").append(fs.getLen());
    // Use isDir instead of isDirectory for compatibility with hadoop 1.
    sb.append(" , IsDir: ").append(fs.isDir());
    sb.append(" , BlockReplication: ").append(fs.getReplication());
    sb.append(" , BlockSize: ").append(fs.getBlockSize());
    sb.append(" , ModificationTime: ").append(fs.getModificationTime());
    sb.append(" , AccessTime: ").append(fs.getAccessTime());
    sb.append(" , Permission: ").append(fs.getPermission());
    sb.append(" , Owner: ").append(fs.getOwner());
    sb.append(" , Group: ").append(fs.getGroup());
    return sb.toString();
}

From source file: alluxio.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

@Override
public void setOwner(String path, String user, String group) throws IOException {
    try {
        FileStatus fileStatus = mFileSystem.getFileStatus(new Path(path));
        LOG.info("Changing file '{}' user from: {} to {}, group from: {} to {}", fileStatus.getPath(),
                fileStatus.getOwner(), user, fileStatus.getGroup(), group);
        mFileSystem.setOwner(fileStatus.getPath(), user, group);
    } catch (IOException e) {
        LOG.error("Fail to set owner for {} with user: {}, group: {}", path, user, group, e);
        LOG.warn("In order for Alluxio to create HDFS files with the correct user and groups, "
                + "Alluxio should be added to the HDFS superusers.");
        throw e;
    }
}

From source file: ch.cern.db.hdfs.Main.java

License: GNU General Public License

private void printFileStatus(FileStatus status) {
    System.out.println();
    System.out.println("Showing metadata for: " + status.getPath());
    System.out.println("   isDirectory: " + status.isDirectory());
    System.out.println("   isFile: " + status.isFile());
    System.out.println("   isSymlink: " + status.isSymlink());
    System.out.println("   encrypted: " + status.isEncrypted());
    System.out.println("   length: " + status.getLen());
    System.out.println("   replication: " + status.getReplication());
    System.out.println("   blocksize: " + status.getBlockSize());
    System.out.println("   modification_time: " + new Date(status.getModificationTime()));
    System.out.println("   access_time: " + new Date(status.getAccessTime()));
    System.out.println("   owner: " + status.getOwner());
    System.out.println("   group: " + status.getGroup());
    System.out.println("   permission: " + status.getPermission());
    System.out.println();
}