Example usage for org.apache.hadoop.fs.permission FsAction NONE

List of usage examples for org.apache.hadoop.fs.permission FsAction NONE

Introduction

In this page you can find the example usage for org.apache.hadoop.fs.permission FsAction NONE.

Prototype

FsAction NONE

Click the Source Link below to view the source code for org.apache.hadoop.fs.permission FsAction NONE.

Usage

From source file:co.cask.cdap.app.runtime.spark.distributed.SparkExecutionServiceTest.java

License:Apache License

@Test
public void testWriteCredentials() throws Exception {
    ProgramRunId programRunId = new ProgramRunId("ns", "app", ProgramType.SPARK, "test",
            RunIds.generate().getId());

    // Launch an execution service that has no workflow token support (null token).
    SparkExecutionService service = new SparkExecutionService(locationFactory,
            InetAddress.getLoopbackAddress().getCanonicalHostName(), programRunId, null);
    service.startAndWait();
    try {
        SparkExecutionClient client = new SparkExecutionClient(service.getBaseURI(), programRunId);

        Location credentialsLocation = locationFactory.create(UUID.randomUUID().toString())
                .append("credentials");
        client.writeCredentials(credentialsLocation);

        FileStatus credentialsStatus = dfsCluster.getFileSystem()
                .getFileStatus(new Path(credentialsLocation.toURI()));
        // The credentials file must be private to the owner (mode 600).
        Assert.assertEquals(FsAction.READ_WRITE, credentialsStatus.getPermission().getUserAction());
        Assert.assertEquals(FsAction.NONE, credentialsStatus.getPermission().getGroupAction());
        Assert.assertEquals(FsAction.NONE, credentialsStatus.getPermission().getOtherAction());

        // The written bytes must deserialize back into a Credentials instance.
        try (DataInputStream is = new DataInputStream(credentialsLocation.getInputStream())) {
            new Credentials().readTokenStorageStream(is);
        }

        // Notify the service that the program has completed.
        client.completed(null);
    } finally {
        service.stopAndWait();
    }
}

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.handlers.TestACCESSHandler.java

License:Apache License

@Test
public void testPerms() throws Exception {
    // Each PermTest pairs a (requesting user, requesting group, file mode) triple
    // with the NFS access bits ACCESSHandler is expected to grant.
    // NOTE(review): the mocked fileStatus appears to be owned by root:wheel —
    // confirm against the mock setup in this test class.
    List<PermTest> perms = Lists.newArrayList();
    // read bit on owner; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read bit on group; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.READ, FsAction.NONE),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read bit on other; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.READ),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read bit on other; requester is not the owner
    perms.add(new PermTest("notroot", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.READ),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read bit on other; requester is not in the group (original comment said "not owner")
    perms.add(new PermTest("root", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.READ),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));
    // read bit on other; requester is neither owner nor in the group
    perms.add(new PermTest("notroot", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.READ),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP));

    // write bit on owner; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.WRITE, FsAction.NONE, FsAction.NONE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE));
    // write bit on group; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.WRITE, FsAction.NONE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE));
    // write bit on other; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.WRITE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE));
    // write bit on other; requester is not the owner (note: no DELETE expected)
    perms.add(new PermTest("notroot", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.WRITE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND));
    // write bit on other; requester is not in the group (original comment said "not owner")
    perms.add(new PermTest("root", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.WRITE),
            NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE));
    // write bit on other; requester is neither owner nor in the group
    perms.add(
            new PermTest("notroot", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.WRITE),
                    NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND));

    // execute bit on owner; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.EXECUTE, FsAction.NONE, FsAction.NONE),
            NFS_ACCESS_EXECUTE));
    // execute bit on group; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.EXECUTE, FsAction.NONE),
            NFS_ACCESS_EXECUTE));
    // execute bit on other; requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.EXECUTE),
            NFS_ACCESS_EXECUTE));
    // execute bit on other; requester is not the owner
    perms.add(new PermTest("notroot", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.EXECUTE),
            NFS_ACCESS_EXECUTE));
    // execute bit on other; requester is not in the group (original comment said "not owner")
    perms.add(new PermTest("root", "notwheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.EXECUTE),
            NFS_ACCESS_EXECUTE));
    // execute bit on other; requester is neither owner nor in the group
    perms.add(new PermTest("notroot", "notwheel",
            new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.EXECUTE), NFS_ACCESS_EXECUTE));
    // no permission bits at all, even for the owner; this might be rethought?
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE), 0));
    // all for owner and group, but requester is neither the owner nor in the group
    perms.add(new PermTest("notroot", "notwheel", new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE),
            0));
    // all for owner only; requester is in the group but is not the owner
    perms.add(
            new PermTest("notroot", "wheel", new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE), 0));
    // owner has all, requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE
                    | NFS_ACCESS_EXECUTE));
    // group has all, requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.NONE),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE
                    | NFS_ACCESS_EXECUTE));
    // other has all, requester is the owner
    perms.add(new PermTest("root", "wheel", new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.ALL),
            NFS_ACCESS_READ | NFS_ACCESS_LOOKUP | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE
                    | NFS_ACCESS_EXECUTE));

    for (PermTest permTest : perms) {
        // Feed the case's mode bits through the mocked file permissions, then ask
        // the handler which NFS access bits it grants for that user/group.
        when(filePermissions.toShort()).thenReturn(permTest.perm.toShort());
        int result = ACCESSHandler.getPermsForUserGroup(permTest.user, new String[] { permTest.group },
                fileStatus);
        // Compare as binary strings so a failure message shows the differing bits.
        assertEquals(permTest.toString(), Integer.toBinaryString(permTest.result),
                Integer.toBinaryString(result));
    }
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testCreate(Path path, boolean override) throws Exception {
    // Write a one-byte file through the Hoop (HTTP) file system with explicit
    // permission, replication factor and block size.
    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    FileSystem httpFs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    OutputStream out = httpFs.create(new Path(path.toUri().getPath()), permission, override, 1024, (short) 2,
            100 * 1024 * 1024, null);
    out.write(1);
    out.close();
    httpFs.close();

    // Re-read through the native Hadoop file system and verify that the
    // creation parameters and the content made it through.
    FileSystem hadoopFs = FileSystem.get(getHadoopConf());
    FileStatus status = hadoopFs.getFileStatus(path);
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
    Assert.assertEquals(status.getPermission(), permission);
    InputStream in = hadoopFs.open(path);
    Assert.assertEquals(in.read(), 1);
    in.close();
    hadoopFs.close();
}

From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License:Open Source License

private void testSetPermission() throws Exception {
    // Create a file directly through the native Hadoop file system.
    FileSystem hadoopFs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream out = hadoopFs.create(path);
    out.write(1);
    out.close();
    hadoopFs.close();

    // Change its permission through the Hoop (HTTP) file system.
    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    FileSystem httpFs = FileSystem.get(getJettyURL().toURI(), conf);
    FsPermission expected = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    httpFs.setPermission(path, expected);
    httpFs.close();

    // Verify natively that the permission change took effect.
    hadoopFs = FileSystem.get(getHadoopConf());
    FileStatus status = hadoopFs.getFileStatus(path);
    hadoopFs.close();
    Assert.assertEquals(status.getPermission(), expected);
}

From source file:com.datatorrent.stram.util.FSUtil.java

License:Apache License

/**
 * Downloads a file from the distributed file system to the local file system.
 * <p>
 * The destination directory is created if needed and made accessible only to
 * the current user (700); the downloaded file itself is then made read-only
 * for the user (400).
 *
 * @param fs the distributed file system to copy from
 * @param destinationPath local directory in which to place the file
 * @param destinationFile name of the local file to create inside {@code destinationPath}
 * @param dfsFile path of the source file on the distributed file system
 * @param conf configuration passed to the copy operation
 * @return the local file that was written
 * @throws IOException if the copy or a permission change fails
 */
public static File copyToLocalFileSystem(FileSystem fs, String destinationPath, String destinationFile,
        String dfsFile, Configuration conf) throws IOException {
    File destinationDir = new File(destinationPath);
    if (!destinationDir.exists() && !destinationDir.mkdirs()) {
        throw new RuntimeException("Unable to create local directory");
    }
    // try-with-resources guarantees the local file system is closed even if
    // the copy or a setPermission call throws (the original used try/finally).
    try (RawLocalFileSystem localFileSystem = new RawLocalFileSystem()) {
        // Allow only the app user to access the local dir.
        FsPermission permissions = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
        localFileSystem.setPermission(new Path(destinationDir.getAbsolutePath()), permissions);

        Path dfsFilePath = new Path(dfsFile);
        File localFile = new File(destinationDir, destinationFile);
        FileUtil.copy(fs, dfsFilePath, localFile, false, conf);

        // Make the downloaded file read-only for the user.
        permissions = new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE);
        localFileSystem.setPermission(new Path(localFile.getAbsolutePath()), permissions);
        return localFile;
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.util.TestHdfsUtils.java

License:Apache License

@Test
public void testParseFsPermission() {
    // The string-constant forms are deliberately not tested here because they
    // behave differently across Hadoop versions: "a-rwx" pre HADOOP-13508
    // versus "a=rwx" post HADOOP-13508.

    // Octal format: 770 grants rwx to user and group, nothing to other.
    FsPermission octal = HdfsUtils.parseFsPermission("770");
    assertEquals(FsAction.ALL, octal.getUserAction());
    assertEquals(FsAction.ALL, octal.getGroupAction());
    assertEquals(FsAction.NONE, octal.getOtherAction());

    // Unix symbolic format: rwxrwx--- is the symbolic spelling of 770.
    FsPermission symbolic = HdfsUtils.parseFsPermission("rwxrwx---");
    assertEquals(FsAction.ALL, symbolic.getUserAction());
    assertEquals(FsAction.ALL, symbolic.getGroupAction());
    assertEquals(FsAction.NONE, symbolic.getOtherAction());
}

From source file:com.thinkbiganalytics.datalake.authorization.hdfs.HDFSUtil.java

License:Apache License

/**
 * Recursively applies an ACL policy to every entry under the given path.
 * <p>
 * For each file and directory: any existing ACL is flushed first, then for
 * every group in {@code groups} an ACCESS ACL entry granting the resolved
 * permission is applied, alongside an entry setting "other" access to
 * {@link FsAction#NONE}. Sub-directories are descended into recursively.
 *
 * @param fileSystem     HDFS file system object
 * @param path           path on which the ACLs need to be created
 * @param groups         comma-separated list of groups to be granted permission
 * @param hdfsPermission permission string resolved via {@code getFinalPermission}
 * @throws FileNotFoundException if {@code path} does not exist
 * @throws IOException           if listing or ACL modification fails
 */
public void listAllDirAndApplyPolicy(FileSystem fileSystem, Path path, String groups, String hdfsPermission)
        throws FileNotFoundException, IOException {
    FsAction fsActionObject = getFinalPermission(hdfsPermission);
    String[] groupListForPermission = groups.split(",");

    // The "other" entry is identical for every group and every path, so it is
    // built once instead of on each loop iteration.
    AclEntry aclEntryOther = new AclEntry.Builder().setPermission(FsAction.NONE)
            .setScope(AclEntryScope.ACCESS).setType(AclEntryType.OTHER).build();

    for (FileStatus status : fileSystem.listStatus(path)) {

        // Flush ACL before creating the new one.
        flushAcl(fileSystem, status.getPath());

        // Grant the resolved permission to each requested group on this path.
        // The "other" entry is re-applied per group to match the original
        // per-iteration behavior.
        for (String group : groupListForPermission) {
            AclEntry aclEntryGroup = new AclEntry.Builder().setName(group)
                    .setPermission(fsActionObject).setScope(AclEntryScope.ACCESS)
                    .setType(AclEntryType.GROUP).build();

            applyAcl(fileSystem, status.getPath(), aclEntryGroup);
            applyAcl(fileSystem, status.getPath(), aclEntryOther);
        }

        // Recursive call so the policy covers the whole sub-tree.
        if (status.isDirectory()) {
            listAllDirAndApplyPolicy(fileSystem, status.getPath(), groups, hdfsPermission);
        }
    }
}

From source file:com.thinkbiganalytics.datalake.authorization.hdfs.HDFSUtil.java

License:Apache License

/**
 * Resolves a comma-separated permission string into a single {@link FsAction}
 * used when creating ACL entries.
 * <p>
 * Tokens are matched case-insensitively. An explicit "all" or "none" token
 * short-circuits immediately; otherwise the read/write/execute tokens seen
 * are combined into the corresponding composite action. Unrecognized tokens
 * grant nothing.
 *
 * @param hdfsPermission permission string assigned by the user,
 *                       e.g. {@code "read,write"}
 * @return the final permission to be set when creating the ACL
 */
private FsAction getFinalPermission(String hdfsPermission) {

    // Plain booleans replace the original HashMap<String, Integer> flag map,
    // whose NONE/ALL entries were written but never read.
    boolean read = false;
    boolean write = false;
    boolean execute = false;

    for (String permission : hdfsPermission.split(",")) {

        switch (permission.toLowerCase()) {
        case READ:
            read = true;
            break;
        case WRITE:
            write = true;
            break;
        case EXECUTE:
            execute = true;
            break;
        case ALL:
            // "all" wins immediately, regardless of other tokens.
            return FsAction.ALL;
        case NONE:
            // An explicit "none" overrides everything else.
            return FsAction.NONE;
        default:
            // Unrecognized token: grants nothing (matches original behavior).
            break;
        }
    }

    if (read && write && execute) {
        return FsAction.ALL;
    }
    if (read && write) {
        return FsAction.READ_WRITE;
    }
    if (read && execute) {
        return FsAction.READ_EXECUTE;
    }
    if (write && execute) {
        return FsAction.WRITE_EXECUTE;
    }
    if (write) {
        return FsAction.WRITE;
    }
    if (read) {
        return FsAction.READ;
    }
    if (execute) {
        return FsAction.EXECUTE;
    }

    // Default permission - none.
    return FsAction.NONE;
}

From source file:com.trendmicro.hdfs.webdav.test.TestCopySimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    // Configure the minicluster so the gateway user may proxy for "users"
    // from localhost, with anonymous simple authentication enabled.
    Configuration conf = minicluster.getConfiguration();
    String proxyUser = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set("hadoop.proxyuser." + proxyUser + ".groups", "users");
    conf.set("hadoop.proxyuser." + proxyUser + ".hosts", "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Zero the umask so the permissions below are applied verbatim.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path testRoot = new Path("/test");
    assertTrue(fs.mkdirs(testRoot, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(testRoot, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // As the owner, create sub-directories with varying permissions and one
    // group-readable file in the read-write directory.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem ownerFs = minicluster.getTestFileSystem();
            assertTrue(ownerFs.mkdirs(new Path("/test/rw"),
                    new FsPermission(FsAction.ALL, FsAction.WRITE_EXECUTE, FsAction.NONE)));
            assertTrue(ownerFs.mkdirs(new Path("/test/ro"),
                    new FsPermission(FsAction.READ_EXECUTE, FsAction.NONE, FsAction.NONE)));
            assertTrue(ownerFs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FSDataOutputStream out = ownerFs.create(new Path("/test/rw/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(out);
            out.write(testData.getBytes());
            out.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestDeleteSimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    // Configure the minicluster so the gateway user may proxy for "users"
    // from localhost, with anonymous simple authentication enabled.
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Zero the umask so the permissions below are applied verbatim.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // As the owner, create a private and a public directory, each holding two
    // test files (the original inlined the same create/write/close sequence
    // four times; it is factored into createTestFile below).
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/private"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FsPermission privatePerm = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE);
            FsPermission publicPerm = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.READ);
            createTestFile(fs, new Path("/test/private/file1"), privatePerm);
            createTestFile(fs, new Path("/test/private/file2"), privatePerm);
            createTestFile(fs, new Path("/test/public/file3"), publicPerm);
            createTestFile(fs, new Path("/test/public/file4"), publicPerm);
            return null;
        }
    });
}

/**
 * Creates a file with the given permission and writes the shared test data
 * into it, asserting the stream was obtained.
 */
private static void createTestFile(FileSystem fs, Path path, FsPermission permission) throws Exception {
    FSDataOutputStream os = fs.create(path, permission, true, 4096, (short) 1, 65536, null);
    assertNotNull(os);
    os.write(testData.getBytes());
    os.close();
}