Example usage for org.apache.hadoop.fs.permission FsAction READ_EXECUTE

List of usage examples for org.apache.hadoop.fs.permission FsAction READ_EXECUTE

Introduction

On this page you can find example usages of org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE.

Prototype

FsAction READ_EXECUTE

To view the source code for org.apache.hadoop.fs.permission FsAction READ_EXECUTE, click the Source Link below.

Click Source Link

Usage

From source file:TestParascaleFileStatus.java

License:Apache License

/**
 * Verifies that {@code ParascaleFileStatusMock} parses an ls(1)-style listing
 * line into the expected {@link FsPermission} triplet, and that the permission
 * info is loaded lazily and exactly once.
 */
public void testLoadPermissionInfo() {
    final Path p = new Path("/foo/bar");
    // Case 1: "-rw-r-xr--" -> user rw-, group r-x, other r--.
    {
        final ParascaleFileStatusMock parascaleFileStatus = new ParascaleFileStatusMock(10, false, 2,
                32 * 1024 * 1024, System.currentTimeMillis(), p);
        parascaleFileStatus.permissionString = "-rw-r-xr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
        final FsPermission permission = parascaleFileStatus.getPermission();
        assertEquals(FsAction.READ, permission.getOtherAction());
        assertEquals(FsAction.READ_EXECUTE, permission.getGroupAction());
        assertEquals(FsAction.READ_WRITE, permission.getUserAction());
    }
    // Case 2: "-rw--wxr--" -> group -wx; also checks block size and owner parsing.
    {
        final ParascaleFileStatusMock parascaleFileStatus = new ParascaleFileStatusMock(10, false, 2,
                32 * 1024 * 1024, System.currentTimeMillis(), p);
        parascaleFileStatus.permissionString = "-rw--wxr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
        assertEquals(32 * 1024 * 1024, parascaleFileStatus.getBlockSize());
        assertEquals("parascale", parascaleFileStatus.getOwner());
        final FsPermission permission = parascaleFileStatus.getPermission();
        assertEquals(FsAction.READ, permission.getOtherAction());
        assertEquals(FsAction.WRITE_EXECUTE, permission.getGroupAction());
        assertEquals(FsAction.READ_WRITE, permission.getUserAction());

    }
    // Case 3: lazy loading. Nothing is parsed until getPermission(); afterwards
    // getOwner() and getGroup() must reuse the cached result (count stays at 1).
    final ParascaleFileStatusMock parascaleFileStatus = new ParascaleFileStatusMock(10, false, 2,
            32 * 1024 * 1024, System.currentTimeMillis(), p);
    parascaleFileStatus.permissionString = "-rw-r-xr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
    assertEquals("permissions already loaded - should be lazy", 0, parascaleFileStatus.count.get());
    parascaleFileStatus.getPermission();
    assertEquals("permissions loaded more than once", 1, parascaleFileStatus.count.get());
    parascaleFileStatus.getOwner();
    assertEquals("permissions loaded more than once", 1, parascaleFileStatus.count.get());
    parascaleFileStatus.getGroup();
    assertEquals("permissions loaded more than once", 1, parascaleFileStatus.count.get());
}

From source file:com.cloudera.recordbreaker.fisheye.AccessController.java

License:Open Source License

/**
 * Decides whether the current session may read the given file.
 *
 * <p>Access is granted when the file is world-readable, or when the
 * logged-in user owns the file and the owner bits permit reading.
 * Group-based access is not evaluated yet.
 *
 * @param fs summary of the file whose permissions are checked
 * @return true if read access should be granted
 */
public boolean hasReadAccess(FileSummary fs) {
    final String fileOwner = fs.getOwner();
    final String fileGroup = fs.getGroup(); // retained for the future group check below
    final FsPermission fsp = fs.getPermissions();

    // World-readable?
    if (permitsRead(fsp.getOtherAction())) {
        return true;
    }

    // REMIND -- mjc -- implement group-readable testing when we have the user database
    // that will tell us the current logged-in-user's groups.

    // Owner-readable?
    if (currentUser != null && currentUser.equals(fileOwner)) {
        return permitsRead(fsp.getUserAction());
    }

    return false;
}

/** @return true when the given action set includes the read bit. */
private boolean permitsRead(FsAction action) {
    return action == FsAction.ALL || action == FsAction.READ || action == FsAction.READ_EXECUTE
            || action == FsAction.READ_WRITE;
}

From source file:com.linkedin.cubert.utils.AvroUtils.java

License:Open Source License

/**
 * Creates an empty Avro container file with the given schema at {@code path},
 * unless a file already exists there.
 *
 * <p>The file is created with permissions rwxr-xr-x (owner: all,
 * group/other: read+execute).
 *
 * @param fileSchema Cubert block schema to convert into the Avro file schema
 * @param path       target path on the default file system
 * @throws IOException if the file system cannot be reached or the file
 *                     cannot be created
 */
public static void createFileIfNotExists(BlockSchema fileSchema, String path) throws IOException {
    Configuration conf = new JobConf();
    FileSystem fs = FileSystem.get(conf);
    Path avroPath = new Path(path);
    if (fs.exists(avroPath)) {
        return;
    }

    Schema avroSchema = convertFromBlockSchema("CUBERT_MV_RECORD", fileSchema);
    System.out.println("Creating avro file with schema = " + avroSchema);

    GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(avroSchema);
    // try-with-resources: the original leaked the writer (and the underlying
    // stream) whenever create() or flush() threw. DataFileWriter.close() both
    // flushes and closes the wrapped output stream.
    try (DataFileWriter<GenericRecord> writer = new DataFileWriter<GenericRecord>(datumWriter)) {
        FSDataOutputStream fout = FileSystem.create(fs, avroPath,
                new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE));
        writer.create(avroSchema, fout);
    }
}

From source file:com.pentaho.big.data.bundles.impl.shim.hdfs.HadoopFileSystemImplTest.java

License:Apache License

/**
 * chmod(path, 753) must be forwarded to the underlying FileSystem as
 * setPermission with rwxr-x-wx (7 = rwx user, 5 = r-x group, 3 = -wx other).
 * NOTE(review): 753 is a decimal int literal whose digits appear to be
 * interpreted as octal permission digits by the implementation — confirm
 * against HadoopFileSystemImpl.chmod.
 */
@Test
public void testChmod() throws IOException {
    when(hadoopFileSystemPath.toString()).thenReturn(pathString);
    hadoopFileSystem.chmod(hadoopFileSystemPath, 753);
    verify(fileSystem).setPermission(eq(new Path(pathString)),
            eq(new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.WRITE_EXECUTE)));
}

From source file:com.thinkbiganalytics.datalake.authorization.hdfs.HDFSUtil.java

License:Apache License

/**
 * @param hdfsPermission : Permission assigned by user.
 * @return : Final Permission to be set for creating ACL
 */

private FsAction getFinalPermission(String hdfsPermission) {

    // Track which individual permission bits were requested.
    boolean readRequested = false;
    boolean writeRequested = false;
    boolean executeRequested = false;

    for (String token : hdfsPermission.split(",")) {
        String permission = token.toLowerCase();

        if (ALL.equals(permission)) {
            // An explicit "all" wins outright, regardless of remaining tokens.
            return FsAction.ALL;
        } else if (NONE.equals(permission)) {
            // An explicit "none" also short-circuits the whole list.
            return FsAction.NONE;
        } else if (READ.equals(permission)) {
            readRequested = true;
        } else if (WRITE.equals(permission)) {
            writeRequested = true;
        } else if (EXECUTE.equals(permission)) {
            executeRequested = true;
        }
        // Unrecognized tokens are ignored (the original recorded them under a
        // NONE key that was never read back).
    }

    // Map the accumulated bits onto the matching composite FsAction.
    if (readRequested && writeRequested && executeRequested) {
        return FsAction.ALL;
    }
    if (readRequested && writeRequested) {
        return FsAction.READ_WRITE;
    }
    if (readRequested && executeRequested) {
        return FsAction.READ_EXECUTE;
    }
    if (writeRequested && executeRequested) {
        return FsAction.WRITE_EXECUTE;
    }
    if (writeRequested) {
        return FsAction.WRITE;
    }
    if (readRequested) {
        return FsAction.READ;
    }
    if (executeRequested) {
        return FsAction.EXECUTE;
    }

    // Default Permission - None
    return FsAction.NONE;
}

From source file:com.trendmicro.hdfs.webdav.test.TestCopySimple.java

License:Apache License

/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and creates the fixtures used by the copy tests:
 * /test/rw (group write+execute), /test/ro (owner read-only),
 * /test/public (world rwx) and /test/rw/file1.
 */
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    // Let the current (gateway) user proxy for members of "users" from localhost.
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Clear the umask so the literal FsPermission values below apply unmodified.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // Create the fixtures as the owning user so ownership checks are exercised.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/rw"),
                    new FsPermission(FsAction.ALL, FsAction.WRITE_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/ro"),
                    new FsPermission(FsAction.READ_EXECUTE, FsAction.NONE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            // file1: owner rwx, group r--, other ---; buffer 4096, replication 1,
            // block size 65536.
            FSDataOutputStream os = fs.create(new Path("/test/rw/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestDeleteSimple.java

License:Apache License

/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and creates the fixtures used by the delete tests:
 * /test/private (group r-x) with file1/file2, and /test/public (world rwx)
 * with file3/file4.
 */
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    // Let the current (gateway) user proxy for members of "users" from localhost.
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Clear the umask so the literal FsPermission values below apply unmodified.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // Create the fixtures as the owning user so ownership checks are exercised.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/private"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            // Private files: owner rwx, group r--, other ---.
            FSDataOutputStream os = fs.create(new Path("/test/private/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/private/file2"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            // Public files: group and other get read access.
            os = fs.create(new Path("/test/public/file3"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.READ), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/public/file4"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.READ), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestMkcolSimple.java

License:Apache License

/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and creates the directories used by the MKCOL tests:
 * /test/private (group r-x, other none) and /test/public (world rwx).
 */
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    // Let the current (gateway) user proxy for members of "users" from localhost.
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Clear the umask so the literal FsPermission values below apply unmodified.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // Create the fixtures as the owning user so ownership checks are exercised.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/private"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestMoveSimple.java

License:Apache License

/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and creates the fixtures used by the MOVE tests:
 * /test/owner (group r-x) and /test/public (world rwx), each with a file1.
 */
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    // Let the current (gateway) user proxy for members of "users" from localhost.
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Clear the umask so the literal FsPermission values below apply unmodified.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // Create the fixtures as the owning user so ownership checks are exercised.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/owner"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            // Files: owner rwx, group r--, other ---; buffer 4096, replication 1,
            // block size 65536.
            FSDataOutputStream os = fs.create(new Path("/test/owner/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/public/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestPropfindSimple.java

License:Apache License

/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and creates the PROPFIND fixtures declared by the class
 * fields: public dirs (group r-x), private dirs (group/other none), and
 * public/private files filled with the corresponding test data.
 */
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    // Let the current (gateway) user proxy for members of "users" from localhost.
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Clear the umask so the literal FsPermission values below apply unmodified.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // Create the fixtures as the owning user so ownership checks are exercised.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            for (Path dir : publicDirPaths) {
                assertTrue(
                        fs.mkdirs(dir, new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            }
            for (Path dir : privateDirPaths) {
                assertTrue(fs.mkdirs(dir, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)));
            }
            // All files: owner rwx, group r--, other ---; buffer 4096,
            // replication 1, block size 65536.
            for (Path path : publicFilePaths) {
                FSDataOutputStream os = fs.create(path,
                        new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                        65536, null);
                assertNotNull(os);
                os.write(testPublicData.getBytes());
                os.close();
            }
            for (Path path : privateFilePaths) {
                FSDataOutputStream os = fs.create(path,
                        new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                        65536, null);
                assertNotNull(os);
                os.write(testPrivateData.getBytes());
                os.close();
            }
            return null;
        }
    });

}