Example usage for org.apache.hadoop.fs.permission FsAction WRITE_EXECUTE

List of usage examples for org.apache.hadoop.fs.permission FsAction WRITE_EXECUTE

Introduction

On this page you can find example usages of org.apache.hadoop.fs.permission FsAction.WRITE_EXECUTE.

Prototype

FsAction WRITE_EXECUTE

To view the source code for org.apache.hadoop.fs.permission FsAction.WRITE_EXECUTE, use the link below.

Click Source Link

Usage

From source file:TestParascaleFileStatus.java

License:Apache License

/**
 * Verifies that an ls-style permission string is parsed into the expected
 * user/group/other {@link FsAction}s, and that the parsing happens lazily
 * and exactly once (subsequent owner/group lookups reuse the cached result).
 */
public void testLoadPermissionInfo() {
    final Path path = new Path("/foo/bar");

    // "rw-r-xr--" => user READ_WRITE, group READ_EXECUTE, other READ.
    {
        final ParascaleFileStatusMock status = newStatusMock(path);
        status.permissionString = "-rw-r-xr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
        final FsPermission permission = status.getPermission();
        assertEquals(FsAction.READ, permission.getOtherAction());
        assertEquals(FsAction.READ_EXECUTE, permission.getGroupAction());
        assertEquals(FsAction.READ_WRITE, permission.getUserAction());
    }

    // "rw--wxr--" => user READ_WRITE, group WRITE_EXECUTE, other READ.
    {
        final ParascaleFileStatusMock status = newStatusMock(path);
        status.permissionString = "-rw--wxr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
        assertEquals(32 * 1024 * 1024, status.getBlockSize());
        assertEquals("parascale", status.getOwner());
        final FsPermission permission = status.getPermission();
        assertEquals(FsAction.READ, permission.getOtherAction());
        assertEquals(FsAction.WRITE_EXECUTE, permission.getGroupAction());
        assertEquals(FsAction.READ_WRITE, permission.getUserAction());
    }

    // Lazy-load contract: the counter must stay 0 until the first permission
    // access, then stay 1 no matter how many accessors are called afterwards.
    final ParascaleFileStatusMock status = newStatusMock(path);
    status.permissionString = "-rw-r-xr-- 1 parascale parascale 0 Sep  9 12:37 16:43 bar";
    assertEquals("permissions already loaded - should be lazy", 0, status.count.get());
    status.getPermission();
    assertEquals("permissions loaded more than once", 1, status.count.get());
    status.getOwner();
    assertEquals("permissions loaded more than once", 1, status.count.get());
    status.getGroup();
    assertEquals("permissions loaded more than once", 1, status.count.get());
}

/** Builds a mock status entry (10 bytes, 2 replicas, 32 MB block size) for {@code p}. */
private static ParascaleFileStatusMock newStatusMock(final Path p) {
    return new ParascaleFileStatusMock(10, false, 2, 32 * 1024 * 1024, System.currentTimeMillis(), p);
}

From source file:com.pentaho.big.data.bundles.impl.shim.hdfs.HadoopFileSystemImplTest.java

License:Apache License

/**
 * chmod on the wrapper must translate the numeric mode into an
 * {@link FsPermission} and delegate to the wrapped FileSystem.
 *
 * NOTE(review): 753 is a decimal literal, but the expected permission
 * (rwx r-x -wx) corresponds to octal 753 — presumably the implementation
 * interprets the digits as octal. Confirm against HadoopFileSystemImpl.
 */
@Test
public void testChmod() throws IOException {
    when(hadoopFileSystemPath.toString()).thenReturn(pathString);

    hadoopFileSystem.chmod(hadoopFileSystemPath, 753);

    final FsPermission expected = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE,
            FsAction.WRITE_EXECUTE);
    verify(fileSystem).setPermission(eq(new Path(pathString)), eq(expected));
}

From source file:com.thinkbiganalytics.datalake.authorization.hdfs.HDFSUtil.java

License:Apache License

/**
 * Resolves a comma-separated permission specification assigned by the user
 * (e.g. {@code "read,write"}) into the single {@link FsAction} used when
 * creating the ACL entry.
 *
 * <p>{@code "all"} and {@code "none"} short-circuit immediately; unrecognized
 * tokens are ignored. Otherwise the read/write/execute flags collected across
 * all tokens are combined into the matching composite action.
 *
 * @param hdfsPermission comma-separated permission names assigned by the user
 * @return the final {@link FsAction} to be set when creating the ACL
 */
private FsAction getFinalPermission(String hdfsPermission) {

    // Plain booleans replace the previous HashMap<String, Integer> of 0/1
    // flags, whose NONE and ALL entries were written but never read.
    boolean readRequested = false;
    boolean writeRequested = false;
    boolean executeRequested = false;

    for (String permission : hdfsPermission.split(",")) {

        switch (permission.toLowerCase()) {
        case READ:
            readRequested = true;
            break;
        case WRITE:
            writeRequested = true;
            break;
        case EXECUTE:
            executeRequested = true;
            break;
        case ALL:
            // "all" trumps any other token in the list.
            return FsAction.ALL;
        case NONE:
            // "none" likewise short-circuits.
            return FsAction.NONE;
        default:
            // Unknown tokens are ignored (the old code recorded them under
            // the NONE map entry, which was never consulted).
            break;
        }
    }

    // Check the most specific combinations first.
    if (readRequested && writeRequested && executeRequested) {
        return FsAction.ALL;
    }
    if (readRequested && writeRequested) {
        return FsAction.READ_WRITE;
    }
    if (readRequested && executeRequested) {
        return FsAction.READ_EXECUTE;
    }
    if (writeRequested && executeRequested) {
        return FsAction.WRITE_EXECUTE;
    }
    if (writeRequested) {
        return FsAction.WRITE;
    }
    if (readRequested) {
        return FsAction.READ;
    }
    if (executeRequested) {
        return FsAction.EXECUTE;
    }

    // Default permission - none.
    return FsAction.NONE;
}

From source file:com.trendmicro.hdfs.webdav.test.TestCopySimple.java

License:Apache License

/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and seeds the fixture tree: /test/rw (group write+execute),
 * /test/ro (owner read-only), /test/public (world-accessible), plus
 * /test/rw/file1 holding the shared test data.
 */
@BeforeClass
public static void setup() throws Exception {
    final Configuration conf = minicluster.getConfiguration();
    final String proxyUser = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set("hadoop.proxyuser." + proxyUser + ".groups", "users");
    conf.set("hadoop.proxyuser." + proxyUser + ".hosts", "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Clear the umask so the explicit FsPermissions below are applied verbatim.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    final FileSystem rootFs = minicluster.getTestFileSystem();
    final Path testRoot = new Path("/test");
    assertTrue(rootFs.mkdirs(testRoot, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    rootFs.setOwner(testRoot, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // The subtree below /test is created as the owner user, not the gateway user.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            final FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/rw"),
                    new FsPermission(FsAction.ALL, FsAction.WRITE_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/ro"),
                    new FsPermission(FsAction.READ_EXECUTE, FsAction.NONE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            final FSDataOutputStream out = fs.create(new Path("/test/rw/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(out);
            out.write(testData.getBytes());
            out.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestPutSimple.java

License:Apache License

/**
 * Boots the WebDAV gateway mini-cluster with simple (anonymous-allowed)
 * authentication and seeds the fixture tree: /test/rw (group write+execute),
 * /test/ro (owner read-only) and /test/public (world-accessible).
 */
@BeforeClass
public static void setup() throws Exception {
    final Configuration conf = minicluster.getConfiguration();
    final String proxyUser = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set("hadoop.proxyuser." + proxyUser + ".groups", "users");
    conf.set("hadoop.proxyuser." + proxyUser + ".hosts", "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    // Clear the umask so the explicit FsPermissions below are applied verbatim.
    FsPermission.setUMask(conf, new FsPermission((short) 0));

    final FileSystem rootFs = minicluster.getTestFileSystem();
    final Path testRoot = new Path("/test");
    assertTrue(rootFs.mkdirs(testRoot, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    rootFs.setOwner(testRoot, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    // The subtree below /test is created as the owner user, not the gateway user.
    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            final FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/rw"),
                    new FsPermission(FsAction.ALL, FsAction.WRITE_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/ro"),
                    new FsPermission(FsAction.READ_EXECUTE, FsAction.NONE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            return null;
        }
    });
}

From source file:org.apache.hcatalog.common.TestHCatUtil.java

License:Apache License

/**
 * Exercises {@code HCatUtil.validateMorePermissive} for every base
 * {@link FsAction}. The boolean flags passed to the helper are, in order,
 * the expected result of comparing the base action against:
 * ALL, NONE, EXECUTE, READ, WRITE, READ_EXECUTE, READ_WRITE, WRITE_EXECUTE.
 */
@Test
public void testValidateMorePermissive() {
    assertConsistentFsPermissionBehaviour(FsAction.ALL, true, true, true, true, true, true, true, true);
    assertConsistentFsPermissionBehaviour(FsAction.READ, false, true, false, true, false, false, false, false);
    assertConsistentFsPermissionBehaviour(FsAction.WRITE, false, true, false, false, true, false, false, false);
    assertConsistentFsPermissionBehaviour(FsAction.EXECUTE, false, true, true, false, false, false, false,
            false);
    assertConsistentFsPermissionBehaviour(FsAction.READ_EXECUTE, false, true, true, true, false, true, false,
            false);
    assertConsistentFsPermissionBehaviour(FsAction.READ_WRITE, false, true, false, true, true, false, true,
            false);
    assertConsistentFsPermissionBehaviour(FsAction.WRITE_EXECUTE, false, true, true, false, true, false, false,
            true);
    assertConsistentFsPermissionBehaviour(FsAction.NONE, false, true, false, false, false, false, false, false);
}

From source file:org.apache.hcatalog.common.TestHCatUtil.java

License:Apache License

/**
 * Asserts that {@code HCatUtil.validateMorePermissive(base, other)} returns
 * the expected value for {@code base} against every possible {@code other}.
 * The expected flags arrive in the same order the checks are made below.
 */
private void assertConsistentFsPermissionBehaviour(FsAction base, boolean versusAll, boolean versusNone,
        boolean versusX, boolean versusR, boolean versusW, boolean versusRX, boolean versusRW,
        boolean versusWX) {

    // One check per FsAction constant, in parameter order.
    Assert.assertTrue(HCatUtil.validateMorePermissive(base, FsAction.ALL) == versusAll);
    Assert.assertTrue(HCatUtil.validateMorePermissive(base, FsAction.NONE) == versusNone);
    Assert.assertTrue(HCatUtil.validateMorePermissive(base, FsAction.EXECUTE) == versusX);
    Assert.assertTrue(HCatUtil.validateMorePermissive(base, FsAction.READ) == versusR);
    Assert.assertTrue(HCatUtil.validateMorePermissive(base, FsAction.WRITE) == versusW);
    Assert.assertTrue(HCatUtil.validateMorePermissive(base, FsAction.READ_EXECUTE) == versusRX);
    Assert.assertTrue(HCatUtil.validateMorePermissive(base, FsAction.READ_WRITE) == versusRW);
    Assert.assertTrue(HCatUtil.validateMorePermissive(base, FsAction.WRITE_EXECUTE) == versusWX);
}

From source file:org.apache.hcatalog.common.TestHCatUtil.java

License:Apache License

/**
 * validateExecuteBitPresentIfReadOrWrite must accept any action that either
 * carries the execute bit or carries no read/write bit at all, and reject
 * read/write actions lacking execute.
 */
@Test
public void testExecutePermissionsCheck() {
    final FsAction[] acceptable = { FsAction.ALL, FsAction.NONE, FsAction.EXECUTE, FsAction.READ_EXECUTE,
            FsAction.WRITE_EXECUTE };
    for (FsAction action : acceptable) {
        Assert.assertTrue(HCatUtil.validateExecuteBitPresentIfReadOrWrite(action));
    }

    final FsAction[] rejected = { FsAction.READ, FsAction.WRITE, FsAction.READ_WRITE };
    for (FsAction action : rejected) {
        Assert.assertFalse(HCatUtil.validateExecuteBitPresentIfReadOrWrite(action));
    }
}

From source file:org.apache.sentry.hdfs.TestSentryPermissions.java

License:Apache License

/**
 * Adds aggregated user permissions and checks that the resulting ACL entry
 * combines them: read-execute granted on the database plus write-execute
 * granted on a table in it must collapse into a single ALL entry for the
 * table authorizable.
 */
@Test
public void testSentryAggregatedUserPermissions() {
    final TPrivilegePrincipal user = new TPrivilegePrincipal(TPrivilegePrincipalType.USER, "user1");

    // Grant read-execute on the whole database.
    final SentryPermissions perms = new SentryPermissions();
    final SentryPermissions.PrivilegeInfo dbInfo = new SentryPermissions.PrivilegeInfo("db1");
    dbInfo.setPermission(user, FsAction.READ_EXECUTE);
    perms.addPrivilegeInfo(dbInfo);

    // Grant write-execute on one table inside that database.
    final String tableAuthorizable = "db1.tb1";
    final SentryPermissions.PrivilegeInfo tableInfo = new SentryPermissions.PrivilegeInfo(tableAuthorizable);
    tableInfo.setPermission(user, FsAction.WRITE_EXECUTE);
    perms.addPrivilegeInfo(tableInfo);

    // The table ACL should aggregate both grants into one ALL entry.
    final List<AclEntry> acls = perms.getAcls(tableAuthorizable);
    Assert.assertEquals("Unexpected number of ACL entries received", 1, acls.size());
    Assert.assertEquals("Unexpected permission", FsAction.ALL, acls.get(0).getPermission());
}

From source file:org.apache.sentry.tests.e2e.hdfs.TestHDFSIntegration.java

License:Apache License

/**
 * End-to-end check of Sentry/HDFS permission synchronization: grants and
 * revokes issued through HiveServer2 must be reflected as HDFS ACLs on the
 * warehouse and external-table paths, across table/partition renames,
 * case-insensitive db/table names, location changes, manually-added native
 * ACLs, Sentry server restart, and recursive revokes.
 */
@Test
public void testEnd2End() throws Throwable {
    tmpHDFSDir = new Path("/tmp/external");
    dbNames = new String[] { "db1" };
    roles = new String[] { "admin_role", "db_role", "tab_role", "p1_admin" };
    admin = "hive";

    // All DDL/grant statements below are issued as the "hive" admin user.
    Connection conn;
    Statement stmt;
    conn = hiveServer2.createConnection("hive", "hive");
    stmt = conn.createStatement();
    stmt.execute("create role admin_role");
    stmt.execute("grant role admin_role to group hive");
    stmt.execute("grant all on server server1 to role admin_role");
    stmt.execute("create table p1 (s string) partitioned by (month int, day int)");
    stmt.execute("alter table p1 add partition (month=1, day=1)");
    stmt.execute("alter table p1 add partition (month=1, day=2)");
    stmt.execute("alter table p1 add partition (month=2, day=1)");
    stmt.execute("alter table p1 add partition (month=2, day=2)");

    // db privileges
    stmt.execute("create database db5");
    stmt.execute("create role db_role");
    stmt.execute("create role tab_role");
    stmt.execute("grant role db_role to group hbase");
    stmt.execute("grant role tab_role to group flume");
    stmt.execute("create table db5.p2(id int)");

    stmt.execute("create role p1_admin");
    stmt.execute("grant role p1_admin to group hbase");

    // Verify default db is inaccessible initially
    verifyOnAllSubDirs("/user/hive/warehouse", null, "hbase", false);

    verifyOnAllSubDirs("/user/hive/warehouse/p1", null, "hbase", false);

    stmt.execute("grant all on database db5 to role db_role");
    stmt.execute("use db5");
    stmt.execute("grant all on table p2 to role tab_role");
    stmt.execute("use default");
    verifyOnAllSubDirs("/user/hive/warehouse/db5.db", FsAction.ALL, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/db5.db/p2", FsAction.ALL, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/db5.db/p2", FsAction.ALL, "flume", true);
    verifyOnPath("/user/hive/warehouse/db5.db", FsAction.ALL, "flume", false);

    loadData(stmt);

    verifyHDFSandMR(stmt);

    // Verify default db is STILL inaccessible after grants but tables are fine
    verifyOnPath("/user/hive/warehouse", null, "hbase", false);
    verifyOnAllSubDirs("/user/hive/warehouse/p1", FsAction.READ_EXECUTE, "hbase", true);

    adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            // Simulate hdfs dfs -setfacl -m <aclantry> <path>
            AclStatus existing = miniDFS.getFileSystem().getAclStatus(new Path("/user/hive/warehouse/p1"));
            ArrayList<AclEntry> newEntries = new ArrayList<AclEntry>(existing.getEntries());
            newEntries.add(AclEntry.parseAclEntry("user::---", true));
            newEntries.add(AclEntry.parseAclEntry("group:bla:rwx", true));
            newEntries.add(AclEntry.parseAclEntry("other::---", true));
            miniDFS.getFileSystem().setAcl(new Path("/user/hive/warehouse/p1"), newEntries);
            return null;
        }
    });

    stmt.execute("revoke select on table p1 from role p1_admin");
    verifyOnAllSubDirs("/user/hive/warehouse/p1", null, "hbase", false);

    // Verify default db grants work
    stmt.execute("grant select on database default to role p1_admin");
    verifyOnPath("/user/hive/warehouse", FsAction.READ_EXECUTE, "hbase", true);

    // Verify default db grants are propagated to the tables
    verifyOnAllSubDirs("/user/hive/warehouse/p1", FsAction.READ_EXECUTE, "hbase", true);

    // Verify default db revokes work
    stmt.execute("revoke select on database default from role p1_admin");
    verifyOnPath("/user/hive/warehouse", null, "hbase", false);
    verifyOnAllSubDirs("/user/hive/warehouse/p1", null, "hbase", false);

    stmt.execute("grant all on table p1 to role p1_admin");
    verifyOnAllSubDirs("/user/hive/warehouse/p1", FsAction.ALL, "hbase", true);

    // Revoking SELECT from ALL leaves WRITE_EXECUTE (insert + traverse).
    stmt.execute("revoke select on table p1 from role p1_admin");
    verifyOnAllSubDirs("/user/hive/warehouse/p1", FsAction.WRITE_EXECUTE, "hbase", true);

    // Verify table rename works
    stmt.execute("alter table p1 rename to p3");
    verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true);

    stmt.execute("alter table p3 partition (month=1, day=1) rename to partition (month=1, day=3)");
    verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/p3/month=1/day=3", FsAction.WRITE_EXECUTE, "hbase", true);

    // Test DB case insensitivity
    stmt.execute("create database extdb");
    stmt.execute("grant all on database ExtDb to role p1_admin");
    writeToPath("/tmp/external/ext100", 5, "foo", "bar");
    writeToPath("/tmp/external/ext101", 5, "foo", "bar");
    stmt.execute("use extdb");
    stmt.execute("create table ext100 (s string) location \'/tmp/external/ext100\'");
    verifyQuery(stmt, "ext100", 5);
    verifyOnAllSubDirs("/tmp/external/ext100", FsAction.ALL, "hbase", true);
    stmt.execute("use default");

    stmt.execute("use EXTDB");
    stmt.execute("create table ext101 (s string) location \'/tmp/external/ext101\'");
    verifyQuery(stmt, "ext101", 5);
    verifyOnAllSubDirs("/tmp/external/ext101", FsAction.ALL, "hbase", true);

    // Test table case insensitivity
    stmt.execute("grant all on table exT100 to role tab_role");
    verifyOnAllSubDirs("/tmp/external/ext100", FsAction.ALL, "flume", true);

    stmt.execute("use default");

    //TODO: SENTRY-795: HDFS permissions do not sync when Sentry restarts in HA mode.
    if (!testSentryHA) {
        long beforeStop = System.currentTimeMillis();
        sentryServer.stopAll();
        long timeTakenForStopMs = System.currentTimeMillis() - beforeStop;
        LOGGER.info("Time taken for Sentry server stop: " + timeTakenForStopMs);

        // Verify that Sentry permission are still enforced for the "stale" period only if stop did not take too long
        if (timeTakenForStopMs < STALE_THRESHOLD) {
            verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true);
            Thread.sleep((STALE_THRESHOLD - timeTakenForStopMs));
        } else {
            LOGGER.warn("Sentry server stop took too long");
        }

        // Verify that Sentry permission are NOT enforced AFTER "stale" period
        verifyOnAllSubDirs("/user/hive/warehouse/p3", null, "hbase", false);

        sentryServer.startAll();
    }

    // Verify that After Sentry restart permissions are re-enforced
    verifyOnAllSubDirs("/user/hive/warehouse/p3", FsAction.WRITE_EXECUTE, "hbase", true);

    // Create new table and verify everything is fine after restart...
    stmt.execute("create table p2 (s string) partitioned by (month int, day int)");
    stmt.execute("alter table p2 add partition (month=1, day=1)");
    stmt.execute("alter table p2 add partition (month=1, day=2)");
    stmt.execute("alter table p2 add partition (month=2, day=1)");
    stmt.execute("alter table p2 add partition (month=2, day=2)");

    verifyOnAllSubDirs("/user/hive/warehouse/p2", null, "hbase", false);

    stmt.execute("grant select on table p2 to role p1_admin");
    verifyOnAllSubDirs("/user/hive/warehouse/p2", FsAction.READ_EXECUTE, "hbase", true);

    // Granting the same privilege twice must be idempotent.
    stmt.execute("grant select on table p2 to role p1_admin");
    verifyOnAllSubDirs("/user/hive/warehouse/p2", FsAction.READ_EXECUTE, "hbase", true);

    // Create external table
    writeToPath("/tmp/external/ext1", 5, "foo", "bar");

    stmt.execute("create table ext1 (s string) location \'/tmp/external/ext1\'");
    verifyQuery(stmt, "ext1", 5);

    // Ensure existing group permissions are never returned..
    verifyOnAllSubDirs("/tmp/external/ext1", null, "bar", false);
    verifyOnAllSubDirs("/tmp/external/ext1", null, "hbase", false);

    stmt.execute("grant all on table ext1 to role p1_admin");
    verifyOnAllSubDirs("/tmp/external/ext1", FsAction.ALL, "hbase", true);

    stmt.execute("revoke select on table ext1 from role p1_admin");
    verifyOnAllSubDirs("/tmp/external/ext1", FsAction.WRITE_EXECUTE, "hbase", true);

    // Verify database operations works correctly
    stmt.execute("create database db1");
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db", null, "hbase", false);

    stmt.execute("create table db1.tbl1 (s string)");
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", null, "hbase", false);
    stmt.execute("create table db1.tbl2 (s string)");
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", null, "hbase", false);

    // Verify default db grants do not affect other dbs
    stmt.execute("grant all on database default to role p1_admin");
    verifyOnPath("/user/hive/warehouse", FsAction.ALL, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db", null, "hbase", false);

    // Verify table rename works
    stmt.execute("create table q1 (s string)");
    verifyOnAllSubDirs("/user/hive/warehouse/q1", FsAction.ALL, "hbase", true);
    stmt.execute("alter table q1 rename to q2");
    verifyOnAllSubDirs("/user/hive/warehouse/q2", FsAction.ALL, "hbase", true);

    // Verify table GRANTS do not trump db GRANTS
    stmt.execute("grant select on table q2 to role p1_admin");
    verifyOnAllSubDirs("/user/hive/warehouse/q2", FsAction.ALL, "hbase", true);

    stmt.execute("create table q3 (s string)");
    verifyOnAllSubDirs("/user/hive/warehouse/q3", FsAction.ALL, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/q2", FsAction.ALL, "hbase", true);

    // Verify db privileges are propagated to tables
    stmt.execute("grant select on database db1 to role p1_admin");
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", FsAction.READ_EXECUTE, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", FsAction.READ_EXECUTE, "hbase", true);

    // Verify default db revokes do not affect other dbs
    stmt.execute("revoke all on database default from role p1_admin");
    verifyOnPath("/user/hive/warehouse", null, "hbase", false);
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", FsAction.READ_EXECUTE, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", FsAction.READ_EXECUTE, "hbase", true);

    stmt.execute("use db1");
    stmt.execute("grant all on table tbl1 to role p1_admin");

    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", FsAction.ALL, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", FsAction.READ_EXECUTE, "hbase", true);

    // Verify recursive revoke
    stmt.execute("revoke select on database db1 from role p1_admin");

    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl1", FsAction.WRITE_EXECUTE, "hbase", true);
    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/tbl2", null, "hbase", false);

    // Verify cleanup..
    stmt.execute("drop table tbl1");
    Assert.assertFalse(miniDFS.getFileSystem().exists(new Path("/user/hive/warehouse/db1.db/tbl1")));

    stmt.execute("drop table tbl2");
    Assert.assertFalse(miniDFS.getFileSystem().exists(new Path("/user/hive/warehouse/db1.db/tbl2")));

    stmt.execute("use default");
    stmt.execute("drop database db1");
    Assert.assertFalse(miniDFS.getFileSystem().exists(new Path("/user/hive/warehouse/db1.db")));

    // START : Verify external table set location..
    writeToPath("/tmp/external/tables/ext2_before/i=1", 5, "foo", "bar");
    writeToPath("/tmp/external/tables/ext2_before/i=2", 5, "foo", "bar");

    stmt.execute(
            "create external table ext2 (s string) partitioned by (i int) location \'/tmp/external/tables/ext2_before\'");
    stmt.execute("alter table ext2 add partition (i=1)");
    stmt.execute("alter table ext2 add partition (i=2)");
    verifyQuery(stmt, "ext2", 10);
    verifyOnAllSubDirs("/tmp/external/tables/ext2_before", null, "hbase", false);
    stmt.execute("grant all on table ext2 to role p1_admin");
    verifyOnPath("/tmp/external/tables/ext2_before", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_before/i=1", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_before/i=2", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_before/i=1/stuff.txt", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_before/i=2/stuff.txt", FsAction.ALL, "hbase", true);

    writeToPath("/tmp/external/tables/ext2_after/i=1", 6, "foo", "bar");
    writeToPath("/tmp/external/tables/ext2_after/i=2", 6, "foo", "bar");

    stmt.execute("alter table ext2 set location \'hdfs:///tmp/external/tables/ext2_after\'");
    // Even though table location is altered, partition location is still old (still 10 rows)
    verifyQuery(stmt, "ext2", 10);
    // You have to explicitly alter partition location..
    verifyOnPath("/tmp/external/tables/ext2_before", null, "hbase", false);
    verifyOnPath("/tmp/external/tables/ext2_before/i=1", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_before/i=2", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_before/i=1/stuff.txt", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_before/i=2/stuff.txt", FsAction.ALL, "hbase", true);

    stmt.execute(
            "alter table ext2 partition (i=1) set location \'hdfs:///tmp/external/tables/ext2_after/i=1\'");
    stmt.execute(
            "alter table ext2 partition (i=2) set location \'hdfs:///tmp/external/tables/ext2_after/i=2\'");
    // Now that partition location is altered, it picks up new data (12 rows instead of 10)
    verifyQuery(stmt, "ext2", 12);

    verifyOnPath("/tmp/external/tables/ext2_before", null, "hbase", false);
    verifyOnPath("/tmp/external/tables/ext2_before/i=1", null, "hbase", false);
    verifyOnPath("/tmp/external/tables/ext2_before/i=2", null, "hbase", false);
    verifyOnPath("/tmp/external/tables/ext2_before/i=1/stuff.txt", null, "hbase", false);
    verifyOnPath("/tmp/external/tables/ext2_before/i=2/stuff.txt", null, "hbase", false);
    verifyOnPath("/tmp/external/tables/ext2_after", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_after/i=1", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_after/i=2", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_after/i=1/stuff.txt", FsAction.ALL, "hbase", true);
    verifyOnPath("/tmp/external/tables/ext2_after/i=2/stuff.txt", FsAction.ALL, "hbase", true);
    // END : Verify external table set location..

    // Restart HDFS to verify if things are fine after re-start..

    // TODO : this is currently commented out since miniDFS.restartNameNode() does
    //        not work corectly on the version of hadoop sentry depends on
    //        This has been verified to work on a real cluster.
    //        Once miniDFS is fixed, this should be uncommented..
    // miniDFS.shutdown();
    // miniDFS.restartNameNode(true);
    // miniDFS.waitActive();
    // verifyOnPath("/tmp/external/tables/ext2_after", FsAction.ALL, "hbase", true);
    // verifyOnAllSubDirs("/user/hive/warehouse/p2", FsAction.READ_EXECUTE, "hbase", true);

    stmt.close();
    conn.close();
}