Example usage for org.apache.hadoop.fs Path getPathWithoutSchemeAndAuthority

Introduction

This page collects example usages of org.apache.hadoop.fs.Path.getPathWithoutSchemeAndAuthority from open-source projects.

Prototype

public static Path getPathWithoutSchemeAndAuthority(Path path) 

Document

Return a version of the given Path with the scheme and authority information removed.
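
For example, passing a fully qualified HDFS URI returns only the path component, with the scheme (hdfs) and authority (host:port) stripped. A minimal sketch of this behavior; the namenode address and path below are made up for illustration:

import org.apache.hadoop.fs.Path;

public class PathWithoutSchemeExample {
    public static void main(String[] args) {
        // Fully qualified path: scheme "hdfs", authority "namenode:8020"
        Path qualified = new Path("hdfs://namenode:8020/user/alice/data");

        // Strips scheme and authority, leaving only the path component
        Path bare = Path.getPathWithoutSchemeAndAuthority(qualified);

        System.out.println(bare); // prints /user/alice/data
    }
}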

Usage

From source file:org.apache.sentry.tests.e2e.hdfs.TestDbHdfsExtMaxGroups.java

License:Apache License

/**
 * Test Db and tbl level acls are synced up to db, tbl and par paths
 * The path is pre-configured in "sentry.hdfs.integration.path.prefixes"
 * @throws Exception
 */
@Test
public void testExtMaxAclsWithGroups() throws Exception {
    final String TEST_DB = "test_hdfs_max_group_ext_db";
    assumeThat(Strings.isNullOrEmpty(testExtPathDir), not(true));
    String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(testExtPathDir)) + "/" + TEST_DB;
    LOGGER.info("extDbDir = " + extDbDir);
    Path extDbPath = new Path(extDbDir);
    kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin));
    if (fileSystem.exists(extDbPath)) {
        LOGGER.info("Deleting " + extDbDir);
        fileSystem.delete(extDbPath, true);
    }
    dropRecreateDbTblRl(extDbDir, TEST_DB, TEST_TBL);
    testMaxGroupsDbTblHelper(extDbDir, TEST_DB);
}

From source file:org.apache.sentry.tests.e2e.hdfs.TestDbHdfsExtMaxGroups.java

License:Apache License

/**
 * A negative test case where path is not in prefix list.
 * In this case, acls should not be applied to db, tbl and par paths
 * @throws Exception
 */
@Test
public void testPathNotInPrefix() throws Exception {
    final String TEST_DB = "test_hdfs_max_group_bad_db";
    String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(scratchLikeDir)) + "/" + TEST_DB;
    LOGGER.info("extDbDir = " + extDbDir);
    Path extDbPath = new Path(extDbDir);
    kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin));
    if (fileSystem.exists(extDbPath)) {
        fileSystem.delete(extDbPath, true);
    }
    dropRecreateDbTblRl(extDbDir, TEST_DB, TEST_TBL);
    Connection connection = context.createConnection(ADMIN1);
    Statement statement = connection.createStatement();
    exec(statement, "USE " + TEST_DB);
    dropRecreateRole(statement, TEST_ROLE1);
    String dbgrp = "dbgrp";
    exec(statement, "GRANT ALL ON DATABASE " + TEST_DB + " TO ROLE " + TEST_ROLE1);
    exec(statement, "GRANT ROLE " + TEST_ROLE1 + " TO GROUP " + dbgrp);

    context.close();

    List<AclEntry> acls = new ArrayList<>();
    acls.add(AclEntry.parseAclEntry("group:" + dbgrp + ":rwx", true));
    verifyNoAclRecursive(acls, extDbDir, true);
}

From source file:org.apache.sentry.tests.e2e.hdfs.TestDbHdfsMaxGroups.java

License:Apache License

/**
 * Test Db and tbl level acls are synced up to db, tbl and par paths
 * @throws Exception
 */
@Test
public void testIntDbTblMaxAclsWithGroups() throws Exception {
    final String TEST_DB = "test_hdfs_max_group_int_db";
    String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(metastoreDir)) + "/" + TEST_DB + ".db";
    LOGGER.info("extDbDir = " + extDbDir);
    dropRecreateDbTblRl(TEST_DB, TEST_TBL);
    testMaxGroupsDbTblHelper(extDbDir, TEST_DB);
}

From source file:org.apache.sentry.tests.e2e.hdfs.TestDbHdfsMaxGroups.java

License:Apache License

/**
 * Test col level acls should not sync up to db, tbl and par paths
 * @throws Exception
 */
@Test
public void testIntColMaxAclsWithGroups() throws Exception {
    final String TEST_DB = "test_hdfs_max_group_int_col_db";
    String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(metastoreDir)) + "/" + TEST_DB + ".db";
    LOGGER.info("extDbDir = " + extDbDir);
    dropRecreateDbTblRl(TEST_DB, TEST_TBL);
    testMaxGroupsColHelper(extDbDir, TEST_DB);
}

From source file:org.apache.sentry.tests.e2e.hdfs.TestDbHdfsMaxGroups.java

License:Apache License

/**
 * Test Db and tbl level acls are synced up to db, tbl (no partitions)
 * @throws Exception
 */
@Test
public void testIntDbTblMaxAclsWithGroupsNoPar() throws Exception {
    final String TEST_DB = "test_hdfs_max_group_int_nopar_db";
    String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(metastoreDir)) + "/" + TEST_DB + ".db";
    LOGGER.info("extDbDir = " + extDbDir);
    dropRecreateDbTblNoPar(TEST_DB, TEST_TBL);

    String tblPathLoc = extDbDir + "/" + TEST_TBL;
    LOGGER.info("tblPathLoc = " + tblPathLoc);
    Connection connection = context.createConnection(ADMIN1);
    Statement statement = connection.createStatement();
    exec(statement, "USE " + TEST_DB);
    dropRecreateRole(statement, TEST_ROLE1);
    exec(statement, "GRANT SELECT ON TABLE " + TEST_TBL + " TO ROLE " + TEST_ROLE1);

    List<AclEntry> tblacls = new ArrayList<>();
    for (int i = 0; i < MAX_NUM_OF_GROUPS; i++) {
        String tblgrp = "tblgrp" + String.valueOf(i);
        tblacls.add(AclEntry.parseAclEntry("group:" + tblgrp + ":r-x", true));
        exec(statement, "GRANT ROLE " + TEST_ROLE1 + " TO GROUP " + tblgrp);
    }
    context.close();

    // tbl level privileges should sync up acls to tbl and par paths
    verifyAclsRecursive(tblacls, tblPathLoc, true);
    // tbl level privileges should not sync up acls to db path
    verifyNoAclRecursive(tblacls, extDbDir, false);
}

From source file:org.apache.storm.hdfs.spout.TestFileLock.java

License:Apache License

@Test
public void testLockRecovery() throws Exception {
    final int LOCK_EXPIRY_SEC = 1;
    final int WAIT_MSEC = LOCK_EXPIRY_SEC * 1000 + 500;
    Path file1 = new Path(filesDir + Path.SEPARATOR + "file1");
    Path file2 = new Path(filesDir + Path.SEPARATOR + "file2");
    Path file3 = new Path(filesDir + Path.SEPARATOR + "file3");

    fs.create(file1).close();
    fs.create(file2).close();
    fs.create(file3).close();

    // 1) acquire locks on file1,file2,file3
    FileLock lock1 = FileLock.tryLock(fs, file1, locksDir, "spout1");
    FileLock lock2 = FileLock.tryLock(fs, file2, locksDir, "spout2");
    FileLock lock3 = FileLock.tryLock(fs, file3, locksDir, "spout3");
    Assert.assertNotNull(lock1);
    Assert.assertNotNull(lock2);
    Assert.assertNotNull(lock3);

    try {
        HdfsUtils.Pair<Path, FileLock.LogEntry> expired = FileLock.locateOldestExpiredLock(fs, locksDir,
                LOCK_EXPIRY_SEC);
        Assert.assertNull(expired);

        // 2) Simulate lock file lease expiring and getting closed by HDFS
        closeUnderlyingLockFile(lock3);

        // 3) wait for all 3 locks to expire, then heartbeat on 2 locks
        Thread.sleep(WAIT_MSEC * 2); // wait for locks to expire
        lock1.heartbeat("1");
        lock2.heartbeat("1");

        // 4) Take ownership of stale lock
        FileLock lock3b = FileLock.acquireOldestExpiredLock(fs, locksDir, LOCK_EXPIRY_SEC, "spout1");
        Assert.assertNotNull(lock3b);
        Assert.assertEquals("Expected lock3 file", Path.getPathWithoutSchemeAndAuthority(lock3b.getLockFile()),
                lock3.getLockFile());
    } finally {
        lock1.release();
        lock2.release();
        lock3.release();
        fs.delete(file1, false);
        fs.delete(file2, false);
        try {
            fs.delete(file3, false);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

From source file:org.apache.storm.hdfs.spout.TestHdfsSpout.java

License:Apache License

private List<String> listDir(Path p) throws IOException {
    ArrayList<String> result = new ArrayList<>();
    RemoteIterator<LocatedFileStatus> fileNames = fs.listFiles(p, false);
    while (fileNames.hasNext()) {
        LocatedFileStatus fileStatus = fileNames.next();
        result.add(Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toString());
    }
    return result;
}

From source file:org.shaf.core.util.IOUtils.java

License:Apache License

/**
 * Normalizes the {@link Path}.
 * 
 * @param path
 *            the path to normalize.
 * @return the normalized path.
 * @throws IOException
 *             if an I/O error occurs.
 */
public static final Path normalizePath(final Path path) throws IOException {
    return Path.getPathWithoutSchemeAndAuthority(path);
}
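
As a brief follow-up, a hedged usage sketch of the normalizePath helper above; the qualified input path is hypothetical and assumes the org.shaf.core.util.IOUtils class shown:

import org.apache.hadoop.fs.Path;
import org.shaf.core.util.IOUtils;

public class NormalizePathExample {
    public static void main(String[] args) throws Exception {
        // A fully qualified path, e.g. as returned by FileStatus.getPath()
        Path qualified = new Path("hdfs://cluster-nn:8020/tmp/staging/part-00000");

        // Delegates to Path.getPathWithoutSchemeAndAuthority(...)
        Path normalized = IOUtils.normalizePath(qualified);
        System.out.println(normalized); // prints /tmp/staging/part-00000
    }
}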