Example usage for org.apache.hadoop.fs.permission FsPermission createImmutable

List of usage examples for org.apache.hadoop.fs.permission FsPermission createImmutable

Introduction

In this page you can find the example usage for org.apache.hadoop.fs.permission FsPermission createImmutable.

Prototype

public static FsPermission createImmutable(short permission) 

Source Link

Document

Create an immutable FsPermission object.

Usage

From source file: alluxio.client.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

@Test
public void createFileWithPermission() throws Exception {
    // Exercise a representative spread of POSIX permission bit patterns.
    List<Integer> modes = Lists.newArrayList(0111, 0222, 0333, 0444, 0555, 0666, 0777, 0755, 0733,
            0644, 0533, 0511);
    for (int mode : modes) {
        Path path = new Path("/createfile" + mode);
        FsPermission perm = FsPermission.createImmutable((short) mode);
        // Only the path and permission matter here; the remaining create() args are ignored.
        FSDataOutputStream out = sTFS.create(path, perm, false /* ignored */, 10 /* ignored */,
                (short) 1 /* ignored */, 512 /* ignored */, null /* ignored */);
        out.writeBytes("Test Bytes");
        out.close();
        // The file's reported permission must round-trip exactly.
        Assert.assertEquals(perm, sTFS.getFileStatus(path).getPermission());
    }
}

From source file: alluxio.client.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

@Test
public void mkdirsWithPermission() throws Exception {
    // Same permission matrix as the file-creation test, applied to directories.
    List<Integer> modes = Lists.newArrayList(0111, 0222, 0333, 0444, 0555, 0666, 0777, 0755, 0733,
            0644, 0533, 0511);
    for (int mode : modes) {
        Path dirPath = new Path("/createDir" + mode);
        FsPermission perm = FsPermission.createImmutable((short) mode);
        sTFS.mkdirs(dirPath, perm);
        // The directory's reported permission must round-trip exactly.
        Assert.assertEquals(perm, sTFS.getFileStatus(dirPath).getPermission());
    }
}

From source file: alluxio.client.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link FileSystem#setPermission(Path, org.apache.hadoop.fs.permission.FsPermission)}.
 * It will test changing the permission of file using TFS.
 */
@Test
public void chmod() throws Exception {
    Path fileA = new Path("/chmodfileA");

    create(sTFS, fileA);
    FileStatus status = sTFS.getFileStatus(fileA);
    Assert.assertTrue(sUfs.isFile(PathUtils.concatPath(sUfsRoot, fileA)));

    // Hadoop 1.x HDFS creates files via org.apache.hadoop.fs.FileSystem.create with the
    // default permission option 0777; everything else is expected to default to 0644.
    short expectedDefault = (UnderFileSystemUtils.isHdfs(sUfs) && HadoopClientTestUtils.isHadoop1x())
            ? (short) 0777
            : (short) 0644;
    Assert.assertEquals(expectedDefault, status.getPermission().toShort());

    // Change the mode and verify it is reflected by a fresh status lookup.
    sTFS.setPermission(fileA, FsPermission.createImmutable((short) 0755));
    Assert.assertEquals((short) 0755, sTFS.getFileStatus(fileA).getPermission().toShort());
}

From source file: alluxio.client.hadoop.FileSystemIntegrationTest.java

License: Apache License

@Test
public void closeFileSystem() throws Exception {
    Path filePath = new Path("/createfile");
    FsPermission perm = FsPermission.createImmutable((short) 0666);
    // Only the path and permission matter; the remaining create() args are ignored.
    FSDataOutputStream out = sTFS.create(filePath, perm, false /* ignored */, 10 /* ignored */,
            (short) 1 /* ignored */, 512 /* ignored */, null /* ignored */);
    out.writeBytes("Test Bytes");
    out.close();
    // Mark the file delete-on-exit so that close() attempts to remove it.
    sTFS.deleteOnExit(filePath);
    sTFS.close();
}

From source file: alluxio.client.hadoop.FileSystemUriIntegrationTest.java

License: Apache License

/**
 * Tests connections to Alluxio cluster using URIs with connect details in authorities.
 *
 * @param authority the authority to test
 */
private void testConnection(String authority) throws Exception {
    Configuration hadoopConf = new Configuration();
    hadoopConf.set("fs.alluxio.impl", FileSystem.class.getName());
    URI uri = URI.create("alluxio://" + authority + "/tmp/path.txt");
    org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(uri, hadoopConf);

    mCluster.waitForAllNodesRegistered(WAIT_TIMEOUT_MS);

    // Write a small file through the Hadoop-compatible API to prove the connection works.
    Path filePath = new Path("/testFile");
    FsPermission perm = FsPermission.createImmutable((short) 0666);
    FSDataOutputStream out = fs.create(filePath, perm, false /* ignored */, 10 /* ignored */,
            (short) 1 /* ignored */, 512 /* ignored */, null /* ignored */);
    out.writeBytes("Test Bytes");
    out.close();
    // Mark the file delete-on-exit so that close() attempts to remove it.
    fs.deleteOnExit(filePath);
    fs.close();
    mCluster.notifySuccess();
}

From source file: alluxio.hadoop.FileSystemAclIntegrationTest.java

License: Apache License

/**
 * Test for {@link FileSystem#setPermission(Path, org.apache.hadoop.fs.permission.FsPermission)}.
 * It will test changing the permission of file using TFS.
 */
@Test
public void chmod() throws Exception {
    Path fileA = new Path("/chmodfileA");

    create(sTFS, fileA);
    FileStatus status = sTFS.getFileStatus(fileA);
    Assert.assertTrue(sUfs.exists(PathUtils.concatPath(sUfsRoot, fileA)));
    // A freshly created file is expected to carry the default 0644 permission.
    Assert.assertEquals((short) 0644, status.getPermission().toShort());

    if (CommonUtils.isUfsObjectStorage(sUfsRoot)) {
        // Object storage UFS does not support setMode; the call below is expected to throw.
        mThrown.expect(IOException.class);
        mThrown.expectMessage("setOwner/setMode is not supported to object storage UFS via Alluxio.");
        sTFS.setPermission(fileA, FsPermission.createImmutable((short) 0755));
        return; // not reached: setPermission throws and ExpectedException handles it
    }
    sTFS.setPermission(fileA, FsPermission.createImmutable((short) 0755));
    Assert.assertEquals((short) 0755, sTFS.getFileStatus(fileA).getPermission().toShort());
}

From source file: com.quantcast.qfs.hadoop.QFSImpl.java

License: Apache License

public FileStatus[] readdirplus(Path path) throws IOException {
    KfsAccess.DirectoryIterator dirIter = null;
    try {
        dirIter = kfsAccess.new DirectoryIterator(path.toUri().getPath());
        final ArrayList<FileStatus> entries = new ArrayList<FileStatus>();
        // Child paths are built from the directory path with a trailing slash.
        String dirPrefix = path.toString();
        if (!dirPrefix.endsWith("/")) {
            dirPrefix = dirPrefix + "/";
        }
        while (dirIter.next()) {
            // Skip the self and parent pseudo-entries.
            if (".".equals(dirIter.filename) || "..".equals(dirIter.filename)) {
                continue;
            }
            final boolean isDir = dirIter.isDirectory;
            entries.add(new FileStatus(isDir ? 0L : dirIter.filesize, isDir,
                    isDir ? 1 : dirIter.replication, isDir ? 0 : BLOCK_SIZE,
                    dirIter.modificationTime, ACCESS_TIME, FsPermission.createImmutable((short) dirIter.mode),
                    dirIter.ownerName, dirIter.groupName, new Path(dirPrefix + dirIter.filename)));
        }
        return entries.toArray(new FileStatus[0]);
    } finally {
        // Release the native iterator even if an exception escapes.
        if (dirIter != null) {
            dirIter.close();
        }
    }
}

From source file: com.quantcast.qfs.hadoop.QFSImpl.java

License: Apache License

public FileStatus stat(Path path) throws IOException {
    final KfsFileAttr attr = new KfsFileAttr();
    final String pathName = path.toUri().getPath();
    // kfs_retToIOException converts a failed kfs_stat status code into an IOException.
    kfsAccess.kfs_retToIOException(kfsAccess.kfs_stat(pathName, attr), pathName);
    final boolean isDir = attr.isDirectory;
    return new FileStatus(isDir ? 0L : attr.filesize, isDir,
            isDir ? 1 : attr.replication, isDir ? 0 : BLOCK_SIZE, attr.modificationTime,
            ACCESS_TIME, FsPermission.createImmutable((short) attr.mode), attr.ownerName, attr.groupName, path);
}

From source file: com.streamsets.pipeline.stage.BaseHiveIT.java

License: Apache License

/**
 * Start all required mini clusters./*w  w w  .  j a v a 2 s .  c  o m*/
 */
@BeforeClass
public static void setUpClass() throws Exception {
    // Conf dir
    new File(confDir).mkdirs();

    // HDFS
    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    Set<PosixFilePermission> set = new HashSet<>();
    set.add(PosixFilePermission.OWNER_EXECUTE);
    set.add(PosixFilePermission.OWNER_READ);
    set.add(PosixFilePermission.OWNER_WRITE);
    set.add(PosixFilePermission.OTHERS_READ);
    java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    final Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
    writeConfiguration(miniDFS.getConfiguration(0), confDir + "/core-site.xml");
    writeConfiguration(miniDFS.getConfiguration(0), confDir + "/hdfs-site.xml");
    writeConfiguration(miniDFS.getConfiguration(0), confDir + "/mapred-site.xml");
    writeConfiguration(miniDFS.getConfiguration(0), confDir + "/yarn-site.xml");

    // Configuration for both HMS and HS2
    final HiveConf hiveConf = new HiveConf(miniDFS.getConfiguration(0), HiveConf.class);
    hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
            "jdbc:derby:;databaseName=target/metastore_db;create=true");
    hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname,
            Utils.format("thrift://{}:{}", HOSTNAME, METASTORE_PORT));
    hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, "localhost");
    hiveConf.setInt(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, HIVE_SERVER_PORT);

    // Hive metastore
    Callable<Void> metastoreService = new Callable<Void>() {
        public Void call() throws Exception {
            try {
                HiveMetaStore.startMetaStore(METASTORE_PORT, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
                while (true)
                    ;
            } catch (Throwable e) {
                throw new Exception("Error starting metastore", e);
            }
        }
    };
    hiveMetastoreExecutor.submit(metastoreService);
    NetworkUtils.waitForStartUp(HOSTNAME, METASTORE_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);

    // HiveServer 2
    hiveServer2 = new HiveServer2();
    hiveServer2.init(hiveConf);
    hiveServer2.start();
    writeConfiguration(hiveServer2.getHiveConf(), confDir + "/hive-site.xml");
    NetworkUtils.waitForStartUp(HOSTNAME, HIVE_SERVER_PORT, MINICLUSTER_BOOT_RETRY, MINICLUSTER_BOOT_SLEEP);

    // JDBC Connection to Hive
    Class.forName(HIVE_JDBC_DRIVER);
    hiveConnection = HiveMetastoreUtil.getHiveConnection(getHiveJdbcUrl(),
            HadoopSecurityUtil.getLoginUser(conf));
    hiveQueryExecutor = new HiveQueryExecutor(hiveConnection);
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.BaseHdfsTargetIT.java

License: Apache License

@BeforeClass
public static void setUpClass() throws Exception {
    // Setting some dummy kerberos settings to be able to test a mis-setting.
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    // MiniDFSCluster refuses to start if its data dir is group/world writable.
    Set<PosixFilePermission> perms = new HashSet<>();
    perms.add(PosixFilePermission.OWNER_EXECUTE);
    perms.add(PosixFilePermission.OWNER_READ);
    perms.add(PosixFilePermission.OWNER_WRITE);
    perms.add(PosixFilePermission.OTHERS_READ);
    java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), perms);
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());

    Configuration hdfsConf = new HdfsConfiguration();
    hdfsConf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    hdfsConf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    fooUgi = UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    FileSystem.closeAll();
    miniDFS = new MiniDFSCluster.Builder(hdfsConf).build();
    // Open up the root so the test user can write anywhere.
    miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
}