Example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_PERMISSIONS_SUPERUSERGROUP_KEY

List of usage examples for org.apache.hadoop.hdfs DFSConfigKeys DFS_PERMISSIONS_SUPERUSERGROUP_KEY

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_PERMISSIONS_SUPERUSERGROUP_KEY.

Prototype

String DFS_PERMISSIONS_SUPERUSERGROUP_KEY

To view the source code for org.apache.hadoop.hdfs DFSConfigKeys DFS_PERMISSIONS_SUPERUSERGROUP_KEY, click the Source Link below.

Click Source Link

Usage

From source file:org.apache.bigtop.itest.hadoop.hcfs.TestCLI.java

License:Apache License

@Before
@Override
public void setUp() throws Exception {
    readTestConfigFile();

    // Client-side view of the configuration of the real Hadoop cluster under test.
    conf = new HdfsConfiguration();
    supergroup = System.getProperty("hcfs.root.groupname",
            conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY));
    namenode = conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    username = System.getProperty("user.name");

    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    // Many of the tests expect a replication value of 1 in the output
    conf.setInt("dfs.replication", 1);

    clitestDataDir = new File(TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+');

    // Create the working directory for the CLI tests and open up its permissions.
    String[] createTestcliDirCmds = { "hadoop fs -mkdir -p " + TEST_DIR_ABSOLUTE,
            "hadoop fs -chmod 777 " + TEST_DIR_ABSOLUTE };
    shHDFS.exec(createTestcliDirCmds);

    // Check assumptions which would make some cases fail if not met
    Assert.assertEquals("Creation of testcli dir should succeed and return 0"
            + " (but it failed with the following error message: "
            + StringUtils.join(shHDFS.getErr().toArray(), "\\n") + ")", 0, shHDFS.getRet());
    // We can't just use conf.setInt("fs.trash.interval", 0) because if trash is
    // enabled on the server, client configuration value is ignored.
    Assert.assertEquals("HDFS trash should be disabled via fs.trash.interval", 0,
            conf.getInt("fs.trash.interval", 0));
    // Compare against the 'username' field captured above instead of re-reading
    // the system property (same value, consistent with TestDFSCLI.setUp()).
    Assert.assertEquals("This test needs to be run under root user of hcfs",
            System.getProperty("hcfs.root.username", "hdfs"), username);

    // Initialize variables from test config file
    HCFS_SCHEME = System.getProperty("hcfs.scheme", "hdfs:");
    HCFS_DIRSIZE = System.getProperty("hcfs.dirsize.pattern", "0");
    HCFS_NNMATCH = System.getProperty("hcfs.namenode.pattern", "\\\\w+[-.a-z0-9]*(:[0-9]+)?");

    // HCFS fs.default.name Hack
    // Hadoop property 'fs.default.name' usually has value like this one:
    // "hdfs://namenode_hostname:port". But for other hadoop filesystems, the
    // value may just end with 3 slashes in a row (eg. 'glusterfs:///' or
    // 'maprfs:///'). This leads to file paths with 4 slashes in it (eg.
    // 'glusterfs:////tmp/testcli_sth') which are shortened back to
    // 'glusterfs:///tmp/...' if the file actually exists. To fix this we just
    // replace 4 slashes with 3 to prevent this from happening.
    String namenodeTestDir = namenode + TEST_DIR_ABSOLUTE;
    NAMENODE_TESTDIR_HACK = namenodeTestDir.replace(":////", ":///");
}

From source file:org.apache.bigtop.itest.hadoop.hcfs.TestDFSCLI.java

License:Apache License

@Before
@Override
public void setUp() throws Exception {
    readTestConfigFile();

    // Build a client-side view of the live Hadoop cluster's configuration.
    conf = new HdfsConfiguration();
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    // The expected outputs of many test cases assume a replication factor of 1.
    conf.setInt("dfs.replication", 1);

    supergroup = System.getProperty("hcfs.root.groupname",
            conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY));
    namenode = conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    username = System.getProperty("user.name");

    clitestDataDir = new File(TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+');

    // Create the test working directory and open up its permissions.
    String[] prepareTestDirCmds = { "hadoop fs -mkdir -p " + TEST_DIR_ABSOLUTE,
            "hadoop fs -chmod 777 " + TEST_DIR_ABSOLUTE };
    shHDFS.exec(prepareTestDirCmds);

    // Verify preconditions without which many test cases would fail.
    Assert.assertEquals("Creation of testcli dir should succeed and return 0"
            + " (but it failed with the following error message: "
            + StringUtils.join(shHDFS.getErr().toArray(), "\\n") + ")", 0, shHDFS.getRet());
    // Setting fs.trash.interval to 0 on the client is not enough: a server-side
    // trash setting overrides whatever the client configures.
    Assert.assertEquals("HDFS trash should be disabled via fs.trash.interval", 0,
            conf.getInt("fs.trash.interval", 0));
    Assert.assertEquals("This test needs to be run under root user of hcfs",
            System.getProperty("hcfs.root.username", "hdfs"), username);
}

From source file:org.apache.bigtop.itest.hadoop.hdfs.TestCLI.java

License:Apache License

@Before
@Override
public void setUp() throws Exception {
    readTestConfigFile();

    conf = new HdfsConfiguration();
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    // The expected output of many test cases assumes a replication factor of 1.
    conf.setInt("dfs.replication", 1);

    nn = conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    sug = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY);
    clitestDataDir = new File(TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+');
}