Example usage for org.apache.hadoop.security UserGroupInformation createUserForTesting

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.createUserForTesting.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createUserForTesting(String user, String[] userGroups) 

Document

Create a UGI for testing HDFS and MapReduce
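
Below is a minimal, self-contained sketch of typical usage. The user name "alice" and group "testgroup" are illustrative assumptions, not taken from the examples on this page: the call builds a UGI for the given user with the given groups, and test code can then run actions as that user via doAs.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CreateUserForTestingSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical user and groups, for illustration only.
        final UserGroupInformation testUgi = UserGroupInformation.createUserForTesting("alice",
                new String[] { "testgroup" });

        // The created UGI reports the supplied user name and groups.
        System.out.println(testUgi.getUserName());                      // alice
        System.out.println(String.join(",", testUgi.getGroupNames()));  // testgroup

        // Run an action as the test user; a real test would touch HDFS or MapReduce here.
        String current = testUgi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });
        System.out.println(current);                                    // alice
    }
}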

Usage

From source file:com.streamsets.datacollector.multiple.TestMultiplePipelinesComplex.java

License:Apache License

/**
 * The extending test must call this method in the method scheduled to run before class
 * @throws Exception
 */
@BeforeClass
public static void beforeClass() throws Exception {

    //setup kafka to read from
    KafkaTestUtil.startZookeeper();
    KafkaTestUtil.startKafkaBrokers(1);

    KafkaTestUtil.createTopic(TOPIC1, 1, 1);
    KafkaTestUtil.createTopic(TOPIC2, 1, 1);
    KafkaTestUtil.createTopic(TOPIC3, 1, 1);

    producer1 = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);
    producer2 = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);

    e = Executors.newFixedThreadPool(2);
    e.submit(new Runnable() {
        @Override
        public void run() {
            int index = 0;
            while (true) {
                producer1.send(new KeyedMessage<>(TOPIC1, "0", "Hello Kafka" + index));
                ThreadUtil.sleep(200);
                index = (index + 1) % 10;
            }
        }
    });

    e.submit(new Runnable() {
        @Override
        public void run() {
            int index = 0;
            while (true) {
                producer2.send(new KeyedMessage<>(TOPIC2, "0", "Hello Kafka" + index));
                ThreadUtil.sleep(200);
                index = (index + 1) % 10;
            }
        }
    });

    //setup flume to write to
    source = new AvroSource();
    ch = new MemoryChannel();
    Configurables.configure(ch, new Context());

    Context context = new Context();
    //This should match what's present in the pipeline.json file
    flumePort = TestUtil.getFreePort();
    context.put("port", String.valueOf(flumePort));
    context.put("bind", "localhost");
    Configurables.configure(source, context);

    List<Channel> channels = new ArrayList<>();
    channels.add(ch);
    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);
    source.setChannelProcessor(new ChannelProcessor(rcs));
    source.start();

    //HDFS settings
    // setting some dummy kerberos settings to be able to test a mis-setting
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    UserGroupInformation.createUserForTesting("foo", new String[] { "all", "supergroup" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();

    TestMultiplePipelinesBase.beforeClass(getPipelineJson());
}

From source file:com.streamsets.datacollector.security.TestHadoopSecurityUtil.java

License:Apache License

@Test
public void testGetProxyUser() throws Exception {
    final UserGroupInformation fooUgi = UserGroupInformation.createUserForTesting("foo",
            new String[] { "all" });
    UserGroupInformation ugi = HadoopSecurityUtil.getProxyUser("proxy", fooUgi);
    Assert.assertEquals("proxy", ugi.getUserName());
}
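
As a hedged follow-up sketch (not part of the test above): HadoopSecurityUtil.getProxyUser is a StreamSets helper, while plain Hadoop offers the comparable UserGroupInformation.createProxyUser, which wraps the test UGI as the real (impersonating) user.

UserGroupInformation realUgi = UserGroupInformation.createUserForTesting("foo",
        new String[] { "all" });
// Hadoop's own proxy-user API: the proxy reports its own name and keeps a handle to the real user.
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("proxy", realUgi);
Assert.assertEquals("proxy", proxyUgi.getUserName());
Assert.assertEquals("foo", proxyUgi.getRealUser().getUserName());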

From source file:com.streamsets.pipeline.stage.destination.hbase.HBaseTargetIT.java

License:Apache License

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    try {
        conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase");
        conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
        conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
        UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
        utility = new HBaseTestingUtility(conf);
        utility.startMiniCluster();
        miniZK = utility.getZkCluster();
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
        HColumnDescriptor hcd = new HColumnDescriptor(familyName);
        hcd.setMaxVersions(HConstants.ALL_VERSIONS);
        htd.addFamily(hcd);
        utility.getHBaseAdmin().createTable(htd);
    } catch (Throwable throwable) {
        LOG.error("Error in startup: " + throwable, throwable);
        throw throwable;
    }
}

From source file:com.streamsets.pipeline.stage.destination.hbase.TestHBaseTarget.java

License:Apache License

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    try {
        conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase");
        conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
        conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
        UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
        utility = new HBaseTestingUtility(conf);
        utility.startMiniCluster();
        miniZK = utility.getZkCluster();
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
        htd.addFamily(new HColumnDescriptor(familyName));
        utility.getHBaseAdmin().createTable(htd);
    } catch (Throwable throwable) {
        LOG.error("Error in startup: " + throwable, throwable);
        throw throwable;
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.BaseHdfsTargetIT.java

License:Apache License

@BeforeClass
public static void setUpClass() throws Exception {
    //setting some dummy kerberos settings to be able to test a mis-setting
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    Set<PosixFilePermission> set = new HashSet<PosixFilePermission>();
    set.add(PosixFilePermission.OWNER_EXECUTE);
    set.add(PosixFilePermission.OWNER_READ);
    set.add(PosixFilePermission.OWNER_WRITE);
    set.add(PosixFilePermission.OTHERS_READ);
    java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    fooUgi = UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    FileSystem.closeAll();
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutorIT.java

License:Apache License

@BeforeClass
public static void setUpClass() throws Exception {
    // Conf dir
    new File(confDir).mkdirs();

    //setting some dummy kerberos settings to be able to test a mis-setting
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File(baseDir, "minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    Set<PosixFilePermission> set = new HashSet<>();
    set.add(PosixFilePermission.OWNER_EXECUTE);
    set.add(PosixFilePermission.OWNER_READ);
    set.add(PosixFilePermission.OWNER_WRITE);
    set.add(PosixFilePermission.OTHERS_READ);
    java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    conf.set("dfs.namenode.acls.enabled", "true");
    fooUgi = UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    FileSystem.closeAll();
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
    fs = miniDFS.getFileSystem();
    writeConfiguration(miniDFS.getConfiguration(0), confDir + "core-site.xml");
    writeConfiguration(miniDFS.getConfiguration(0), confDir + "hdfs-site.xml");
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.TestBaseHdfsTarget.java

License:Apache License

@BeforeClass
public static void setUpClass() throws Exception {
    //setting some dummy kerberos settings to be able to test a mis-setting
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    Set<PosixFilePermission> set = new HashSet<PosixFilePermission>();
    set.add(PosixFilePermission.OWNER_EXECUTE);
    set.add(PosixFilePermission.OWNER_READ);
    set.add(PosixFilePermission.OWNER_WRITE);
    set.add(PosixFilePermission.OTHERS_READ);
    java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    fooUgi = UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
}

From source file:com.streamsets.pipeline.stage.processor.hbase.HBaseProcessorIT.java

License:Apache License

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    try {
        context = ContextInfoCreator.createProcessorContext("n", false, OnRecordError.TO_ERROR);
        UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
        utility = new HBaseTestingUtility(conf);
        utility.startMiniCluster();
        miniZK = utility.getZkCluster();
        HTable ht = utility.createTable(Bytes.toBytes(tableName), Bytes.toBytes(familyName));

        // setup data
        List<Put> puts = new ArrayList<>();
        Put put = new Put(Bytes.toBytes("row1"));
        put.add(Bytes.toBytes(familyName), Bytes.toBytes("column1"), Bytes.toBytes("value1"));
        puts.add(put);

        put = new Put(Bytes.toBytes("row1"));
        put.add(Bytes.toBytes(familyName), Bytes.toBytes("column2"), Bytes.toBytes("value2"));
        puts.add(put);

        put = new Put(Bytes.toBytes("row2"));
        put.add(Bytes.toBytes(familyName), Bytes.toBytes("column2"), Bytes.toBytes("value2"));
        puts.add(put);

        put = new Put(Bytes.toBytes("row3"));
        put.add(Bytes.toBytes(familyName), Bytes.toBytes("column3"), Bytes.toBytes("value3"));
        puts.add(put);

        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
        Date date = sdf.parse("1986-09-21");
        put = new Put(Bytes.toBytes("rowTimestamp"), date.getTime());
        put.add(Bytes.toBytes(familyName), Bytes.toBytes("columnTimestamp"), Bytes.toBytes("valueTimestamp"));

        date = sdf.parse("2000-10-10");
        puts.add(put);
        put = new Put(Bytes.toBytes("rowTimestamp"), date.getTime());
        put.add(Bytes.toBytes(familyName), Bytes.toBytes("columnTimestamp"), Bytes.toBytes("valueTimestamp"));

        ht.put(puts);
    } catch (Throwable throwable) {
        LOG.error("Error in startup: " + throwable, throwable);
        throw throwable;
    }
}

From source file:com.trendmicro.hdfs.webdav.test.MiniClusterTestUtil.java

License:Apache License

public void startMiniCluster() throws Exception {
    startMiniCluster(UserGroupInformation.createUserForTesting("gateway", new String[] { "users" }));
}

From source file:etl.cmd.test.XTestCase.java

License:Apache License

private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
    if (dfsCluster == null && mrCluster == null) {
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", testCaseDir);
        }
        String oozieUser = getOozieUser();
        JobConf conf = createDFSConfig();
        String[] userGroups = new String[] { getTestGroup(), getTestGroup2() };
        UserGroupInformation.createUserForTesting(oozieUser, userGroups);
        UserGroupInformation.createUserForTesting(getTestUser(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser2(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser3(), new String[] { "users" });

        try {
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
            dfsCluster = builder.build();
            FileSystem fileSystem = dfsCluster.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("target/test-data" + "/minicluster/mapred"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster/mapred"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));

            mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
            Configuration jobConf = mrCluster.getConfig();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapreduce.jobtracker.address"));
            String rmAddress = jobConf.get("yarn.resourcemanager.address");
            log.info("Job tracker: " + rmAddress);
            if (rmAddress != null) {
                System.setProperty(OOZIE_TEST_JOB_TRACKER, rmAddress);
            }
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.defaultFS"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        } catch (Exception ex) {
            shutdownMiniCluster();
            throw ex;
        }
        new MiniClusterShutdownMonitor().start();
    }
}