Example usage for org.apache.hadoop.security UserGroupInformation createUserForTesting

Introduction

This page shows example usages of the org.apache.hadoop.security.UserGroupInformation method createUserForTesting.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createUserForTesting(String user, String[] userGroups) 

Document

Create a UGI for testing HDFS and MapReduce
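
Before the project-specific examples below, here is a minimal, self-contained sketch of the typical pattern: register a test user with createUserForTesting, then run filesystem calls as that user via doAs(). The user name "alice" and group "testgroup" are arbitrary placeholders, not values taken from any of the examples.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateUserForTestingSketch {
    public static void main(String[] args) throws Exception {
        // Register "alice" (a placeholder name) in the in-process test realm
        // with the given groups; no OS account or Kerberos principal is needed.
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alice",
                new String[] { "testgroup" });

        // Anything run inside doAs() sees "alice" as the current user, which is
        // how the examples below exercise permission and proxy-user checks.
        FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(new Configuration());
            }
        });
        System.out.println("Acting as " + ugi.getShortUserName() + " on " + fs.getUri());
    }
}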

Usage

From source file: com.cloudera.llama.am.MiniLlama.java

License: Apache License
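
This setup method registers the current OS user with a test group so the proxy-user settings configured just above take effect, then starts embedded HDFS and YARN miniclusters.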

private Configuration startMiniHadoop() throws Exception {
    int clusterNodes = getConf().getInt(MINI_CLUSTER_NODES_KEY, 1);
    if (System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA) == null) {
        String testBuildData = new File("target").getAbsolutePath();
        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testBuildData);
    }
    //to trigger hdfs-site.xml registration as default resource
    new HdfsConfiguration();
    Configuration conf = new YarnConfiguration();
    String llamaProxyUser = System.getProperty("user.name");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".hosts", "*");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".groups", "*");
    String[] userGroups = new String[] { "g" };
    UserGroupInformation.createUserForTesting(llamaProxyUser, userGroups);

    int hdfsPort = 0;
    String fsUri = conf.get("fs.defaultFS");
    if (fsUri != null && !fsUri.equals("file:///")) {
        int i = fsUri.lastIndexOf(":");
        if (i > -1) {
            try {
                hdfsPort = Integer.parseInt(fsUri.substring(i + 1));
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Could not parse port from Hadoop's 'fs.defaultFS' property: " + fsUri, ex);
            }
        }
    }
    miniHdfs = new MiniDFSCluster(hdfsPort, conf, clusterNodes, !skipDfsFormat, true, null, null);
    miniHdfs.waitActive();
    conf = miniHdfs.getConfiguration(0);
    miniYarn = new MiniYARNCluster("minillama", clusterNodes, 1, 1);
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

    miniYarn.init(conf);
    miniYarn.start();
    conf = miniYarn.getConfig();

    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    return conf;
}

From source file: com.cloudera.llama.am.yarn.TestLlamaAMWithYarn.java

License: Apache License
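
Here createUserForTesting backs the proxy-user configuration while building a mini-YARN Configuration that uses the fair scheduler.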

private Configuration createMiniYarnConfig(boolean usePortInName) throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.set("yarn.scheduler.fair.allocation.file", "test-fair-scheduler.xml");
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class, FairScheduler.class);

    //proxy user config
    String llamaProxyUser = System.getProperty("user.name");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".hosts", "*");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".groups", "*");
    String[] userGroups = new String[] { "g" };
    UserGroupInformation.createUserForTesting(llamaProxyUser, userGroups);
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, usePortInName);
    return conf;
}

From source file: com.mellanox.r4h.MiniDFSCluster.java

License: Apache License
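
This helper creates a test UGI for the given user and groups, then opens an HftpFileSystem inside doAs() so the filesystem is accessed as that user.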

/**
 * @return a {@link HftpFileSystem} object as the specified user.
 */
public HftpFileSystem getHftpFileSystemAs(final String username, final Configuration conf, final int nnIndex,
        final String... groups) throws IOException, InterruptedException {
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, groups);
    return ugi.doAs(new PrivilegedExceptionAction<HftpFileSystem>() {
        @Override
        public HftpFileSystem run() throws Exception {
            return getHftpFileSystem(nnIndex);
        }
    });
}

From source file: com.mellanox.r4h.TestReadWhileWriting.java

License: Apache License
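
This test uses createUserForTesting to build a second user (the current user's short name with an "x" suffix, in "supergroup") that appends to the file after the original writer's soft lease expires.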

/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // create cluster
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        //change the lease limits.
        cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

        //wait for the cluster
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        final Path p = new Path(DIR, "file1");
        final int half = BLOCK_SIZE / 2;

        //a. On Machine M1, Create file. Write half block of data.
        //   Invoke DFSOutputStream.hflush() on the dfs file handle.
        //   Do not close file yet.
        {
            final FSDataOutputStream out = fs.create(p, true,
                    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3,
                    BLOCK_SIZE);
            write(out, 0, half);

            //hflush
            ((DFSOutputStream) out.getWrappedStream()).hflush();
        }

        //b. On another machine M2, open file and verify that the half-block
        //   of data can be read successfully.
        checkFile(p, half, conf);
        MiniDFSClusterBridge.getAppendTestUtilLOG().info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem) fs).dfs.getLeaseRenewer().interruptAndJoin();

        //c. On M1, append another half block of data.  Close file on M1.
        {
            //sleep to let the lease expire.
            Thread.sleep(2 * SOFT_LEASE_LIMIT);

            final UserGroupInformation current = UserGroupInformation.getCurrentUser();
            final UserGroupInformation ugi = UserGroupInformation
                    .createUserForTesting(current.getShortUserName() + "x", new String[] { "supergroup" });
            final DistributedFileSystem dfs = ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
                @Override
                public DistributedFileSystem run() throws Exception {
                    return (DistributedFileSystem) FileSystem.newInstance(conf);
                }
            });
            final FSDataOutputStream out = append(dfs, p);
            write(out, 0, half);
            out.close();
        }

        //d. On M2, open file and read 1 block of data from it. Close file.
        checkFile(p, 2 * half, conf);
    } finally {
        cluster.shutdown();
    }
}

From source file: com.mellanox.r4h.TestReadWhileWriting.java

License: Apache License
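
checkFile opens the file as a freshly created test user, verifying that another account sees the expected visible length and can read the expected bytes.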

static void checkFile(Path p, int expectedsize, final Configuration conf)
        throws IOException, InterruptedException {
    //open the file with another user account
    final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "_" + ++userCount;

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
            new String[] { "supergroup" });

    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);

    final HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);

    //Check visible length
    Assert.assertTrue(in.getVisibleLength() >= expectedsize);

    //Able to read?
    for (int i = 0; i < expectedsize; i++) {
        Assert.assertEquals((byte) i, (byte) in.read());
    }

    in.close();
}

From source file: com.streamsets.datacollector.hdfs.cluster.KafkaToHDFSIT.java

License: Apache License
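
This integration test creates a "foo" test user in the "all" and "supergroup" groups before starting a MiniDFSCluster and a YARN-backed pipeline cluster fed from Kafka.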

@BeforeClass
public static void beforeClass() throws Exception {
    //setup kafka to read from
    KafkaTestUtil.startZookeeper();
    KafkaTestUtil.startKafkaBrokers(1);
    KafkaTestUtil.createTopic(TOPIC, 1, 1);
    producer = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);
    produceRecords(RECORDS_PRODUCED);

    // setting some dummy Kerberos settings to be able to test a misconfiguration
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    UserGroupInformation.createUserForTesting("foo", new String[] { "all", "supergroup" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();

    //setup Cluster and start pipeline
    YarnConfiguration entries = new YarnConfiguration();
    //TODO: Investigate why this is required for test to pass. Is yarn messing with the miniDFS cluster configuration?
    entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    ClusterUtil.setupCluster(TEST_NAME, getPipelineJson(), entries);
    serverURI = ClusterUtil.getServerURI();
    miniSDC = ClusterUtil.getMiniSDC();
}

From source file: com.streamsets.datacollector.hdfs.standalone.HdfsDestinationPipelineOperationsIT.java

License: Apache License
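
The same pattern in a standalone pipeline test: a "foo" test user is registered before the MiniDFSCluster is built.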

@BeforeClass
public static void beforeClass() throws Exception {
    // setting some dummy Kerberos settings to be able to test a misconfiguration
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    UserGroupInformation.createUserForTesting("foo", new String[] { "all", "supergroup" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    PipelineOperationsStandaloneIT.beforeClass(getPipelineJson());
}

From source file: com.streamsets.datacollector.hdfs.standalone.HdfsDestinationPipelineRunIT.java

License: Apache License
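
A per-test variant of the previous setup: the test user and MiniDFSCluster are created in setUp() rather than in a @BeforeClass method.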

@Before
@Override
public void setUp() throws Exception {
    super.setUp();
    // setting some dummy Kerberos settings to be able to test a misconfiguration
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    UserGroupInformation.createUserForTesting("foo", new String[] { "all", "supergroup" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();
}

From source file: com.streamsets.datacollector.hdfs.standalone.TestHdfsDestinationPipelineOperations.java

License: Apache License
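
The same @BeforeClass setup once more, here feeding TestPipelineOperationsStandalone.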

@BeforeClass
public static void beforeClass() throws Exception {
    // setting some dummy Kerberos settings to be able to test a misconfiguration
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    UserGroupInformation.createUserForTesting("foo", new String[] { "all", "supergroup" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    TestPipelineOperationsStandalone.beforeClass(getPipelineJson());
}

From source file: com.streamsets.datacollector.multiple.MultiplePipelinesComplexIT.java

License: Apache License
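
This larger fixture combines Kafka producers, a Flume Avro source, and a MiniDFSCluster; createUserForTesting supplies the "foo" user for the HDFS side of the pipelines.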

/**
 * The extending test must call this method in the method scheduled to run before class.
 * @throws Exception
 */
@BeforeClass
public static void beforeClass() throws Exception {

    //setup kafka to read from
    KafkaTestUtil.startZookeeper();
    KafkaTestUtil.startKafkaBrokers(1);

    KafkaTestUtil.createTopic(TOPIC1, 1, 1);
    KafkaTestUtil.createTopic(TOPIC2, 1, 1);
    KafkaTestUtil.createTopic(TOPIC3, 1, 1);

    producer1 = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);
    producer2 = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);

    e = Executors.newFixedThreadPool(2);
    e.submit(new Runnable() {
        @Override
        public void run() {
            int index = 0;
            while (true) {
                producer1.send(new KeyedMessage<>(TOPIC1, "0", "Hello Kafka" + index));
                ThreadUtil.sleep(200);
                index = (index + 1) % 10;
            }
        }
    });

    e.submit(new Runnable() {
        @Override
        public void run() {
            int index = 0;
            while (true) {
                producer2.send(new KeyedMessage<>(TOPIC2, "0", "Hello Kafka" + index));
                ThreadUtil.sleep(200);
                index = (index + 1) % 10;
            }
        }
    });

    //setup flume to write to
    source = new AvroSource();
    ch = new MemoryChannel();
    Configurables.configure(ch, new Context());

    Context context = new Context();
    //This should match what's present in the pipeline.json file
    flumePort = TestUtil.getFreePort();
    context.put("port", String.valueOf(flumePort));
    context.put("bind", "localhost");
    Configurables.configure(source, context);

    List<Channel> channels = new ArrayList<>();
    channels.add(ch);
    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(channels);
    source.setChannelProcessor(new ChannelProcessor(rcs));
    source.start();

    //HDFS settings
    // setting some dummy Kerberos settings to be able to test a misconfiguration
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    UserGroupInformation.createUserForTesting("foo", new String[] { "all", "supergroup" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();

    MultiplePipelinesBaseIT.beforeClass(getPipelineJson());
}