Example usage for org.apache.hadoop.security.UserGroupInformation.createUserForTesting

Introduction

On this page you can find example usage for org.apache.hadoop.security.UserGroupInformation#createUserForTesting.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createUserForTesting(String user, String[] userGroups) 

Document

Create a UGI for testing HDFS and MapReduce.
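
Before the listings, here is a minimal, self-contained sketch of the typical pattern, assuming a placeholder user "alice" and group "testgroup" (neither is taken from the listings below): build the in-memory test UGI, then run code under it with doAs. No Kerberos login or OS group lookup is involved; the group names passed in are simply attached to the returned user.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CreateUserForTestingSketch {
    public static void main(String[] args) throws Exception {
        // Build an in-memory UGI for tests; the groups are taken at face value.
        UserGroupInformation testUser = UserGroupInformation.createUserForTesting("alice",
                new String[] { "testgroup" });

        // Execute an action as the test user; code inside run() sees "alice"
        // as the current user.
        testUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                System.out.println("Running as: " + UserGroupInformation.getCurrentUser().getUserName());
                return null;
            }
        });
    }
}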

Usage

From source file: org.apache.druid.indexer.path.GranularityPathSpecTest.java

License: Apache License

@Test
public void testAddInputPath() throws Exception {
    UserGroupInformation
            .setLoginUser(UserGroupInformation.createUserForTesting("test", new String[] { "testGroup" }));
    HadoopIngestionSpec spec = new HadoopIngestionSpec(new DataSchema("foo", null, new AggregatorFactory[0],
            new UniformGranularitySpec(Granularities.DAY, Granularities.MINUTE,
                    ImmutableList.of(Intervals.of("2015-11-06T00:00Z/2015-11-07T00:00Z"))),
            null, jsonMapper), new HadoopIOConfig(null, null, null), DEFAULT_TUNING_CONFIG);

    granularityPathSpec.setDataGranularity(Granularities.HOUR);
    granularityPathSpec.setFilePattern(".*");
    granularityPathSpec.setInputFormat(TextInputFormat.class);

    Job job = Job.getInstance();
    String formatStr = "file:%s/%s;org.apache.hadoop.mapreduce.lib.input.TextInputFormat";

    testFolder.newFolder("test", "y=2015", "m=11", "d=06", "H=00");
    testFolder.newFolder("test", "y=2015", "m=11", "d=06", "H=02");
    testFolder.newFolder("test", "y=2015", "m=11", "d=06", "H=05");
    testFolder.newFile("test/y=2015/m=11/d=06/H=00/file1");
    testFolder.newFile("test/y=2015/m=11/d=06/H=02/file2");
    testFolder.newFile("test/y=2015/m=11/d=06/H=05/file3");
    testFolder.newFile("test/y=2015/m=11/d=06/H=05/file4");

    granularityPathSpec.setInputPath(testFolder.getRoot().getPath() + "/test");

    granularityPathSpec.addInputPaths(HadoopDruidIndexerConfig.fromSpec(spec), job);

    String actual = job.getConfiguration().get("mapreduce.input.multipleinputs.dir.formats");

    String expected = Joiner.on(",")
            .join(Lists.newArrayList(
                    StringUtils.format(formatStr, testFolder.getRoot(), "test/y=2015/m=11/d=06/H=00/file1"),
                    StringUtils.format(formatStr, testFolder.getRoot(), "test/y=2015/m=11/d=06/H=02/file2"),
                    StringUtils.format(formatStr, testFolder.getRoot(), "test/y=2015/m=11/d=06/H=05/file3"),
                    StringUtils.format(formatStr, testFolder.getRoot(), "test/y=2015/m=11/d=06/H=05/file4")));

    Assert.assertEquals("Did not find expected input paths", expected, actual);
}

From source file: org.apache.druid.indexer.path.GranularityPathSpecTest.java

License: Apache License

@Test
public void testIntervalTrimming() throws Exception {
    UserGroupInformation
            .setLoginUser(UserGroupInformation.createUserForTesting("test", new String[] { "testGroup" }));
    HadoopIngestionSpec spec = new HadoopIngestionSpec(
            new DataSchema("foo", null, new AggregatorFactory[0],
                    new UniformGranularitySpec(Granularities.DAY, Granularities.ALL,
                            ImmutableList.of(Intervals.of("2015-01-01T11Z/2015-01-02T05Z"))),
                    null, jsonMapper),
            new HadoopIOConfig(null, null, null), DEFAULT_TUNING_CONFIG);

    granularityPathSpec.setDataGranularity(Granularities.HOUR);
    granularityPathSpec.setPathFormat("yyyy/MM/dd/HH");
    granularityPathSpec.setFilePattern(".*");
    granularityPathSpec.setInputFormat(TextInputFormat.class);

    Job job = Job.getInstance();
    String formatStr = "file:%s/%s;org.apache.hadoop.mapreduce.lib.input.TextInputFormat";

    createFile(testFolder, "test/2015/01/01/00/file1", "test/2015/01/01/10/file2", "test/2015/01/01/18/file3",
            "test/2015/01/02/00/file1", "test/2015/01/02/03/file2", "test/2015/01/02/05/file3",
            "test/2015/01/02/07/file4", "test/2015/01/02/09/file5");

    granularityPathSpec.setInputPath(testFolder.getRoot().getPath() + "/test");

    granularityPathSpec.addInputPaths(HadoopDruidIndexerConfig.fromSpec(spec), job);

    String actual = job.getConfiguration().get("mapreduce.input.multipleinputs.dir.formats");

    String expected = Joiner.on(",")
            .join(Lists.newArrayList(
                    StringUtils.format(formatStr, testFolder.getRoot(), "test/2015/01/01/18/file3"),
                    StringUtils.format(formatStr, testFolder.getRoot(), "test/2015/01/02/00/file1"),
                    StringUtils.format(formatStr, testFolder.getRoot(), "test/2015/01/02/03/file2")));

    Assert.assertEquals("Did not find expected input paths", expected, actual);
}

From source file: org.apache.falcon.entity.AbstractTestBase.java

License: Apache License

@BeforeClass
public void initConfigStore() throws Exception {
    String configPath = new URI(StartupProperties.get().getProperty("config.store.uri")).getPath();
    String location = configPath + "-" + getClass().getName();
    StartupProperties.get().setProperty("config.store.uri", location);
    FileUtils.deleteDirectory(new File(location));

    cleanupStore();
    String listeners = StartupProperties.get().getProperty("configstore.listeners");
    listeners = listeners.replace("org.apache.falcon.service.SharedLibraryHostingService", "");
    listeners = listeners.replace("org.apache.falcon.service.EntitySLAMonitoringService", "");
    StartupProperties.get().setProperty("configstore.listeners", listeners);
    store = ConfigurationStore.get();
    store.init();

    CurrentUser.authenticate(FalconTestUtil.TEST_USER_2);
    UserGroupInformation.createUserForTesting(FalconTestUtil.TEST_USER_2, new String[] { "testgroup" });
}

From source file: org.apache.falcon.hadoop.HadoopClientFactoryTest.java

License: Apache License

@Test
public void testCreateFileSystem() throws Exception {
    Configuration conf = embeddedCluster.getConf();

    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation realUser = UserGroupInformation.createUserForTesting(FalconTestUtil.TEST_USER_2,
            new String[] { "testgroup" });
    UserGroupInformation.createProxyUserForTesting("proxyuser", realUser, new String[] { "proxygroup" });

    URI uri = new URI(conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY));
    Assert.assertNotNull(uri);
    FileSystem fs = HadoopClientFactory.get().createFileSystem(realUser, uri, conf);
    Assert.assertNotNull(fs);
}
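
The two Falcon listings here also pair createUserForTesting with createProxyUserForTesting, which builds a proxy UGI backed by a real UGI. A minimal sketch of that relationship, using placeholder names rather than values from the tests:

import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserForTestingSketch {
    public static void main(String[] args) throws Exception {
        // The real (authenticated) user backing the proxy.
        UserGroupInformation realUser = UserGroupInformation.createUserForTesting("realuser",
                new String[] { "testgroup" });

        // A proxy user that acts on behalf of realUser.
        UserGroupInformation proxyUser = UserGroupInformation.createProxyUserForTesting("proxyuser", realUser,
                new String[] { "proxygroup" });

        System.out.println(proxyUser.getUserName());               // proxyuser
        System.out.println(proxyUser.getRealUser().getUserName()); // realuser
    }
}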

From source file: org.apache.falcon.hadoop.HadoopClientFactoryTest.java

License: Apache License

@Test
public void testCreateFileSystemWithUser() throws Exception {
    Configuration conf = embeddedCluster.getConf();

    UserGroupInformation realUser = UserGroupInformation.createUserForTesting(FalconTestUtil.TEST_USER_2,
            new String[] { "testgroup" });
    UserGroupInformation.createProxyUserForTesting("proxyuser", realUser, new String[] { "proxygroup" });
    UserGroupInformation.setConfiguration(conf);

    URI uri = new URI(conf.get(HadoopClientFactory.FS_DEFAULT_NAME_KEY));
    Assert.assertNotNull(uri);

    CurrentUser.authenticate(System.getProperty("user.name"));
    FileSystem fs = HadoopClientFactory.get().createFileSystem(CurrentUser.getProxyUGI(), uri, conf);
    Assert.assertNotNull(fs);
}

From source file: org.apache.falcon.security.DefaultAuthorizationProviderTest.java

License: Apache License

@BeforeClass
public void setUp() throws Exception {
    realUser = UserGroupInformation.createUserForTesting(FalconTestUtil.TEST_USER_1,
            new String[] { "falcon", });

    CurrentUser.authenticate(EntityBuilderTestUtil.USER);
    org.testng.Assert.assertEquals(CurrentUser.getUser(), EntityBuilderTestUtil.USER);

    configStore = ConfigurationStore.get();

    addClusterEntity();
    addFeedEntity();
    addProcessEntity();
    org.testng.Assert.assertNotNull(processEntity);
}

From source file: org.apache.oozie.test.XTestCase.java

License: Apache License

private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
    if (dfsCluster == null && mrCluster == null) {
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", testCaseDir);
        }
        int taskTrackers = 2;
        int dataNodes = 2;
        String oozieUser = getOozieUser();
        JobConf conf = createDFSConfig();
        String[] userGroups = new String[] { getTestGroup(), getTestGroup2() };
        UserGroupInformation.createUserForTesting(oozieUser, userGroups);
        UserGroupInformation.createUserForTesting(getTestUser(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser2(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser3(), new String[] { "users" });

        try {
            dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
            FileSystem fileSystem = dfsCluster.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("target/test-data" + "/minicluster/mapred"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster/mapred"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
            String nnURI = fileSystem.getUri().toString();
            int numDirs = 1;
            String[] racks = null;
            String[] hosts = null;
            mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
            JobConf jobConf = mrCluster.createJobConf();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapred.job.tracker"));
            String rmAddress = jobConf.get("yarn.resourcemanager.address");
            if (rmAddress != null) {
                System.setProperty(OOZIE_TEST_JOB_TRACKER, rmAddress);
            }
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.default.name"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        } catch (Exception ex) {
            shutdownMiniCluster();
            throw ex;
        }
        new MiniClusterShutdownMonitor().start();
    }
}

From source file: org.apache.phoenix.end2end.SystemTablePermissionsIT.java

License: Apache License

@Test
public void testSystemTablePermissions() throws Exception {
    testUtil = new HBaseTestingUtility();
    clientProperties = new Properties();
    Configuration conf = testUtil.getConfiguration();
    setCommonConfigProperties(conf);
    conf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "false");
    clientProperties.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "false");
    testUtil.startMiniCluster(1);
    final UserGroupInformation superUser = UserGroupInformation.createUserForTesting(SUPERUSER, new String[0]);
    final UserGroupInformation regularUser = UserGroupInformation.createUserForTesting("user", new String[0]);

    superUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            createTable();
            readTable();
            return null;
        }
    });

    Set<String> tables = getHBaseTables();
    assertTrue("HBase tables do not include expected Phoenix tables: " + tables,
            tables.containsAll(PHOENIX_SYSTEM_TABLES));

    // Grant permission to the system tables for the unprivileged user
    superUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            try {
                grantPermissions(regularUser.getShortUserName(), PHOENIX_SYSTEM_TABLES, Action.EXEC,
                        Action.READ);
                grantPermissions(regularUser.getShortUserName(), Collections.singleton(TABLE_NAME),
                        Action.READ);
            } catch (Throwable e) {
                if (e instanceof Exception) {
                    throw (Exception) e;
                } else {
                    throw new Exception(e);
                }
            }
            return null;
        }
    });

    // Make sure that the unprivileged user can read the table
    regularUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            // We expect this to not throw an error
            readTable();
            return null;
        }
    });
}

From source file: org.apache.phoenix.end2end.SystemTablePermissionsIT.java

License: Apache License

@Test
public void testNamespaceMappedSystemTables() throws Exception {
    testUtil = new HBaseTestingUtility();
    clientProperties = new Properties();
    Configuration conf = testUtil.getConfiguration();
    setCommonConfigProperties(conf);
    testUtil.getConfiguration().set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
    clientProperties.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
    testUtil.startMiniCluster(1);
    final UserGroupInformation superUser = UserGroupInformation.createUserForTesting(SUPERUSER, new String[0]);
    final UserGroupInformation regularUser = UserGroupInformation.createUserForTesting("user", new String[0]);

    superUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            createTable();
            readTable();
            return null;
        }
    });

    Set<String> tables = getHBaseTables();
    assertTrue("HBase tables do not include expected Phoenix tables: " + tables,
            tables.containsAll(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES));

    // Grant permission to the system tables for the unprivileged user
    // An unprivileged user should only need to be able to Read and eXecute on them.
    superUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            try {
                grantPermissions(regularUser.getShortUserName(), PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES,
                        Action.EXEC, Action.READ);
                grantPermissions(regularUser.getShortUserName(), Collections.singleton(TABLE_NAME),
                        Action.READ);
            } catch (Throwable e) {
                if (e instanceof Exception) {
                    throw (Exception) e;
                } else {
                    throw new Exception(e);
                }
            }
            return null;
        }
    });

    regularUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            // We expect this to not throw an error
            readTable();
            return null;
        }
    });
}

From source file: org.apache.phoenix.queryserver.server.PhoenixDoAsCallbackTest.java

License: Apache License

@Test
public void ugiInstancesAreCached() throws Exception {
    Configuration conf = new Configuration(false);
    UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
    PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);

    UserGroupInformation ugi1 = callback.createProxyUser("user1");
    assertEquals(1, callback.getCache().size());
    assertTrue(ugi1.getRealUser() == serverUgi);
    UserGroupInformation ugi2 = callback.createProxyUser("user2");
    assertEquals(2, callback.getCache().size());
    assertTrue(ugi2.getRealUser() == serverUgi);

    UserGroupInformation ugi1Reference = callback.createProxyUser("user1");
    assertTrue(ugi1 == ugi1Reference);
    assertEquals(2, callback.getCache().size());
}