Example usage for org.apache.hadoop.security.authorize ProxyUsers refreshSuperUserGroupsConfiguration

List of usage examples for org.apache.hadoop.security.authorize ProxyUsers refreshSuperUserGroupsConfiguration

Introduction

On this page you can find example usage for org.apache.hadoop.security.authorize ProxyUsers refreshSuperUserGroupsConfiguration.

Prototype

public static void refreshSuperUserGroupsConfiguration(Configuration conf) 

Document

Refreshes configuration using the default Proxy user prefix for properties.
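
Before looking at the real-world examples below, here is a minimal sketch of the typical call pattern: set the standard hadoop.proxyuser.<user>.hosts and hadoop.proxyuser.<user>.groups properties on a Configuration, then ask ProxyUsers to reload them. The class name and the choice of user are illustrative assumptions, not part of the API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class RefreshProxyUsersSketch {
    public static void main(String[] args) {
        // Illustrative assumption: allow the current OS user to impersonate
        // anyone from any host.
        String superUser = System.getProperty("user.name");
        Configuration conf = new Configuration();
        conf.set("hadoop.proxyuser." + superUser + ".hosts", "*");
        conf.set("hadoop.proxyuser." + superUser + ".groups", "*");

        // Reload ProxyUsers' static state from the configuration, using the
        // default "hadoop.proxyuser" prefix for the properties.
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    }
}

Because the method is static and ProxyUsers holds this state globally, the examples below call the refresh after a mini-cluster or server has been (re)configured, so that subsequent ProxyUsers.authorize checks see the latest proxy-user settings.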

Usage

From source file:com.cloudera.llama.am.MiniLlama.java

License:Apache License

private Configuration startMiniHadoop() throws Exception {
    int clusterNodes = getConf().getInt(MINI_CLUSTER_NODES_KEY, 1);
    if (System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA) == null) {
        String testBuildData = new File("target").getAbsolutePath();
        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testBuildData);
    }
    //to trigger hdfs-site.xml registration as default resource
    new HdfsConfiguration();
    Configuration conf = new YarnConfiguration();
    String llamaProxyUser = System.getProperty("user.name");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".hosts", "*");
    conf.set("hadoop.proxyuser." + llamaProxyUser + ".groups", "*");
    String[] userGroups = new String[] { "g" };
    UserGroupInformation.createUserForTesting(llamaProxyUser, userGroups);

    int hdfsPort = 0;
    String fsUri = conf.get("fs.defaultFS");
    if (fsUri != null && !fsUri.equals("file:///")) {
        int i = fsUri.lastIndexOf(":");
        if (i > -1) {
            try {
                hdfsPort = Integer.parseInt(fsUri.substring(i + 1));
            } catch (Exception ex) {
                throw new RuntimeException(
                        "Could not parse port from Hadoop's 'fs.defaultFS' property: " + fsUri);
            }
        }
    }
    miniHdfs = new MiniDFSCluster(hdfsPort, conf, clusterNodes, !skipDfsFormat, true, null, null);
    miniHdfs.waitActive();
    conf = miniHdfs.getConfiguration(0);
    miniYarn = new MiniYARNCluster("minillama", clusterNodes, 1, 1);
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);

    miniYarn.init(conf);
    miniYarn.start();
    conf = miniYarn.getConfig();

    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    return conf;
}

From source file:com.cloudera.llama.am.yarn.TestLlamaAMWithYarn.java

License:Apache License

private void startYarn(Configuration conf, int nodeManagers) throws Exception {
    miniYarn = new MiniYARNCluster("minillama", nodeManagers, 1, 1);
    miniYarn.init(conf);
    miniYarn.start();
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    Assert.assertTrue("Wait for nodemanagers to connect failed on yarn startup",
            miniYarn.waitForNodeManagersToConnect(5000));
}

From source file:com.cloudera.llama.nm.TestLlamaNMAuxiliaryService.java

License:Apache License

private void startYarn(Configuration conf) throws Exception {
    miniYarn = new MiniYARNCluster("llama.nm.plugin", 1, 1, 1);
    miniYarn.init(conf);
    miniYarn.start();
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}

From source file:com.mellanox.r4h.MiniDFSCluster.java

License:Apache License

private void initMiniDFSCluster(Configuration conf, int numDataNodes, StorageType storageType, boolean format,
        boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
        boolean manageDataDfsDirs, StartupOption startOpt, StartupOption dnStartOpt, String[] racks,
        String[] hosts, long[] simulatedCapacities, String clusterId, boolean waitSafeMode,
        boolean setupHostsFile, MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
        boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays)
        throws IOException {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
        instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;

    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class);

    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() && nnTopology.isHA()) {
        LOG.info("MiniDFSCluster disabling checkpointing in the Standby node "
                + "since no HTTP ports have been specified.");
        conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() && nnTopology.isHA()) {
        LOG.info("MiniDFSCluster disabling log-roll triggering in the "
                + "Standby node since no IPC ports have been specified.");
        conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    federation = nnTopology.isFederated();
    try {
        createNameNodesAndSetConf(nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
                enableManagedDfsDirsRedundancy, format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
        LOG.error("IOE creating namenodes. Permissions dump:\n" + createPermissionsDiagnosisString(data_dir));
        throw ioe;
    }
    if (format) {
        if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
            throw new IOException(
                    "Cannot remove data directory: " + data_dir + createPermissionsDiagnosisString(data_dir));
        }
    }

    if (startOpt == StartupOption.RECOVER) {
        return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
            dnStartOpt != null ? dnStartOpt : startOpt, racks, hosts, simulatedCapacities, setupHostsFile,
            checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}

From source file:etl.cmd.test.XTestCase.java

License:Apache License

private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
    if (dfsCluster == null && mrCluster == null) {
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", testCaseDir);
        }
        String oozieUser = getOozieUser();
        JobConf conf = createDFSConfig();
        String[] userGroups = new String[] { getTestGroup(), getTestGroup2() };
        UserGroupInformation.createUserForTesting(oozieUser, userGroups);
        UserGroupInformation.createUserForTesting(getTestUser(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser2(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser3(), new String[] { "users" });

        try {
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
            dfsCluster = builder.build();
            FileSystem fileSystem = dfsCluster.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("target/test-data" + "/minicluster/mapred"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster/mapred"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));

            mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
            Configuration jobConf = mrCluster.getConfig();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapreduce.jobtracker.address"));
            String rmAddress = jobConf.get("yarn.resourcemanager.address");
            log.info("Job tracker: " + rmAddress);
            if (rmAddress != null) {
                System.setProperty(OOZIE_TEST_JOB_TRACKER, rmAddress);
            }
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.defaultFS"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        } catch (Exception ex) {
            shutdownMiniCluster();
            throw ex;
        }
        new MiniClusterShutdownMonitor().start();
    }
}

From source file:org.apache.hive.service.auth.HiveAuthFactory.java

License:Apache License

public static void verifyProxyAccess(String realUser, String proxyUser, String ipAddress, HiveConf hiveConf)
        throws HiveSQLException {
    try {
        UserGroupInformation sessionUgi;
        if (UserGroupInformation.isSecurityEnabled()) {
            KerberosNameShim kerbName = ShimLoader.getHadoopShims().getKerberosNameShim(realUser);
            sessionUgi = UserGroupInformation.createProxyUser(kerbName.getServiceName(),
                    UserGroupInformation.getLoginUser());
        } else {
            sessionUgi = UserGroupInformation.createRemoteUser(realUser);
        }
        if (!proxyUser.equalsIgnoreCase(realUser)) {
            ProxyUsers.refreshSuperUserGroupsConfiguration(hiveConf);
            ProxyUsers.authorize(UserGroupInformation.createProxyUser(proxyUser, sessionUgi), ipAddress,
                    hiveConf);
        }
    } catch (IOException e) {
        throw new HiveSQLException("Failed to validate proxy privilege of " + realUser + " for " + proxyUser,
                "08S01", e);
    }
}

From source file:org.apache.oozie.test.XTestCase.java

License:Apache License

private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
    if (dfsCluster == null && mrCluster == null) {
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", testCaseDir);
        }
        int taskTrackers = 2;
        int dataNodes = 2;
        String oozieUser = getOozieUser();
        JobConf conf = createDFSConfig();
        String[] userGroups = new String[] { getTestGroup(), getTestGroup2() };
        UserGroupInformation.createUserForTesting(oozieUser, userGroups);
        UserGroupInformation.createUserForTesting(getTestUser(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser2(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser3(), new String[] { "users" });

        try {
            dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
            FileSystem fileSystem = dfsCluster.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("target/test-data" + "/minicluster/mapred"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster/mapred"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
            String nnURI = fileSystem.getUri().toString();
            int numDirs = 1;
            String[] racks = null;
            String[] hosts = null;
            mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
            JobConf jobConf = mrCluster.createJobConf();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapred.job.tracker"));
            String rmAddress = jobConf.get("yarn.resourcemanager.address");
            if (rmAddress != null) {
                System.setProperty(OOZIE_TEST_JOB_TRACKER, rmAddress);
            }
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.default.name"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        } catch (Exception ex) {
            shutdownMiniCluster();
            throw ex;
        }
        new MiniClusterShutdownMonitor().start();
    }
}

From source file:org.apache.phoenix.queryserver.server.Main.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    logProcessInfo(getConf());
    try {
        final boolean isKerberos = "kerberos"
                .equalsIgnoreCase(getConf().get(QueryServices.QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB));

        // handle secure cluster credentials
        if (isKerberos) {
            String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
                    getConf().get(QueryServices.QUERY_SERVER_DNS_INTERFACE_ATTRIB, "default"),
                    getConf().get(QueryServices.QUERY_SERVER_DNS_NAMESERVER_ATTRIB, "default")));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Login to " + hostname + " using "
                        + getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB) + " and principal "
                        + getConf().get(QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB) + ".");
            }
            SecurityUtil.login(getConf(), QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
                    QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB, hostname);
            LOG.info("Login successful.");
        }

        Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
                QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB, PhoenixMetaFactoryImpl.class,
                PhoenixMetaFactory.class);
        int port = getConf().getInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_HTTP_PORT);
        LOG.debug("Listening on port " + port);
        PhoenixMetaFactory factory = factoryClass.getDeclaredConstructor(Configuration.class)
                .newInstance(getConf());
        Meta meta = factory.create(Arrays.asList(args));
        Service service = new LocalService(meta);

        // Start building the Avatica HttpServer
        final HttpServer.Builder builder = new HttpServer.Builder().withPort(port).withHandler(service,
                getSerialization(getConf()));

        // Enable SPNEGO and Impersonation when using Kerberos
        if (isKerberos) {
            UserGroupInformation ugi = UserGroupInformation.getLoginUser();

            // Make sure the proxyuser configuration is up to date
            ProxyUsers.refreshSuperUserGroupsConfiguration(getConf());

            String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
            File keytab = new File(keytabPath);

            // Enable SPNEGO and impersonation (through standard Hadoop configuration means)
            builder.withSpnego(ugi.getUserName()).withAutomaticLogin(keytab)
                    .withImpersonation(new PhoenixDoAsCallback(ugi));
        }

        // Build and start the HttpServer
        server = builder.build();
        server.start();
        runningLatch.countDown();
        server.join();
        return 0;
    } catch (Throwable t) {
        LOG.fatal("Unrecoverable service error. Shutting down.", t);
        this.t = t;
        return -1;
    }
}

From source file:org.apache.phoenix.queryserver.server.PhoenixDoAsCallbackTest.java

License:Apache License

@Test
public void proxyingUsersAreCached() throws Exception {
    Configuration conf = new Configuration(false);
    // The user "server" can impersonate anyone
    conf.set("hadoop.proxyuser.server.groups", "*");
    conf.set("hadoop.proxyuser.server.hosts", "*");
    // Trigger ProxyUsers to refresh itself with the above configuration
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
    PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);

    UserGroupInformation user1 = callback.doAsRemoteUser("user1", "localhost:1234",
            new Callable<UserGroupInformation>() {
                public UserGroupInformation call() throws Exception {
                    return UserGroupInformation.getCurrentUser();
                }
            });

    UserGroupInformation user2 = callback.doAsRemoteUser("user2", "localhost:1235",
            new Callable<UserGroupInformation>() {
                public UserGroupInformation call() throws Exception {
                    return UserGroupInformation.getCurrentUser();
                }
            });

    UserGroupInformation user1Reference = callback.doAsRemoteUser("user1", "localhost:1234",
            new Callable<UserGroupInformation>() {
                public UserGroupInformation call() throws Exception {
                    return UserGroupInformation.getCurrentUser();
                }
            });

    // The UserGroupInformation.getCurrentUser() actually returns a new UGI instance, but the internal
    // subject is the same. We can verify things will work as expected that way.
    assertNotEquals(user1.hashCode(), user2.hashCode());
    assertEquals("These should be the same (cached) instance", user1.hashCode(), user1Reference.hashCode());
    assertEquals("These should be the same (cached) instance", user1, user1Reference);
}

From source file:org.apache.phoenix.queryserver.server.PhoenixRemoteUserExtractorTest.java

License:Apache License

@Test
public void testWithRemoteUserExtractorSuccess() {
    HttpServletRequest request = mock(HttpServletRequest.class);
    when(request.getRemoteUser()).thenReturn("proxyserver");
    when(request.getParameter("doAs")).thenReturn("enduser");
    when(request.getRemoteAddr()).thenReturn("localhost:1234");

    Configuration conf = new Configuration(false);
    conf.set("hadoop.proxyuser.proxyserver.groups", "*");
    conf.set("hadoop.proxyuser.proxyserver.hosts", "*");
    conf.set("phoenix.queryserver.withRemoteUserExtractor", "true");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

    PhoenixRemoteUserExtractor extractor = new PhoenixRemoteUserExtractor(conf);
    try {
        assertEquals("enduser", extractor.extract(request));
    } catch (RemoteUserExtractionException e) {
        LOG.info(e.getMessage());
    }
}