Example usage for org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled()

A list of usage examples for org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled()

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled().

Prototype

public static boolean isSecurityEnabled() 

Document

Determine whether UserGroupInformation is using Kerberos to establish user identities, or is relying on simple authentication.
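
A minimal sketch of the common pattern, assuming a keytab-based service login (the principal and keytab path below are placeholder values):

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class SecurityCheckExample {
    public static void login() throws IOException {
        if (UserGroupInformation.isSecurityEnabled()) {
            // Kerberos is configured; authenticate from a service keytab.
            UserGroupInformation.loginUserFromKeytab("service/host@EXAMPLE.COM",
                    "/etc/security/keytabs/service.keytab");
        }
        // Otherwise Hadoop uses simple authentication and no login is needed.
    }
}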

Usage

From source file: org.apache.hcatalog.mapreduce.Security.java

License: Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, HiveMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up an hcat delegation token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        //Oozie does not change the service field of the token,
        //so by default the token's service will be an empty Text ("")
        //HiveClient will look for a token using TokenSelector.selectToken() with the service
        //set to an empty Text if the hive.metastore.token.signature property is null
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get a token set up by oozie, so let's get one ourselves here.
            // we essentially get a token per unique output HCatTableInfo - this is
            // done because through Pig, the setOutput() method is called multiple times.
            // We want to get the token only once per unique output HCatTableInfo -
            // we cannot just get one token, since in the multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token would get cancelled by the output committer and the subsequent
            // stores would fail - by tying the token to the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores, and the TokenSelector
            // will correctly pick the right tokens, which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // get delegation tokens from the hcat server and store them in the "job".
            // These will be used to publish partitions to
            // hcat, normally in OutputCommitter.commitJob().
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName()),
                    tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                HCatHadoopShims.Instance.get().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}
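
The comments above hinge on getTokenSignature(outputJobInfo), which ties each token to a unique output. The following is a hypothetical sketch of such a signature, reconstructed only from the comment's description (a concatenation of dbname, tablename, and partition key-values); the actual implementation may differ:

// Hypothetical sketch only -- reconstructed from the comments above, not
// from the real getTokenSignature() implementation. One signature per
// unique output database/table/partition combination.
static String tokenSignatureSketch(String dbName, String tableName,
        java.util.Map<String, String> partitionValues) {
    StringBuilder sig = new StringBuilder(dbName).append('.').append(tableName);
    if (partitionValues != null) {
        for (java.util.Map.Entry<String, String> e : partitionValues.entrySet()) {
            sig.append(';').append(e.getKey()).append('=').append(e.getValue());
        }
    }
    return sig.toString();
}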

From source file: org.apache.hcatalog.templeton.Main.java

License: Apache License

public Server runServer(int port) throws Exception {

    //Authenticate using keytab
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation.loginUserFromKeytab(conf.kerberosPrincipal(), conf.kerberosKeytab());
    }

    // Create the Jetty server
    Server server = new Server(port);
    ServletContextHandler root = new ServletContextHandler(server, "/");

    // Add the Auth filter
    FilterHolder fHolder = makeAuthFilter();

    /* 
     * We add filters for each of the URIs supported by templeton.
     * If we added the entire sub-structure using '/*', the mapreduce 
     * notification cannot give the callback to templeton in secure mode.
     * This is because mapreduce does not use secure credentials for 
     * callbacks. So jetty would fail the request as unauthorized.
     */
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/ddl/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/pig/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/hive/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/queue/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/mapreduce/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/status/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/version/*", FilterMapping.REQUEST);

    // Connect Jersey
    ServletHolder h = new ServletHolder(new ServletContainer(makeJerseyConfig()));
    root.addServlet(h, "/" + SERVLET_PATH + "/*");
    // Add any redirects
    addRedirects(server);

    // Start the server
    server.start();
    this.server = server;
    return server;
}

From source file: org.apache.hcatalog.templeton.Main.java

License: Apache License

public FilterHolder makeAuthFilter() {
    FilterHolder authFilter = new FilterHolder(AuthFilter.class);
    if (UserGroupInformation.isSecurityEnabled()) {
        authFilter.setInitParameter("dfs.web.authentication.signature.secret", conf.kerberosSecret());
        authFilter.setInitParameter("dfs.web.authentication.kerberos.principal", conf.kerberosPrincipal());
        authFilter.setInitParameter("dfs.web.authentication.kerberos.keytab", conf.kerberosKeytab());
    }
    return authFilter;
}

From source file: org.apache.hcatalog.templeton.SecureProxySupport.java

License: Apache License

public SecureProxySupport() {
    isEnabled = UserGroupInformation.isSecurityEnabled();
}
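
Caching the flag once at construction avoids repeated static lookups on every request. A sketch of a typical caller, assuming a hypothetical isEnabled() accessor on the class:

// Sketch of a caller; isEnabled() is assumed here, not confirmed from source.
SecureProxySupport proxy = new SecureProxySupport();
if (proxy.isEnabled()) {
    // Security is on: delegation tokens must be acquired before proxying.
} else {
    // Simple authentication: no token handling is required.
}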

From source file: org.apache.hcatalog.templeton.Server.java

License: Apache License

/**
 * Verify that we have a valid user.  Throw an exception if invalid.
 */
public void verifyUser() throws NotAuthorizedException {
    if (getUser() == null) {
        String msg = "No user found.";
        if (!UserGroupInformation.isSecurityEnabled())
            msg += "  Missing " + PseudoAuthenticator.USER_NAME + " parameter.";
        throw new NotAuthorizedException(msg);
    }
}
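
In simple-authentication mode, the caller identifies itself via the query parameter named by PseudoAuthenticator.USER_NAME ("user.name"). A sketch of a client request against an unsecured Templeton server; the host, port, and user are placeholder assumptions:

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class StatusCheck {
    public static void main(String[] args) throws IOException {
        // Supply the user.name parameter so verifyUser() finds a user.
        // Host, port, and user name are placeholders.
        URL url = new URL("http://localhost:50111/templeton/v1/status?user.name=alice");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
    }
}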

From source file: org.apache.helix.provisioning.yarn.AppLauncher.java

License: Apache License

public boolean launch() throws Exception {
    LOG.info("Running Client");
    yarnClient.start();

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    _appId = appContext.getApplicationId();
    _appMasterConfig.setAppId(_appId.getId());
    String appName = _applicationSpec.getAppName();
    _appMasterConfig.setAppName(appName);
    _appMasterConfig.setApplicationSpecFactory(_applicationSpecFactory.getClass().getCanonicalName());
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    LOG.info("Copy Application archive file from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(_conf);

    // get packages for each component
    Map<String, URI> packages = new HashMap<String, URI>();
    packages.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterArchive.toURI());
    packages.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), _yamlConfigFile.toURI());
    for (String serviceName : _applicationSpec.getServices()) {
        packages.put(serviceName, _applicationSpec.getServicePackage(serviceName));
    }
    Map<String, Path> hdfsDest = new HashMap<String, Path>();
    Map<String, String> classpathMap = new HashMap<String, String>();
    for (String name : packages.keySet()) {
        URI uri = packages.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        hdfsDest.put(name, dst);
        String classpath = generateClasspathAfterExtraction(name, new File(uri));
        classpathMap.put(name, classpath);
        _appMasterConfig.setClasspath(name, classpath);
        String serviceMainClass = _applicationSpec.getServiceMainClass(name);
        if (serviceMainClass != null) {
            _appMasterConfig.setMainClass(name, serviceMainClass);
        }
    }

    // Get YAML files describing all workflows to immediately start
    Map<String, URI> workflowFiles = new HashMap<String, URI>();
    List<TaskConfig> taskConfigs = _applicationSpec.getTaskConfigs();
    if (taskConfigs != null) {
        for (TaskConfig taskConfig : taskConfigs) {
            URI configUri = taskConfig.getYamlURI();
            if (taskConfig.name != null && configUri != null) {
                workflowFiles.put(taskConfig.name, taskConfig.getYamlURI());
            }
        }
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LocalResource appMasterPkg = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    LocalResource appSpecFile = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString()));
    localResources.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterPkg);
    localResources.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), appSpecFile);
    for (String name : workflowFiles.keySet()) {
        URI uri = workflowFiles.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        LocalResource taskLocalResource = setupLocalResource(fs, dst);
        localResources.put(AppMasterConfig.AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + name,
                taskLocalResource);
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*").append(File.pathSeparatorChar);
    classPathEnv.append(classpathMap.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    for (String c : _conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (_conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n");
    // Set the env variables to be setup in the env where the application master will be run
    Map<String, String> env = new HashMap<String, String>(_appMasterConfig.getEnv());
    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master launch command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    int amMemory = 4096;
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(AppMasterLauncher.class.getCanonicalName());
    // Set params for Application Master
    // vargs.add("--num_containers " + String.valueOf(numContainers));

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = _conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    int amPriority = 0;
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    String amQueue = "default";
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    LOG.info("Submitting application to YARN Resource Manager");

    ApplicationId applicationId = yarnClient.submitApplication(appContext);

    LOG.info("Submitted application with applicationId:" + applicationId);

    return true;
}
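
The token-setup block near the end of launch() is a self-contained pattern; a condensed sketch of the same steps (look up the RM principal as renewer, fetch HDFS delegation tokens, serialize them for the AM container):

// Condensed sketch of the token-setup step from launch() above.
// Uses the same Hadoop/YARN imports as the surrounding class.
static ByteBuffer collectTokens(Configuration conf, FileSystem fs) throws IOException {
    String renewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
    if (renewer == null || renewer.isEmpty()) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
    }
    Credentials credentials = new Credentials();
    // Tokens for the default file system accumulate in 'credentials'.
    fs.addDelegationTokens(renewer, credentials);
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
}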

From source file: org.apache.hive.hcatalog.mapreduce.Security.java

License: Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, IMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up an hcat delegation token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        //Oozie does not change the service field of the token,
        //so by default the token's service will be an empty Text ("")
        //HiveClient will look for a token using TokenSelector.selectToken() with the service
        //set to an empty Text if the hive.metastore.token.signature property is null
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get a token set up by oozie, so let's get one ourselves here.
            // we essentially get a token per unique output HCatTableInfo - this is
            // done because through Pig, the setOutput() method is called multiple times.
            // We want to get the token only once per unique output HCatTableInfo -
            // we cannot just get one token, since in the multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token would get cancelled by the output committer and the subsequent
            // stores would fail - by tying the token to the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores, and the TokenSelector
            // will correctly pick the right tokens, which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // get delegation tokens from the hcat server and store them in the "job".
            // These will be used to publish partitions to
            // hcat, normally in OutputCommitter.commitJob().
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(
                    client.getDelegationToken(ugi.getUserName(), ugi.getUserName()), tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                ShimLoader.getHadoopShims().getHCatShim().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}

From source file: org.apache.hive.hcatalog.templeton.Main.java

License: Apache License

public Server runServer(int port) throws Exception {

    //Authenticate using keytab
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation.loginUserFromKeytab(conf.kerberosPrincipal(), conf.kerberosKeytab());
    }

    // Create the Jetty server. If jetty conf file exists, use that to create server
    // to have more control.
    Server server = null;
    if (StringUtils.isEmpty(conf.jettyConfiguration())) {
        server = new Server(port);
    } else {
        FileInputStream jettyConf = new FileInputStream(conf.jettyConfiguration());
        XmlConfiguration configuration = new XmlConfiguration(jettyConf);
        server = (Server) configuration.configure();
    }

    ServletContextHandler root = new ServletContextHandler(server, "/");

    // Add the Auth filter
    FilterHolder fHolder = makeAuthFilter();

    /* 
     * We add filters for each of the URIs supported by templeton.
     * If we added the entire sub-structure using '/*', the mapreduce 
     * notification cannot give the callback to templeton in secure mode.
     * This is because mapreduce does not use secure credentials for 
     * callbacks. So jetty would fail the request as unauthorized.
     */
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/ddl/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/pig/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/hive/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/sqoop/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/queue/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/jobs/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/mapreduce/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/status/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/version/*", FilterMapping.REQUEST);

    if (conf.getBoolean(AppConfig.XSRF_FILTER_ENABLED, false)) {
        root.addFilter(makeXSRFFilter(), "/" + SERVLET_PATH + "/*", FilterMapping.REQUEST);
        LOG.debug("XSRF filter enabled");
    } else {
        LOG.warn("XSRF filter disabled");
    }

    // Connect Jersey
    ServletHolder h = new ServletHolder(new ServletContainer(makeJerseyConfig()));
    root.addServlet(h, "/" + SERVLET_PATH + "/*");
    // Add any redirects
    addRedirects(server);

    // Start the server
    server.start();
    this.server = server;
    return server;
}
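
Compared with the org.apache.hcatalog version, this runServer can also build the server from a Jetty XML configuration and adds optional XSRF protection. A sketch of enabling the XSRF filter ahead of server start, assuming AppConfig extends Hadoop's Configuration (so setBoolean() is available); the accessor name here is hypothetical:

// Hypothetical accessor; the constant is the one read in runServer() above.
AppConfig conf = Main.getAppConfigInstance();
conf.setBoolean(AppConfig.XSRF_FILTER_ENABLED, true);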

From source file: org.apache.hive.hcatalog.templeton.Main.java

License: Apache License

public FilterHolder makeAuthFilter() {
    FilterHolder authFilter = new FilterHolder(AuthFilter.class);
    UserNameHandler.allowAnonymous(authFilter);
    if (UserGroupInformation.isSecurityEnabled()) {
        //http://hadoop.apache.org/docs/r1.1.1/api/org/apache/hadoop/security/authentication/server/AuthenticationFilter.html
        authFilter.setInitParameter("dfs.web.authentication.signature.secret", conf.kerberosSecret());
        //https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.2/src/packages/templates/conf/hdfs-site.xml
        authFilter.setInitParameter("dfs.web.authentication.kerberos.principal", conf.kerberosPrincipal());
        //https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.2/src/packages/templates/conf/hdfs-site.xml
        authFilter.setInitParameter("dfs.web.authentication.kerberos.keytab", conf.kerberosKeytab());
    }
    return authFilter;
}

From source file: org.apache.hive.hcatalog.templeton.Server.java

License: Apache License

/**
 * Verify that we have a valid user.  Throw an exception if invalid.
 */
public void verifyUser() throws NotAuthorizedException {
    String requestingUser = getRequestingUser();
    if (requestingUser == null) {
        String msg = "No user found.";
        if (!UserGroupInformation.isSecurityEnabled()) {
            msg += "  Missing " + PseudoAuthenticator.USER_NAME + " parameter.";
        }
        throw new NotAuthorizedException(msg);
    }
    if (doAs != null && !doAs.equals(requestingUser)) {
        /* if the doAs user is different from the logged-in user, we need to check
           that the logged-in user is authorized to run as 'doAs' */
        ProxyUserSupport.validate(requestingUser, getRequestingHost(requestingUser, request), doAs);
    }
}
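
When doAs names a different user than the authenticated caller, ProxyUserSupport.validate enforces Hadoop's proxy-user rules before the request proceeds. A sketch of the corresponding request shape; the parameter names follow the code above, while host, port, and user names are placeholder assumptions:

// 'alice' authenticates (simple auth) but asks to run as 'bob'; the server
// will check that alice may proxy for bob before serving the request.
// Requires java.net.URL.
URL url = new URL("http://localhost:50111/templeton/v1/jobs?user.name=alice&doAs=bob");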