Example usage for org.apache.hadoop.security UserGroupInformation createRemoteUser

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.createRemoteUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createRemoteUser(String user) 

Document

Create a user from a login name.
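
Before the project examples below, a minimal self-contained sketch of the basic pattern (the user name and path are placeholders, not taken from any project here): createRemoteUser wraps a bare login name in a UGI without performing a Kerberos login or attaching tokens, and doAs then runs an action under that identity. On an insecure cluster the remote service trusts the supplied name; on a secure cluster the UGI would need credentials added first.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateRemoteUserSketch {
    public static void main(String[] args) throws Exception {
        // No Kerberos login and no tokens: just an identity wrapper around a name.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");

        // Run filesystem calls as that user.
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
                FileSystem fs = FileSystem.get(new Configuration());
                fs.mkdirs(new Path("/tmp/alice")); // placeholder path
                return null;
            }
        });
    }
}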

Usage

From source file:TestRawParascaleFileSystemBase.java

License:Apache License

protected void init() throws URISyntaxException, IOException {
    groupInformation = UserGroupInformation.createRemoteUser("hadoop");
    fs = getFileSystem(groupInformation);
    final Configuration conf = getConf();
    fs.initialize(new URI(conf.get(FS_DEFAULT_NAME)), getConf());
}

From source file:ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }
}
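
Note: the allTokens buffer assembled above (the submitter's credentials minus the AM->RM token) is what the AM typically passes into each container launch context, so workers inherit the tokens but cannot impersonate the AM toward the RM. A hedged sketch of that hand-off, assuming localResources, env, and commands are built elsewhere:

ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(localResources, env, commands,
        null /* serviceData */, allTokens.duplicate(), null /* acls */);
nmClientAsync.startContainerAsync(container, ctx);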

From source file:ParascaleFileSystem.java

License:Apache License

/**
 * {@inheritDoc}
 */
@Override
public void initialize(final URI uri, final Configuration conf) throws IOException {
    final URI rawUri;
    final RawParascaleFileSystem rawParascaleFileSystem;
    UserGroupInformation groupInformation;
    try {
        if (conf.get("hadoop.job.ugi") != null) {
            String username = new StringTokenizer(conf.get("hadoop.job.ugi"), ",").nextToken();
            groupInformation = UserGroupInformation.createRemoteUser(username);
        } else {
            groupInformation = UserGroupInformation.getCurrentUser();
        }
        rawParascaleFileSystem = new RawParascaleFileSystem(groupInformation);
        fs = conf.getBoolean(CRC_FILESYSTEM, false) ? new ChecksumFsWrapper(rawParascaleFileSystem)
                : rawParascaleFileSystem;
        rawUri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
    } catch (final URISyntaxException e) {
        throw new IOException(e);
    }
    // initialize with the raw URI - RawFS expects it without a path!
    fs.initialize(rawUri, conf);
    if (!rawParascaleFileSystem.isMountPointAbsolute()) {
        throw new IOException(
                "Mountpoint " + rawParascaleFileSystem.getMountPoint() + " is not an absolute path");
    }
    if (!rawParascaleFileSystem.mountPointExists()) {
        throw new IOException("WorkingDirectory does not exist - can not mount Parascale " + "filesystem at "
                + rawParascaleFileSystem.getMountPath());
    }
    if (!rawParascaleFileSystem.createHomeDirectory()) {
        throw new IOException("Can not create HomeDirectory");
    }

}

From source file:alluxio.yarn.ApplicationMaster.java

License:Apache License

/**
 * @param args Command line arguments to launch application master
 */
public static void main(String[] args) {
    Options options = new Options();
    options.addOption("num_workers", true, "Number of Alluxio workers to launch. Default 1");
    options.addOption("master_address", true, "(Required) Address to run Alluxio master");
    options.addOption("resource_path", true, "(Required) HDFS path containing the Application Master");

    try {
        LOG.info("Starting Application Master with args {}", Arrays.toString(args));
        final CommandLine cliParser = new GnuParser().parse(options, args);

        YarnConfiguration conf = new YarnConfiguration();
        UserGroupInformation.setConfiguration(conf);
        if (UserGroupInformation.isSecurityEnabled()) {
            String user = System.getenv("ALLUXIO_USER");
            UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
            for (Token token : UserGroupInformation.getCurrentUser().getTokens()) {
                ugi.addToken(token);
            }
            LOG.info("UserGroupInformation: " + ugi);
            ugi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    runApplicationMaster(cliParser);
                    return null;
                }
            });
        } else {
            runApplicationMaster(cliParser);
        }
    } catch (Exception e) {
        LOG.error("Error running Application Master", e);
        System.exit(1);
    }
}

From source file:azkaban.jobtype.HadoopJavaJobRunnerMain.java

License:Apache License

public HadoopJavaJobRunnerMain() throws Exception {
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            cancelJob();
        }
    });

    try {
        _jobName = System.getenv(ProcessJob.JOB_NAME_ENV);
        String propsFile = System.getenv(ProcessJob.JOB_PROP_ENV);

        _logger = Logger.getRootLogger();
        _logger.removeAllAppenders();
        ConsoleAppender appender = new ConsoleAppender(DEFAULT_LAYOUT);
        appender.activateOptions();
        _logger.addAppender(appender);
        _logger.setLevel(Level.INFO); //Explicitly setting level to INFO

        Properties props = new Properties();
        props.load(new BufferedReader(new FileReader(propsFile)));

        HadoopConfigurationInjector.injectResources(new Props(null, props));

        final Configuration conf = new Configuration();

        UserGroupInformation.setConfiguration(conf);
        securityEnabled = UserGroupInformation.isSecurityEnabled();

        _logger.info("Running job " + _jobName);
        String className = props.getProperty(JOB_CLASS);
        if (className == null) {
            throw new Exception("Class name is not set.");
        }
        _logger.info("Class name " + className);

        UserGroupInformation loginUser = null;
        UserGroupInformation proxyUser = null;

        if (shouldProxy(props)) {
            String userToProxy = props.getProperty("user.to.proxy");
            if (securityEnabled) {
                String filelocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
                _logger.info("Found token file " + filelocation);
                _logger.info("Security enabled is " + UserGroupInformation.isSecurityEnabled());

                _logger.info("Setting mapreduce.job.credentials.binary to " + filelocation);
                System.setProperty("mapreduce.job.credentials.binary", filelocation);

                _logger.info("Proxying enabled.");

                loginUser = UserGroupInformation.getLoginUser();

                _logger.info("Current logged in user is " + loginUser.getUserName());

                proxyUser = UserGroupInformation.createProxyUser(userToProxy, loginUser);
                for (Token<?> token : loginUser.getTokens()) {
                    proxyUser.addToken(token);
                }
            } else {
                proxyUser = UserGroupInformation.createRemoteUser(userToProxy);
            }
            _logger.info("Proxied as user " + userToProxy);
        }

        // Create the object using proxy
        if (shouldProxy(props)) {
            _javaObject = getObjectAsProxyUser(props, _logger, _jobName, className, proxyUser);
        } else {
            _javaObject = getObject(_jobName, className, props, _logger);
        }

        if (_javaObject == null) {
            _logger.info("Could not create java object to run job: " + className);
            throw new Exception("Could not create running object");
        }
        _logger.info("Got object " + _javaObject.toString());

        _cancelMethod = props.getProperty(CANCEL_METHOD_PARAM, DEFAULT_CANCEL_METHOD);

        final String runMethod = props.getProperty(RUN_METHOD_PARAM, DEFAULT_RUN_METHOD);
        _logger.info("Invoking method " + runMethod);

        if (shouldProxy(props)) {
            _logger.info("Proxying enabled.");
            runMethodAsUser(props, _javaObject, runMethod, proxyUser);
        } else {
            _logger.info("Proxy check failed, not proxying run.");
            runMethod(_javaObject, runMethod);
        }

        _isFinished = true;

        // Get the generated properties and store them to disk, to be read
        // by ProcessJob.
        try {
            final Method generatedPropertiesMethod = _javaObject.getClass()
                    .getMethod(GET_GENERATED_PROPERTIES_METHOD, new Class<?>[] {});
            Object outputGendProps = generatedPropertiesMethod.invoke(_javaObject, new Object[] {});

            if (outputGendProps != null) {
                final Method toPropertiesMethod = outputGendProps.getClass().getMethod("toProperties",
                        new Class<?>[] {});
                Properties properties = (Properties) toPropertiesMethod.invoke(outputGendProps,
                        new Object[] {});

                Props outputProps = new Props(null, properties);
                outputGeneratedProperties(outputProps);
            } else {
                _logger.info(GET_GENERATED_PROPERTIES_METHOD
                        + " method returned null.  No properties to pass along");
            }
        } catch (NoSuchMethodException e) {
            _logger.info(String.format(
                    "Apparently there isn't a method[%s] on object[%s], using " + "empty Props object instead.",
                    GET_GENERATED_PROPERTIES_METHOD, _javaObject));
            outputGeneratedProperties(new Props());
        }
    } catch (Exception e) {
        _isFinished = true;
        throw e;
    }
}

From source file:azkaban.jobtype.HadoopSecureWrapperUtils.java

License:Apache License

/**
 * Sets up the UserGroupInformation proxyUser object so that calling code can call doAs.
 * Returns null if the jobProps do not call for a proxyUser.
 *
 * @param jobProps
 * @param tokenFile
 *          pass tokenFile if known. Pass null if the tokenFile is in the environmental variable
 *          already.
 * @param log
 * @return null if there is no need to run as a proxyUser, otherwise a valid proxyUser
 *         that can doAs
 */
public static UserGroupInformation setupProxyUser(Properties jobProps, String tokenFile, Logger log) {
    UserGroupInformation proxyUser = null;

    if (!HadoopSecureWrapperUtils.shouldProxy(jobProps)) {
        log.info("submitting job as original submitter, not proxying");
        return proxyUser;
    }

    // set up hadoop related configurations
    final Configuration conf = new Configuration();
    UserGroupInformation.setConfiguration(conf);
    boolean securityEnabled = UserGroupInformation.isSecurityEnabled();

    // setting up proxy user if required
    try {
        String userToProxy = null;
        userToProxy = jobProps.getProperty(HadoopSecurityManager.USER_TO_PROXY);
        if (securityEnabled) {
            proxyUser = HadoopSecureWrapperUtils.createSecurityEnabledProxyUser(userToProxy, tokenFile, log);
            log.info("security enabled, proxying as user " + userToProxy);
        } else {
            proxyUser = UserGroupInformation.createRemoteUser(userToProxy);
            log.info("security not enabled, proxying as user " + userToProxy);
        }
    } catch (IOException e) {
        log.error("HadoopSecureWrapperUtils.setupProxyUser threw an IOException", e);
    }

    return proxyUser;
}
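
A hedged sketch of how calling code might consume the returned proxyUser (runJob is a hypothetical job entry point); when setupProxyUser returns null, the job simply runs as the original submitter:

UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(jobProps, null, log);
if (proxyUser != null) {
    proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            runJob(); // hypothetical entry point
            return null;
        }
    });
} else {
    runJob();
}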

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

/**
 * Create a proxied user based on the explicit user name, taking other
 * parameters necessary from properties file.
 *
 * @throws IOException
 */
@Override
public synchronized UserGroupInformation getProxiedUser(String userToProxy)
        throws HadoopSecurityManagerException {

    if (userToProxy == null) {
        throw new HadoopSecurityManagerException("userToProxy can't be null");
    }

    UserGroupInformation ugi = userUgiMap.get(userToProxy);
    if (ugi == null) {
        logger.info("proxy user " + userToProxy + " not exist. Creating new proxy user");
        if (shouldProxy) {
            try {
                ugi = UserGroupInformation.createProxyUser(userToProxy, UserGroupInformation.getLoginUser());
            } catch (IOException e) {
                throw new HadoopSecurityManagerException("Failed to create proxy user", e);
            }
        } else {
            ugi = UserGroupInformation.createRemoteUser(userToProxy);
        }
        userUgiMap.putIfAbsent(userToProxy, ugi);
    }
    return ugi;
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

/**
 * Create a proxied user based on the explicit user name, taking other
 * parameters necessary from properties file.
 *
 * @throws IOException
 */
@Override
public synchronized UserGroupInformation getProxiedUser(String userToProxy)
        throws HadoopSecurityManagerException {

    if (userToProxy == null) {
        throw new HadoopSecurityManagerException("userToProxy can't be null");
    }

    UserGroupInformation ugi = userUgiMap.get(userToProxy);
    if (ugi == null) {
        logger.info("proxy user " + userToProxy + " not exist. Creating new proxy user");
        if (shouldProxy) {
            try {
                ugi = UserGroupInformation.createProxyUser(userToProxy, UserGroupInformation.getLoginUser());
            } catch (IOException e) {
                throw new HadoopSecurityManagerException("Failed to create proxy user", e);
            }
        } else {
            ugi = UserGroupInformation.createRemoteUser(userToProxy);
        }
        userUgiMap.putIfAbsent(userToProxy, ugi);
    }
    return ugi;
}
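
Caller-side usage is the same for both manager versions; a brief sketch with a hypothetical securityManager instance (the cached UGI carries whatever tokens the proxy path attached):

UserGroupInformation ugi = securityManager.getProxiedUser("alice"); // hypothetical instance and user
FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws IOException {
        return FileSystem.get(new Configuration());
    }
});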

From source file:cn.edu.buaa.act.petuumOnYarn.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from
    // clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers;
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

}

From source file:co.cask.cdap.common.guice.FileContextProvider.java

License:Apache License

private UserGroupInformation createUGI() {
    String hdfsUser = cConf.get(Constants.CFG_HDFS_USER);
    try {
        if (hdfsUser == null || UserGroupInformation.isSecurityEnabled()) {
            if (hdfsUser != null) {
                LOG.debug("Ignoring configuration {}={}, running on secure Hadoop", Constants.CFG_HDFS_USER,
                        hdfsUser);
            }
            LOG.debug("Getting filesystem for current user");
            return UserGroupInformation.getCurrentUser();
        } else {
            LOG.debug("Getting filesystem for user {}", hdfsUser);
            return UserGroupInformation.createRemoteUser(hdfsUser);
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
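
Consumers of this provider would typically wrap FileContext access in doAs on the returned UGI. A hedged sketch, assuming a Hadoop Configuration field named hConf (FileContext.getFileContext is the standard Hadoop API; the surrounding wiring is illustrative):

UserGroupInformation ugi = createUGI();
FileContext fc = ugi.doAs(new PrivilegedAction<FileContext>() {
    @Override
    public FileContext run() {
        try {
            return FileContext.getFileContext(hConf); // hConf: assumed Configuration field
        } catch (UnsupportedFileSystemException e) {
            throw Throwables.propagate(e);
        }
    }
});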