Example usage for org.apache.hadoop.security UserGroupInformation doAs

List of usage examples for org.apache.hadoop.security UserGroupInformation doAs

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.doAs, collected from open source projects.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException 

Document

Run the given action as the user, potentially throwing an exception.
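
The examples below follow a common pattern: obtain or create a UserGroupInformation for the target user and wrap the privileged work in a PrivilegedExceptionAction (or a PrivilegedAction when no checked exception is involved). The following is a minimal sketch of that pattern; the proxy-user name "alice" and the path check are illustrative placeholders, not taken from any of the examples below.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        // "alice" is a placeholder proxy-user name; the action runs with her
        // identity layered on top of the current login user's credentials.
        UserGroupInformation ugi = UserGroupInformation.createProxyUser("alice",
                UserGroupInformation.getLoginUser());
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                // Everything inside run() executes as the proxy user.
                FileSystem fs = FileSystem.get(conf);
                return fs.exists(new Path("/tmp"));
            }
        });
        System.out.println("/tmp exists: " + exists);
    }
}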

Usage

From source file:com.cloudera.lib.service.hadoop.HadoopService.java

License:Open Source License

@Override
public <T> T execute(String user, final Configuration conf, final FileSystemExecutor<T> executor)
        throws HadoopException {
    Check.notEmpty(user, "user");
    Check.notNull(conf, "conf");
    Check.notNull(executor, "executor");
    if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) {
        throw new HadoopException(HadoopException.ERROR.H06, NAME_NODE_PROPERTY);
    }
    try {
        validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
        UserGroupInformation ugi = getUGI(user);
        return ugi.doAs(new PrivilegedExceptionAction<T>() {
            public T run() throws Exception {
                Configuration namenodeConf = createNameNodeConf(conf);
                FileSystem fs = createFileSystem(namenodeConf);
                Instrumentation instrumentation = getServer().get(Instrumentation.class);
                Instrumentation.Cron cron = instrumentation.createCron();
                try {
                    checkNameNodeHealth(fs);
                    cron.start();
                    return executor.execute(fs);
                } finally {
                    cron.stop();
                    instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(), cron);
                    closeFileSystem(fs);
                }
            }
        });
    } catch (HadoopException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new HadoopException(HadoopException.ERROR.H03, ex);
    }
}

From source file:com.cloudera.lib.service.hadoop.HadoopService.java

License:Open Source License

@Override
public <T> T execute(String user, final Configuration conf, final JobClientExecutor<T> executor)
        throws HadoopException {
    Check.notEmpty(user, "user");
    Check.notNull(conf, "conf");
    Check.notNull(executor, "executor");
    if (conf.get(JOB_TRACKER_PROPERTY) == null || conf.getTrimmed(JOB_TRACKER_PROPERTY).length() == 0) {
        throw new HadoopException(HadoopException.ERROR.H06, JOB_TRACKER_PROPERTY);
    }
    if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) {
        throw new HadoopException(HadoopException.ERROR.H06, NAME_NODE_PROPERTY);
    }
    try {
        validateJobtracker(new URI(conf.get(JOB_TRACKER_PROPERTY)).getAuthority());
        validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
        UserGroupInformation ugi = getUGI(user);
        return ugi.doAs(new PrivilegedExceptionAction<T>() {
            public T run() throws Exception {
                JobConf jobtrackerConf = createJobTrackerConf(conf);
                Configuration namenodeConf = createNameNodeConf(conf);
                JobClient jobClient = createJobClient(jobtrackerConf);
                try {
                    checkJobTrackerHealth(jobClient);
                    FileSystem fs = createFileSystem(namenodeConf);
                    Instrumentation instrumentation = getServer().get(Instrumentation.class);
                    Instrumentation.Cron cron = instrumentation.createCron();
                    try {
                        checkNameNodeHealth(fs);
                        cron.start();
                        return executor.execute(jobClient, fs);
                    } finally {
                        cron.stop();
                        instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(),
                                cron);
                        closeFileSystem(fs);
                    }
                } finally {
                    closeJobClient(jobClient);
                }
            }
        });
    } catch (HadoopException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new HadoopException(HadoopException.ERROR.H04, ex);
    }
}

From source file:com.cloudera.lib.service.hadoop.HadoopService.java

License:Open Source License

public FileSystem createFileSystemInternal(String user, final Configuration conf)
        throws IOException, HadoopException {
    Check.notEmpty(user, "user");
    Check.notNull(conf, "conf");
    try {
        validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
        UserGroupInformation ugi = getUGI(user);
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws Exception {
                Configuration namenodeConf = createNameNodeConf(conf);
                return createFileSystem(namenodeConf);
            }
        });
    } catch (IOException ex) {
        throw ex;
    } catch (HadoopException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new HadoopException(HadoopException.ERROR.H08, ex.getMessage(), ex);
    }
}

From source file:com.continuuity.weave.internal.yarn.ports.AMRMClientImpl.java

License:Apache License

@Override
public synchronized void start() {
    final YarnConfiguration conf = new YarnConfiguration(getConfig());
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);

    UserGroupInformation currentUser;
    try {
        currentUser = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new YarnException(e);
    }

    if (UserGroupInformation.isSecurityEnabled()) {
        String tokenURLEncodedStr = System.getenv().get(ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
        Token<? extends TokenIdentifier> token = new Token<TokenIdentifier>();

        try {
            token.decodeFromUrlString(tokenURLEncodedStr);
        } catch (IOException e) {
            throw new YarnException(e);
        }

        SecurityUtil.setTokenService(token, rmAddress);
        if (LOG.isDebugEnabled()) {
            LOG.debug("AppMasterToken is " + token);
        }
        currentUser.addToken(token);
    }

    rmClient = currentUser.doAs(new PrivilegedAction<AMRMProtocol>() {
        @Override
        public AMRMProtocol run() {
            return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf);
        }
    });
    LOG.debug("Connecting to ResourceManager at " + rmAddress);
    super.start();
}

From source file:com.datatorrent.stram.cli.ApexCli.java

License:Apache License

public static void main(final String[] args) throws Exception {
    final ApexCli shell = new ApexCli();
    shell.preImpersonationInit(args);
    String hadoopUserName = System.getenv("HADOOP_USER_NAME");
    if (UserGroupInformation.isSecurityEnabled() && StringUtils.isNotBlank(hadoopUserName)
            && !hadoopUserName.equals(UserGroupInformation.getLoginUser().getUserName())) {
        LOG.info("You ({}) are running as user {}", UserGroupInformation.getLoginUser().getUserName(),
                hadoopUserName);
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(hadoopUserName,
                UserGroupInformation.getLoginUser());
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                shell.mainHelper();
                return null;
            }
        });
    } else {
        shell.mainHelper();
    }
}

From source file:com.datatorrent.stram.cli.DTCli.java

License:Apache License

public static void main(final String[] args) throws Exception {
    final DTCli shell = new DTCli();
    shell.preImpersonationInit(args);
    String hadoopUserName = System.getenv("HADOOP_USER_NAME");
    if (UserGroupInformation.isSecurityEnabled() && StringUtils.isNotBlank(hadoopUserName)
            && !hadoopUserName.equals(UserGroupInformation.getLoginUser().getUserName())) {
        LOG.info("You ({}) are running as user {}", UserGroupInformation.getLoginUser().getUserName(),
                hadoopUserName);
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(hadoopUserName,
                UserGroupInformation.getLoginUser());
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                shell.mainHelper();
                return null;
            }
        });
    } else {
        shell.mainHelper();
    }
}

From source file:com.datatorrent.stram.client.StramClientUtils.java

License:Apache License

public static <T> T doAs(String userName, PrivilegedExceptionAction<T> action) throws Exception {
    if (StringUtils.isNotBlank(userName)
            && !userName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
        LOG.info("Executing command as {}", userName);
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(userName,
                UserGroupInformation.getLoginUser());
        return ugi.doAs(action);
    } else {
        LOG.info("Executing command as if there is no login info: {}", userName);
        return action.run();
    }
}

From source file:com.datatorrent.stram.security.StramUserLogin.java

License:Apache License

public static long refreshTokens(long tokenLifeTime, String destinationDir, String destinationFile,
        final Configuration conf, String hdfsKeyTabFile, final Credentials credentials,
        final InetSocketAddress rmAddress, final boolean renewRMToken) throws IOException {
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    //renew tokens
    final String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
    }
    FileSystem fs = FileSystem.newInstance(conf);
    File keyTabFile;
    try {
        keyTabFile = FSUtil.copyToLocalFileSystem(fs, destinationDir, destinationFile, hdfsKeyTabFile, conf);
    } finally {
        fs.close();
    }
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            UserGroupInformation.getCurrentUser().getUserName(), keyTabFile.getAbsolutePath());
    try {
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                FileSystem fs1 = FileSystem.newInstance(conf);
                YarnClient yarnClient = null;
                if (renewRMToken) {
                    yarnClient = YarnClient.createYarnClient();
                    yarnClient.init(conf);
                    yarnClient.start();
                }
                Credentials creds = new Credentials();
                try {
                    fs1.addDelegationTokens(tokenRenewer, creds);
                    if (renewRMToken) {
                        org.apache.hadoop.yarn.api.records.Token rmDelToken = yarnClient
                                .getRMDelegationToken(new Text(tokenRenewer));
                        Token<RMDelegationTokenIdentifier> rmToken = ConverterUtils.convertFromYarn(rmDelToken,
                                rmAddress);
                        creds.addToken(rmToken.getService(), rmToken);
                    }
                } finally {
                    fs1.close();
                    if (renewRMToken) {
                        yarnClient.stop();
                    }
                }
                credentials.addAll(creds);
                return null;
            }
        });
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    } catch (InterruptedException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    } catch (IOException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    }
    LOG.debug("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("updated token: {}", token);
    }
    keyTabFile.delete();
    return expiryTime;
}

From source file:com.datatorrent.stram.util.SecureExecutor.java

License:Apache License

public static <T> T execute(final SecureExecutor.WorkLoad<T> workLoad) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        return loginUser.doAs(new PrivilegedAction<T>() {
            @Override
            public T run() {
                return workLoad.run();
            }
        });
    } else {
        return workLoad.run();
    }
}

From source file:com.ebay.jetstream.event.processor.hdfs.HdfsClient.java

License:MIT License

protected void initHdfs() {
    hdpConf = new Configuration();
    final String hdfsUrl = config.getHdfsUrl();
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(config.getUser());

    try {
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                hdpConf.set("hadoop.job.ugi", config.getUser());
                hdpConf.set("fs.defaultFS", hdfsUrl);
                if (hdfsUrl.startsWith("hdfs")) {
                    for (Object keyObj : config.getHadoopProperties().keySet()) {
                        String key = (String) keyObj;
                        hdpConf.set(key, config.getHadoopProperties().getProperty(key));
                    }
                    fs = new DistributedFileSystem();
                    fs.initialize(URI.create(hdfsUrl), hdpConf);
                } else {
                    fs = FileSystem.get(hdpConf);
                }
                LOGGER.log(Level.INFO, "Connected to HDFS with the following properties: hdfsUrl " + hdfsUrl);
                return null;
            }

        });
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, "Error initializing HdfsClient. Error:" + e);
    }
}