Example usage for org.apache.hadoop.security UserGroupInformation doAs

List of usage examples for org.apache.hadoop.security UserGroupInformation doAs

Introduction

On this page you can find example usage for org.apache.hadoop.security UserGroupInformation doAs.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException 

Document

Run the given action as the user, potentially throwing an exception.
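
To show the shape of a typical call, here is a minimal, self-contained sketch. It is not taken from any of the examples below; the way the UserGroupInformation instance is obtained and the work done inside run() are placeholders.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Placeholder UGI; real callers typically use createRemoteUser(...),
        // createProxyUser(...), or a UGI that carries delegation tokens.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // doAs runs the action with the UGI's credentials and returns run()'s result,
        // potentially throwing IOException or InterruptedException.
        String runAs = ugi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });
        System.out.println("Action ran as: " + runAs);
    }
}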

Usage

From source file:alluxio.yarn.ApplicationMaster.java

License:Apache License

/**
 * @param args Command line arguments to launch application master
 */
public static void main(String[] args) {
    Options options = new Options();
    options.addOption("num_workers", true, "Number of Alluxio workers to launch. Default 1");
    options.addOption("master_address", true, "(Required) Address to run Alluxio master");
    options.addOption("resource_path", true, "(Required) HDFS path containing the Application Master");

    try {
        LOG.info("Starting Application Master with args {}", Arrays.toString(args));
        final CommandLine cliParser = new GnuParser().parse(options, args);

        YarnConfiguration conf = new YarnConfiguration();
        UserGroupInformation.setConfiguration(conf);
        if (UserGroupInformation.isSecurityEnabled()) {
            String user = System.getenv("ALLUXIO_USER");
            UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
            for (Token token : UserGroupInformation.getCurrentUser().getTokens()) {
                ugi.addToken(token);
            }
            LOG.info("UserGroupInformation: " + ugi);
            ugi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    runApplicationMaster(cliParser);
                    return null;
                }
            });
        } else {
            runApplicationMaster(cliParser);
        }
    } catch (Exception e) {
        LOG.error("Error running Application Master", e);
        System.exit(1);
    }
}
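
Because PrivilegedExceptionAction has a single run() method, the anonymous class above can be written as a lambda on Java 8 and later. A sketch of the same call (not the original source), where the cast keeps the PrivilegedExceptionAction overload of doAs unambiguous:

            ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
                runApplicationMaster(cliParser);
                return null;
            });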

From source file:azkaban.jobExecutor.ProcessJob.java

License:Apache License

@Override
public void cancel() throws InterruptedException {
    if (process == null)
        throw new IllegalStateException("Not started.");
    boolean killed = process.softKill(KILL_TIME_MS, TimeUnit.MILLISECONDS);
    if (!killed) {
        warn("Kill with signal TERM failed. Killing with KILL signal.");
        process.hardKill();
    }

    // Kill any Hadoop jobs this process spawned on the YARN cluster.
    String azExecId = jobProps.getString(CommonJobProperties.EXEC_ID);
    final String logFilePath = String.format("%s/_job.%s.%s.log", getWorkingDirectory(), azExecId, getId());
    Set<String> applicationIds = HadoopJobUtils.findApplicationIdFromLog(logFilePath, getLog());
    if (applicationIds != null && applicationIds.size() > 0) {
        Props props = new Props();
        props.putAll(getJobProps());
        props.putAll(getSysProps());
        Properties properties = new Properties();
        properties.putAll(jobProps.getFlattened());
        try {
            if (HadoopSecureWrapperUtils.shouldProxy(properties)) {
                File file = HadoopSecureWrapperUtils.getHadoopTokens(
                        HadoopJobUtils.loadHadoopSecurityManager(getSysProps(), getLog()), props, getLog());
                if (file != null) {
                    UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(properties,
                            file.getAbsolutePath(), getLog());
                    proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
                        @Override
                        public Void run() throws Exception {
                            HadoopJobUtils.killAllSpawnedHadoopJobs(logFilePath, getLog());
                            return null;
                        }
                    });
                }
            } else {
                HadoopJobUtils.killAllSpawnedHadoopJobs(logFilePath, getLog());
            }
        } catch (Throwable t) {
            Logger.getRootLogger().warn("something happened while trying to kill all spawned jobs", t);
        }
    }
    // Done killing any spawned Hadoop jobs on the YARN cluster.
}

From source file:azkaban.jobtype.connectors.HdfsToTeradataJobRunnerMain.java

License:Apache License

public void run() throws IOException, InterruptedException {
    String jobName = System.getenv(AbstractProcessJob.JOB_NAME_ENV);
    _logger.info("Running job " + jobName);

    if (HadoopSecureWrapperUtils.shouldProxy(_jobProps)) {
        String tokenFile = System.getenv(HADOOP_TOKEN_FILE_LOCATION);

        UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(_jobProps, tokenFile, _logger);

        proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                copyHdfsToTd();
                return null;
            }
        });
    } else {
        copyHdfsToTd();
    }
}

From source file:azkaban.jobtype.connectors.teradata.HdfsToTeradataJobRunnerMain.java

License:Apache License

public void run() throws IOException, InterruptedException {
    String jobName = System.getenv(AbstractProcessJob.JOB_NAME_ENV);
    _logger.info("Running job " + jobName);
    preprocess();

    if (HadoopSecureWrapperUtils.shouldProxy(_jobProps)) {
        String tokenFile = System.getenv(HADOOP_TOKEN_FILE_LOCATION);

        UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(_jobProps, tokenFile, _logger);

        proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                copyHdfsToTd();
                return null;
            }
        });
    } else {
        copyHdfsToTd();
    }
}

From source file:azkaban.jobtype.connectors.teradata.TeradataToHdfsJobRunnerMain.java

License:Apache License

public void run() throws IOException, InterruptedException {
    String jobName = System.getenv(AbstractProcessJob.JOB_NAME_ENV);
    _logger.info("Running job " + jobName);

    if (HadoopSecureWrapperUtils.shouldProxy(_jobProps)) {
        String tokenFile = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
        UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(_jobProps, tokenFile, _logger);

        proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                runCopyTdToHdfs();
                return null;
            }
        });
    } else {
        runCopyTdToHdfs();
    }
}

From source file:azkaban.jobtype.HadoopJavaJobRunnerMain.java

License:Apache License

private void runMethodAsUser(Properties props, final Object obj, final String runMethod,
        final UserGroupInformation ugi) throws IOException, InterruptedException {
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {

            Configuration conf = new Configuration();
            if (System.getenv(HADOOP_TOKEN_FILE_LOCATION) != null) {
                conf.set(MAPREDUCE_JOB_CREDENTIALS_BINARY, System.getenv(HADOOP_TOKEN_FILE_LOCATION));
            }

            runMethod(obj, runMethod);
            return null;
        }
    });
}

From source file:azkaban.jobtype.HadoopJavaJobRunnerMain.java

License:Apache License

private static Object getObjectAsProxyUser(final Properties prop, final Logger logger, final String jobName,
        final String className, final UserGroupInformation ugi) throws Exception {

    Object obj = ugi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            return getObject(jobName, className, prop, logger);
        }
    });

    return obj;
}
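
Note that this example uses the value returned by doAs: whatever run() returns is passed back to the caller, matching the generic return type T in the prototype above.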

From source file:azkaban.jobtype.HadoopJobUtils.java

License:Apache License

/**
 * This method is a decorator around the killAllSpawnedHadoopJobs method.
 * It takes additional parameters to determine whether killAllSpawnedHadoopJobs needs to be executed
 * using doAs as a different user.
 * 
 * @param logFilePath Azkaban log file path
 * @param jobProps Azkaban job props
 * @param tokenFile Pass in the tokenFile if the value is known. It is OK to skip if the token file is in the environment variable
 * @param log a usable logger
 */
public static void proxyUserKillAllSpawnedHadoopJobs(final String logFilePath, Props jobProps, File tokenFile,
        final Logger log) {
    Properties properties = new Properties();
    properties.putAll(jobProps.getFlattened());

    try {
        if (HadoopSecureWrapperUtils.shouldProxy(properties)) {
            UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(properties,
                    tokenFile.getAbsolutePath(), log);
            proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    HadoopJobUtils.killAllSpawnedHadoopJobs(logFilePath, log);
                    return null;
                }
            });
        } else {
            HadoopJobUtils.killAllSpawnedHadoopJobs(logFilePath, log);
        }
    } catch (Throwable t) {
        log.warn("something happened while trying to kill all spawned jobs", t);
    }
}
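
For reference, a hedged sketch of a call site for this decorator; logFilePath, jobProps, tokenFile, and log are placeholders supplied by the surrounding job code:

    // Hypothetical call; arguments come from the job's context.
    HadoopJobUtils.proxyUserKillAllSpawnedHadoopJobs(logFilePath, jobProps, tokenFile, log);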

From source file:azkaban.jobtype.HadoopSecureHiveWrapper.java

License:Apache License

public static void main(final String[] args) throws Exception {

    Properties jobProps = HadoopSecureWrapperUtils.loadAzkabanProps();
    HadoopConfigurationInjector.injectResources(new Props(null, jobProps));

    hiveScript = jobProps.getProperty("hive.script");

    if (HadoopSecureWrapperUtils.shouldProxy(jobProps)) {
        String tokenFile = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
        UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(jobProps, tokenFile, logger);
        proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                runHive(args);
                return null;
            }
        });
    } else {
        runHive(args);
    }
}

From source file:azkaban.jobtype.HadoopSecurePigWrapper.java

License:Apache License

public static void main(final String[] args) throws Exception {
    Properties jobProps = HadoopSecureWrapperUtils.loadAzkabanProps();
    props = new Props(null, jobProps);
    HadoopConfigurationInjector.injectResources(props);

    // special feature of secure pig wrapper: we will append the pig error file
    // onto system out
    pigLogFile = new File(System.getenv("PIG_LOG_FILE"));

    if (HadoopSecureWrapperUtils.shouldProxy(jobProps)) {
        String tokenFile = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
        UserGroupInformation proxyUser = HadoopSecureWrapperUtils.setupProxyUser(jobProps, tokenFile, logger);
        proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                runPigJob(args);
                return null;
            }
        });
    } else {
        runPigJob(args);
    }
}