Example usage for org.apache.hadoop.security UserGroupInformation getShortUserName

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.getShortUserName() from open-source projects.

Prototype

public String getShortUserName() 

Document

Get the user's login name (the short name, without the Kerberos realm or host component).
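
Before the project examples, a minimal self-contained sketch may help; the class name below is made up for illustration. For a Kerberos login such as alice@EXAMPLE.COM, getUserName() can return the full principal, while getShortUserName() returns the short login name, here just "alice".

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class ShortUserNameDemo {
    public static void main(String[] args) throws IOException {
        // The current user comes from the OS login or a prior Kerberos/keytab login.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // getUserName() may include the Kerberos realm, e.g. "alice@EXAMPLE.COM";
        // getShortUserName() maps it to the short login name, e.g. "alice".
        System.out.println("full name:  " + ugi.getUserName());
        System.out.println("short name: " + ugi.getShortUserName());
    }
}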

Usage

From source file: org.apache.flink.yarn.YarnApplicationMasterRunner.java

License: Apache License

/**
 * The instance entry point for the YARN application master. Obtains user group
 * information and calls the main work method {@link #runApplicationMaster()} as a
 * privileged action.
 *
 * @param args The command line arguments.
 * @return The process exit code.
 */
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final String yarnClientUsername = ENV.get(YarnConfigKeys.ENV_CLIENT_USERNAME);
        require(yarnClientUsername != null, "YARN client user name environment variable {} not set",
                YarnConfigKeys.ENV_CLIENT_USERNAME);

        final UserGroupInformation currentUser;
        try {
            currentUser = UserGroupInformation.getCurrentUser();
        } catch (Throwable t) {
            throw new Exception("Cannot access UserGroupInformation information for current user", t);
        }

        LOG.info("YARN daemon runs as user {}. Running Flink Application Master/JobManager as user {}",
                currentUser.getShortUserName(), yarnClientUsername);

        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(yarnClientUsername);

        // transfer all security tokens, for example for authenticated HDFS and HBase access
        for (Token<?> token : currentUser.getTokens()) {
            ugi.addToken(token);
        }

        // run the actual work in a secured privileged action
        return ugi.doAs(new PrivilegedAction<Integer>() {
            @Override
            public Integer run() {
                return runApplicationMaster();
            }
        });
    } catch (Throwable t) {
        // make sure that whatever happens ends up in the log
        LOG.error("YARN Application Master initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}
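
The example above follows a common pattern: create a remote-user UGI for the client, copy the daemon's delegation tokens onto it, then run the real work under doAs. Below is a condensed sketch of that pattern, with illustrative (non-Flink) names:

import java.io.IOException;
import java.security.PrivilegedAction;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public final class RunAsClientUser {

    /** Runs {@code work} as {@code clientUser}, carrying over the current user's tokens. */
    public static int runAs(String clientUser, PrivilegedAction<Integer> work) throws IOException {
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        System.out.println("daemon runs as " + currentUser.getShortUserName()
                + ", work runs as " + clientUser);

        UserGroupInformation proxy = UserGroupInformation.createRemoteUser(clientUser);

        // Transfer all security tokens (e.g. for authenticated HDFS and HBase access).
        for (Token<?> token : currentUser.getTokens()) {
            proxy.addToken(token);
        }

        // Run the work as the client user in a privileged action.
        return proxy.doAs(work);
    }
}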

From source file: org.apache.flink.yarn.YarnTaskExecutorRunner.java

License: Apache License

/**
 * The instance entry point for the YARN task executor. Obtains user group
 * information and calls the main work method {@link #runTaskExecutor(org.apache.flink.configuration.Configuration)} as a
 * privileged action.
 *
 * @param args The command line arguments.
 * @return The process exit code.
 */
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final String yarnClientUsername = ENV.get(YarnConfigKeys.ENV_HADOOP_USER_NAME);
        final String localDirs = ENV.get(Environment.LOCAL_DIRS.key());
        LOG.info("Current working/local Directory: {}", localDirs);

        final String currDir = ENV.get(Environment.PWD.key());
        LOG.info("Current working Directory: {}", currDir);

        final String remoteKeytabPath = ENV.get(YarnConfigKeys.KEYTAB_PATH);
        LOG.info("TM: remote keytab path obtained {}", remoteKeytabPath);

        final String remoteKeytabPrincipal = ENV.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
        LOG.info("TM: remote keytab principal obtained {}", remoteKeytabPrincipal);

        final Configuration configuration = GlobalConfiguration.loadConfiguration(currDir);
        FileSystem.setDefaultScheme(configuration);

        // configure local directory
        String flinkTempDirs = configuration.getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, null);
        if (flinkTempDirs == null) {
            LOG.info("Setting directories for temporary file " + localDirs);
            configuration.setString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, localDirs);
        } else {
            LOG.info("Overriding YARN's temporary file directories with those "
                    + "specified in the Flink config: " + flinkTempDirs);
        }

        // tell akka to die in case of an error
        configuration.setBoolean(ConfigConstants.AKKA_JVM_EXIT_ON_FATAL_ERROR, true);

        String keytabPath = null;
        if (remoteKeytabPath != null) {
            File f = new File(currDir, Utils.KEYTAB_FILE_NAME);
            keytabPath = f.getAbsolutePath();
            LOG.info("keytab path: {}", keytabPath);
        }

        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

        LOG.info("YARN daemon is running as: {} Yarn client user obtainer: {}", currentUser.getShortUserName(),
                yarnClientUsername);

        org.apache.hadoop.conf.Configuration hadoopConfiguration = null;

        // To support the YARN secure integration test scenario
        File krb5Conf = new File(currDir, Utils.KRB5_FILE_NAME);
        if (krb5Conf.exists() && krb5Conf.canRead()) {
            String krb5Path = krb5Conf.getAbsolutePath();
            LOG.info("KRB5 Conf: {}", krb5Path);
            hadoopConfiguration = new org.apache.hadoop.conf.Configuration();
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        }

        SecurityUtils.SecurityConfiguration sc;
        if (hadoopConfiguration != null) {
            sc = new SecurityUtils.SecurityConfiguration(configuration, hadoopConfiguration);
        } else {
            sc = new SecurityUtils.SecurityConfiguration(configuration);
        }

        if (keytabPath != null && remoteKeytabPrincipal != null) {
            configuration.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
            configuration.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, remoteKeytabPrincipal);
        }

        SecurityUtils.install(sc);

        return SecurityUtils.getInstalledContext().runSecured(new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                return runTaskExecutor(configuration);
            }
        });

    } catch (Throwable t) {
        // make sure that whatever happens ends up in the log
        LOG.error("YARN TaskExecutor initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}

From source file: org.apache.flink.yarn.YarnTaskManagerRunnerFactory.java

License: Apache License

/**
 * Creates a {@link YarnTaskManagerRunnerFactory.Runner}.
 */
public static Runner create(String[] args, final Class<? extends YarnTaskManager> taskManager,
        Map<String, String> envs) throws IOException {

    EnvironmentInformation.logEnvironmentInfo(LOG, "YARN TaskManager", args);
    SignalHandler.register(LOG);
    JvmShutdownSafeguard.installAsShutdownHook(LOG);

    // try to parse the command line arguments
    final Configuration configuration;
    try {
        configuration = TaskManager.parseArgsAndLoadConfig(args);
    } catch (Throwable t) {
        LOG.error(t.getMessage(), t);
        System.exit(TaskManager.STARTUP_FAILURE_RETURN_CODE());
        return null;
    }

    // read the environment variables for YARN
    final String yarnClientUsername = envs.get(YarnConfigKeys.ENV_HADOOP_USER_NAME);
    final String localDirs = envs.get(Environment.LOCAL_DIRS.key());
    LOG.info("Current working/local Directory: {}", localDirs);

    final String currDir = envs.get(Environment.PWD.key());
    LOG.info("Current working Directory: {}", currDir);

    final String remoteKeytabPrincipal = envs.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
    LOG.info("TM: remoteKeytabPrincipal obtained {}", remoteKeytabPrincipal);

    BootstrapTools.updateTmpDirectoriesInConfiguration(configuration, localDirs);

    // tell akka to die in case of an error
    configuration.setBoolean(AkkaOptions.JVM_EXIT_ON_FATAL_ERROR, true);

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

    LOG.info("YARN daemon is running as: {} Yarn client user obtainer: {}", currentUser.getShortUserName(),
            yarnClientUsername);

    // Infer the resource identifier from the environment variable
    String containerID = Preconditions.checkNotNull(envs.get(YarnFlinkResourceManager.ENV_FLINK_CONTAINER_ID));
    final ResourceID resourceId = new ResourceID(containerID);
    LOG.info("ResourceID assigned for this container: {}", resourceId);

    File f = new File(currDir, Utils.KEYTAB_FILE_NAME);
    if (remoteKeytabPrincipal != null && f.exists()) {
        // set keytab principal and replace path with the local path of the shipped keytab file in NodeManager
        configuration.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, f.getAbsolutePath());
        configuration.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, remoteKeytabPrincipal);
    }

    try {
        SecurityConfiguration sc = new SecurityConfiguration(configuration);
        SecurityUtils.install(sc);

        return new Runner(configuration, resourceId, taskManager);
    } catch (Exception e) {
        LOG.error("Exception occurred while building Task Manager runner", e);
        throw new RuntimeException(e);
    }

}

From source file: org.apache.hawq.pxf.service.utilities.SecuredHDFS.java

License: Apache License

/**
 * The function will verify the token with NameNode if available and will
 * create a UserGroupInformation.
 *
 * Code in this function is copied from JspHelper.getTokenUGI
 *
 * @param identifier Delegation token identifier
 * @param password Delegation token password
 * @param kind the kind of token
 * @param service the service for this token
 * @param servletContext Jetty servlet context which contains the NN address
 *
 * @throws SecurityException Thrown when authentication fails
 */
private static void verifyToken(byte[] identifier, byte[] password, Text kind, Text service,
        ServletContext servletContext) {
    try {
        Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(identifier, password,
                kind, service);

        ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
        DataInputStream in = new DataInputStream(buf);
        DelegationTokenIdentifier id = new DelegationTokenIdentifier();
        id.readFields(in);

        final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(servletContext);
        if (nn != null) {
            nn.getNamesystem().verifyToken(id, token.getPassword());
        }

        UserGroupInformation userGroupInformation = id.getUser();
        userGroupInformation.addToken(token);
        LOG.debug("user " + userGroupInformation.getUserName() + " (" + userGroupInformation.getShortUserName()
                + ") authenticated");

        // re-login if necessary
        userGroupInformation.checkTGTAndReloginFromKeytab();
    } catch (IOException e) {
        throw new SecurityException("Failed to verify delegation token " + e, e);
    }
}

From source file: org.apache.hcatalog.templeton.SecureProxySupport.java

License: Apache License

private Token<?> getFSDelegationToken(String user, final Configuration conf)
        throws IOException, InterruptedException {
    LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
    final UserGroupInformation ugi = UgiFactory.getUgi(user);

    final TokenWrapper twrapper = new TokenWrapper();
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws IOException {
            FileSystem fs = FileSystem.get(conf);
            twrapper.token = fs.getDelegationToken(ugi.getShortUserName());
            return null;
        }
    });
    return twrapper.token;

}

From source file: org.apache.hive.hcatalog.streaming.HiveEndPoint.java

License: Apache License

/**
 * Acquire a new connection to the MetaStore for streaming. To connect using Kerberos, the
 * 'authenticatedUser' argument should have been obtained via a Kerberos login. Additionally, the
 * 'hive.metastore.kerberos.principal' setting should be set correctly, either in hive-site.xml or
 * in the 'conf' argument (if not null). If using hive-site.xml, it should be on the classpath.
 *
 * @param createPartIfNotExists If true, the partition specified in the endpoint
 *                              will be auto created if it does not exist
 * @param conf               HiveConf object to be used for the connection. Can be null.
 * @param authenticatedUser  UserGroupInformation object obtained from successful authentication.
 *                           Uses non-secure mode if this argument is null.
 * @param agentInfo should uniquely identify the process/entity that is using this batch.  This
 *                  should be something that can be correlated with calling application log files
 *                  and/or monitoring consoles.
 * @return the newly created StreamingConnection
 * @throws ConnectionError if there is a connection problem
 * @throws InvalidPartition  if specified partition is not valid (createPartIfNotExists = false)
 * @throws ImpersonationFailed  if not able to impersonate 'username'
 * @throws PartitionCreationFailed if failed to create partition
 * @throws InterruptedException
 */
public StreamingConnection newConnection(final boolean createPartIfNotExists, final HiveConf conf,
        final UserGroupInformation authenticatedUser, final String agentInfo) throws ConnectionError,
        InvalidPartition, InvalidTable, PartitionCreationFailed, ImpersonationFailed, InterruptedException {

    if (authenticatedUser == null) {
        return newConnectionImpl(authenticatedUser, createPartIfNotExists, conf, agentInfo);
    }

    try {
        return authenticatedUser.doAs(new PrivilegedExceptionAction<StreamingConnection>() {
            @Override
            public StreamingConnection run()
                    throws ConnectionError, InvalidPartition, InvalidTable, PartitionCreationFailed {
                return newConnectionImpl(authenticatedUser, createPartIfNotExists, conf, agentInfo);
            }
        });
    } catch (IOException e) {
        throw new ConnectionError("Failed to connect as : " + authenticatedUser.getShortUserName(), e);
    }
}

From source file: org.apache.hive.hcatalog.templeton.SecureProxySupport.java

License: Apache License

private Token<?>[] getFSDelegationToken(String user, final Configuration conf)
        throws IOException, InterruptedException {
    LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
    final UserGroupInformation ugi = UgiFactory.getUgi(user);

    final TokenWrapper twrapper = new TokenWrapper();
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws IOException, URISyntaxException {
            Credentials creds = new Credentials();
            //get Tokens for default FS.  Not all FSs support delegation tokens, e.g. WASB
            collectTokens(FileSystem.get(conf), twrapper, creds, ugi.getShortUserName());
            //get tokens for all other known FSs since Hive tables may result in different ones
            //passing "creds" prevents duplicate tokens from being added
            Collection<String> URIs = conf.getStringCollection("mapreduce.job.hdfs-servers");
            for (String uri : URIs) {
                LOG.debug("Getting tokens for " + uri);
                collectTokens(FileSystem.get(new URI(uri), conf), twrapper, creds, ugi.getShortUserName());
            }
            return null;
        }
    });
    return twrapper.tokens;
}

From source file: org.apache.hive.minikdc.TestMiniHiveKdc.java

License: Apache License

@Test
public void testLogin() throws Exception {
    String servicePrinc = miniHiveKdc.getHiveServicePrincipal();
    assertNotNull(servicePrinc);
    miniHiveKdc.loginUser(servicePrinc);
    assertTrue(UserGroupInformation.isLoginKeytabBased());
    UserGroupInformation ugi = Utils.getUGI();
    assertEquals(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL, ugi.getShortUserName());
}

From source file: org.apache.hive.service.cli.operation.SQLOperation.java

License: Apache License

@Override
public void runInternal() throws HiveSQLException {
    setState(OperationState.PENDING);
    final HiveConf opConfig = getConfigForOperation();
    prepare(opConfig);
    if (!shouldRunAsync()) {
        runQuery(opConfig);
    } else {
        // We'll pass ThreadLocals to the background thread from the foreground (handler) thread
        final SessionState parentSessionState = SessionState.get();
        // The ThreadLocal Hive object needs to be set in the background thread
        // so that the metastore client in Hive is associated with the right user.
        final Hive parentHive = getSessionHive();
        // The current UGI will be used by the metastore when the metastore is in embedded mode,
        // so it needs to be passed to the new background thread.
        final UserGroupInformation currentUGI = getCurrentUGI(opConfig);
        // Runnable impl to call runInternal asynchronously,
        // from a different thread
        Runnable backgroundOperation = new Runnable() {
            @Override
            public void run() {
                PrivilegedExceptionAction<Object> doAsAction = new PrivilegedExceptionAction<Object>() {
                    @Override
                    public Object run() throws HiveSQLException {
                        Hive.set(parentHive);
                        SessionState.setCurrentSessionState(parentSessionState);
                        // Set current OperationLog in this async thread for keeping on saving query log.
                        registerCurrentOperationLog();
                        try {
                            runQuery(opConfig);
                        } catch (HiveSQLException e) {
                            setOperationException(e);
                            LOG.error("Error running hive query: ", e);
                        } finally {
                            unregisterOperationLog();
                        }
                        return null;
                    }
                };

                try {
                    currentUGI.doAs(doAsAction);
                } catch (Exception e) {
                    setOperationException(new HiveSQLException(e));
                    LOG.error("Error running hive query as user : " + currentUGI.getShortUserName(), e);
                } finally {
                    /**
                     * We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup
                     * when this thread is garbage collected later.
                     * @see org.apache.hive.service.server.ThreadWithGarbageCleanup#finalize()
                     */
                    if (ThreadWithGarbageCleanup.currentThread() instanceof ThreadWithGarbageCleanup) {
                        ThreadWithGarbageCleanup currentThread = (ThreadWithGarbageCleanup) ThreadWithGarbageCleanup
                                .currentThread();
                        currentThread.cacheThreadLocalRawStore();
                    }
                }
            }
        };
        try {
            // This submit blocks if no background threads are available to run this operation
            Future<?> backgroundHandle = getParentSession().getSessionManager()
                    .submitBackgroundOperation(backgroundOperation);
            setBackgroundHandle(backgroundHandle);
        } catch (RejectedExecutionException rejected) {
            setState(OperationState.ERROR);
            throw new HiveSQLException("The background threadpool cannot accept"
                    + " new task for execution, please retry the operation", rejected);
        }
    }
}

From source file: org.apache.hive.streaming.HiveStreamingConnection.java

License: Apache License

private HiveStreamingConnection(Builder builder) throws StreamingException {
    this.database = builder.database.toLowerCase();
    this.table = builder.table.toLowerCase();
    this.staticPartitionValues = builder.staticPartitionValues;
    this.conf = builder.hiveConf;
    this.agentInfo = builder.agentInfo;
    this.streamingOptimizations = builder.streamingOptimizations;
    this.writeId = builder.writeId;
    this.statementId = builder.statementId;
    this.tableObject = builder.tableObject;
    this.setPartitionedTable(builder.isPartitioned);
    this.manageTransactions = builder.manageTransactions;

    UserGroupInformation loggedInUser = null;
    try {
        loggedInUser = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        LOG.warn("Unable to get logged in user via UGI. err: {}", e.getMessage());
    }
    if (loggedInUser == null) {
        this.username = System.getProperty("user.name");
        this.secureMode = false;
    } else {
        this.username = loggedInUser.getShortUserName();
        this.secureMode = loggedInUser.hasKerberosCredentials();
    }
    this.transactionBatchSize = builder.transactionBatchSize;
    this.recordWriter = builder.recordWriter;
    this.connectionStats = new ConnectionStats();
    if (agentInfo == null) {
        try {
            agentInfo = username + ":" + InetAddress.getLocalHost().getHostName() + ":"
                    + Thread.currentThread().getName();
        } catch (UnknownHostException e) {
            // ignore and use UUID instead
            this.agentInfo = UUID.randomUUID().toString();
        }
    }
    if (conf == null) {
        conf = createHiveConf(this.getClass(), DEFAULT_METASTORE_URI);
    }

    overrideConfSettings(conf);
    if (manageTransactions) {
        this.metastoreUri = conf.get(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName());
        this.msClient = getMetaStoreClient(conf, metastoreUri, secureMode, "streaming-connection");
        // We use a separate metastore client for heartbeat calls to ensure heartbeat RPC calls are
        // isolated from the other transaction related RPC calls.
        this.heartbeatMSClient = getMetaStoreClient(conf, metastoreUri, secureMode,
                "streaming-connection-heartbeat");
        validateTable();
    }

    LOG.info("STREAMING CONNECTION INFO: {}", toConnectionInfoString());
}