Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.getCurrentUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
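
The doAs clause matters: inside a doAs block, getCurrentUser() returns the impersonated user rather than the login user, which is exactly what the proxy-user examples below rely on. A minimal sketch illustrating this, assuming a plain environment where impersonation needs no extra configuration (the principal "alice" and the class name are placeholders):

import java.io.IOException;
import java.security.PrivilegedAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserSketch {
    public static void main(String[] args) throws IOException {
        // Outside any doAs block, this is the login (or OS) user.
        UserGroupInformation outer = UserGroupInformation.getCurrentUser();
        System.out.println("outer: " + outer.getUserName());

        // "alice" is a placeholder principal used only for illustration.
        UserGroupInformation proxy = UserGroupInformation.createProxyUser("alice", outer);
        proxy.doAs(new PrivilegedAction<Void>() {
            @Override
            public Void run() {
                try {
                    // Inside doAs, getCurrentUser() reports the proxy user.
                    System.out.println("inner: " + UserGroupInformation.getCurrentUser().getUserName());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
                return null;
            }
        });
    }
}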

Usage

From source file:org.apache.storm.hdfs.security.AutoHDFSNimbus.java

License:Apache License

@SuppressWarnings("unchecked")
private byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration,
        final String topologySubmitterUser) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            login(configuration);

            final URI nameNodeURI = conf.containsKey(TOPOLOGY_HDFS_URI)
                    ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString())
                    : FileSystem.getDefaultUri(configuration);

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    ugi);

            Credentials creds = proxyUser.doAs(new PrivilegedAction<Credentials>() {
                @Override
                public Credentials run() {
                    try {
                        FileSystem fileSystem = FileSystem.get(nameNodeURI, configuration);
                        Credentials credential = proxyUser.getCredentials();

                        if (configuration.get(STORM_USER_NAME_KEY) == null) {
                            configuration.set(STORM_USER_NAME_KEY, hdfsPrincipal);
                        }

                        fileSystem.addDelegationTokens(configuration.get(STORM_USER_NAME_KEY), credential);
                        LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser);
                        return credential;
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            });

            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);

            creds.write(out);
            out.flush();
            out.close();

            return bao.toByteArray();
        } else {
            throw new RuntimeException("Security is not enabled for HDFS");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}
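
The byte array returned above is simply the Writable-serialized form of the Credentials object. A minimal sketch of the matching read side, assuming the same serialization scheme (the helper name readCredentials is hypothetical):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

import org.apache.hadoop.security.Credentials;

// 'serialized' is a byte[] produced by getHadoopCredentials(...) above.
static Credentials readCredentials(byte[] serialized) throws IOException {
    Credentials credentials = new Credentials();
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(serialized))) {
        // Credentials implements Writable; ObjectInputStream implements DataInput.
        credentials.readFields(in);
    }
    return credentials;
}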

From source file:org.apache.storm.hive.security.AutoHive.java

License:Apache License

@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            String topologySubmitterUser = (String) conf.get(Config.TOPOLOGY_SUBMITTER_PRINCIPAL);
            String hiveMetaStoreURI = getMetaStoreURI(configuration);
            String hiveMetaStorePrincipal = getMetaStorePrincipal(configuration);
            HiveConf hcatConf = createHiveConf(hiveMetaStoreURI, hiveMetaStorePrincipal);
            login(configuration);

            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    currentUser);
            try {
                Token<DelegationTokenIdentifier> delegationTokenId = getDelegationToken(hcatConf,
                        hiveMetaStorePrincipal, topologySubmitterUser);
                proxyUser.addToken(delegationTokenId);
                LOG.info("Obtained Hive tokens, adding to user credentials.");

                Credentials credential = proxyUser.getCredentials();
                ByteArrayOutputStream bao = new ByteArrayOutputStream();
                ObjectOutputStream out = new ObjectOutputStream(bao);
                credential.write(out);
                out.flush();
                out.close();
                return bao.toByteArray();
            } catch (Exception ex) {
                LOG.debug(" Exception" + ex.getMessage());
                throw ex;
            }
        } else {
            throw new RuntimeException("Security is not enabled for Hadoop");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}

From source file:org.apache.storm.hive.security.AutoHiveNimbus.java

License:Apache License

@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration,
        final String topologySubmitterUser) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            String hiveMetaStoreURI = getMetaStoreURI(configuration);
            String hiveMetaStorePrincipal = getMetaStorePrincipal(configuration);
            HiveConf hcatConf = createHiveConf(hiveMetaStoreURI, hiveMetaStorePrincipal);
            login(configuration);

            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    currentUser);
            try {
                Token<DelegationTokenIdentifier> delegationTokenId = getDelegationToken(hcatConf,
                        hiveMetaStorePrincipal, topologySubmitterUser);
                proxyUser.addToken(delegationTokenId);
                LOG.info("Obtained Hive tokens, adding to user credentials.");

                Credentials credential = proxyUser.getCredentials();
                ByteArrayOutputStream bao = new ByteArrayOutputStream();
                ObjectOutputStream out = new ObjectOutputStream(bao);
                credential.write(out);
                out.flush();
                out.close();
                return bao.toByteArray();
            } catch (Exception ex) {
                LOG.debug(" Exception" + ex.getMessage());
                throw ex;
            }
        } else {
            throw new RuntimeException("Security is not enabled for Hadoop");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}

From source file:org.apache.sysml.api.DMLScript.java

License:Apache License

private static void checkSecuritySetup(DMLConfig config) throws IOException, DMLRuntimeException {
    //analyze local configuration
    String userName = System.getProperty("user.name");
    HashSet<String> groupNames = new HashSet<>();
    try {
        //check existence, for backwards compatibility to < hadoop 0.21
        if (UserGroupInformation.class.getMethod("getCurrentUser") != null) {
            String[] groups = UserGroupInformation.getCurrentUser().getGroupNames();
            Collections.addAll(groupNames, groups);
        }
    } catch (Exception ex) {
        //ignore: getCurrentUser not available on old hadoop versions, no group names collected
    }

    //analyze hadoop configuration
    JobConf job = ConfigurationManager.getCachedJobConf();
    boolean localMode = InfrastructureAnalyzer.isLocalMode(job);
    String taskController = job.get(MRConfigurationNames.MR_TASKTRACKER_TASKCONTROLLER,
            "org.apache.hadoop.mapred.DefaultTaskController");
    String ttGroupName = job.get(MRConfigurationNames.MR_TASKTRACKER_GROUP, "null");
    String perm = job.get(MRConfigurationNames.DFS_PERMISSIONS_ENABLED, "null"); //note: job.get("dfs.permissions.supergroup",null);
    URI fsURI = FileSystem.getDefaultUri(job);

    //determine security states
    boolean flagDiffUser = !(taskController.equals("org.apache.hadoop.mapred.LinuxTaskController") //runs map/reduce tasks as the current user
            || localMode // run in the same JVM anyway
            || groupNames.contains(ttGroupName)); //user in task tracker group 
    boolean flagLocalFS = fsURI == null || fsURI.getScheme().equals("file");
    boolean flagSecurity = perm.equals("yes");

    LOG.debug("SystemML security check: " + "local.user.name = " + userName + ", " + "local.user.groups = "
            + ProgramConverter.serializeStringCollection(groupNames) + ", "
            + MRConfigurationNames.MR_JOBTRACKER_ADDRESS + " = "
            + job.get(MRConfigurationNames.MR_JOBTRACKER_ADDRESS) + ", "
            + MRConfigurationNames.MR_TASKTRACKER_TASKCONTROLLER + " = " + taskController + ","
            + MRConfigurationNames.MR_TASKTRACKER_GROUP + " = " + ttGroupName + ", "
            + MRConfigurationNames.FS_DEFAULTFS + " = " + ((fsURI != null) ? fsURI.getScheme() : "null") + ", "
            + MRConfigurationNames.DFS_PERMISSIONS_ENABLED + " = " + perm);

    //print warning if permission issues possible
    if (flagDiffUser && (flagLocalFS || flagSecurity)) {
        LOG.warn("Cannot run map/reduce tasks as user '" + userName + "'. Using tasktracker group '"
                + ttGroupName + "'.");
    }
}

From source file:org.apache.tajo.cli.tools.TajoDump.java

License:Apache License

public static void main(String[] args) throws ParseException, IOException, ServiceException, SQLException {
    final TajoConf conf = new TajoConf();
    final CommandLineParser parser = new PosixParser();
    final CommandLine cmd = parser.parse(options, args);
    final Pair<String, Integer> hostAndPort = getConnectionAddr(conf, cmd);
    final String hostName = hostAndPort.getFirst();
    final Integer port = hostAndPort.getSecond();
    final UserGroupInformation userInfo = UserGroupInformation.getCurrentUser();

    String baseDatabaseName = null;
    if (cmd.getArgList().size() > 0) {
        baseDatabaseName = (String) cmd.getArgList().get(0);
    }

    boolean isDumpingAllDatabases = cmd.hasOption('a');

    // Neither the -a flag nor a database name was given
    if (!isDumpingAllDatabases && baseDatabaseName == null) {
        printUsage();
        System.exit(-1);
    }

    TajoClient client = null;
    if ((hostName == null) ^ (port == null)) {
        System.err.println("ERROR: cannot find any TajoMaster rpc address in arguments and tajo-site.xml.");
        System.exit(-1);
    } else if (hostName != null && port != null) {
        conf.setVar(TajoConf.ConfVars.TAJO_MASTER_CLIENT_RPC_ADDRESS, hostName + ":" + port);
        client = new TajoClientImpl(conf);
    } else {
        client = new TajoClientImpl(conf);
    }

    PrintWriter writer = new PrintWriter(System.out);
    dump(client, userInfo, baseDatabaseName, isDumpingAllDatabases, true, true, writer);

    System.exit(0);
}

From source file:org.apache.tajo.client.SessionConnection.java

License:Apache License

/**
 * Connect to TajoMaster
 *
 * @param conf TajoConf
 * @param addr TajoMaster address
 * @param baseDatabase The base database name. It is case sensitive. If it is null,
 *                     the 'default' database will be used.
 * @throws java.io.IOException
 */
public SessionConnection(TajoConf conf, InetSocketAddress addr, @Nullable String baseDatabase)
        throws IOException {
    this.conf = conf;
    this.conf.set("tajo.disk.scheduler.report.interval", "0");
    this.tajoMasterAddr = addr;
    int workerNum = conf.getIntVar(TajoConf.ConfVars.RPC_CLIENT_WORKER_THREAD_NUM);
    // Don't share connection pool per client
    connPool = RpcConnectionPool.newPool(conf, getClass().getSimpleName(), workerNum);
    userInfo = UserGroupInformation.getCurrentUser();
    this.baseDatabase = baseDatabase;
}

From source file:org.apache.tajo.client.TajoClient.java

License:Apache License

/**
 * Connect to TajoMaster
 *
 * @param conf TajoConf
 * @param addr TajoMaster address
 * @param baseDatabase The base database name. It is case sensitive. If it is null,
 *                     the 'default' database will be used.
 * @throws IOException
 */
public TajoClient(TajoConf conf, InetSocketAddress addr, @Nullable String baseDatabase) throws IOException {
    this.conf = conf;
    this.conf.set("tajo.disk.scheduler.report.interval", "0");
    this.tajoMasterAddr = addr;
    int workerNum = conf.getIntVar(TajoConf.ConfVars.RPC_CLIENT_WORKER_THREAD_NUM);
    // Don't share connection pool per client
    connPool = RpcConnectionPool.newPool(conf, getClass().getSimpleName(), workerNum);
    userInfo = UserGroupInformation.getCurrentUser();
    this.baseDatabase = baseDatabase;
}
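
A minimal usage sketch for this constructor; the host, the port, and the null base database (which falls back to 'default') are assumptions for illustration, and the usual imports (java.net.InetSocketAddress, org.apache.tajo.conf.TajoConf) are assumed:

TajoConf conf = new TajoConf();
// Placeholder TajoMaster client RPC address; 26002 is only an example port.
InetSocketAddress addr = new InetSocketAddress("127.0.0.1", 26002);
// A null base database selects the 'default' database.
TajoClient client = new TajoClient(conf, addr, null);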

From source file:org.apache.tajo.client.TajoDump.java

License:Apache License

public static void main(String[] args) throws ParseException, IOException, ServiceException, SQLException {
    final TajoConf conf = new TajoConf();
    final CommandLineParser parser = new PosixParser();
    final CommandLine cmd = parser.parse(options, args);
    final Pair<String, Integer> hostAndPort = getConnectionAddr(conf, cmd);
    final String hostName = hostAndPort.getFirst();
    final Integer port = hostAndPort.getSecond();
    final UserGroupInformation userInfo = UserGroupInformation.getCurrentUser();

    String baseDatabaseName = null;
    if (cmd.getArgList().size() > 0) {
        baseDatabaseName = (String) cmd.getArgList().get(0);
    }

    boolean isDumpingAllDatabases = cmd.hasOption('a');

    // Neither the -a flag nor a database name was given
    if (!isDumpingAllDatabases && baseDatabaseName == null) {
        printUsage();
        System.exit(-1);
    }

    TajoClient client = null;
    if ((hostName == null) ^ (port == null)) {
        System.err.println("ERROR: cannot find any TajoMaster rpc address in arguments and tajo-site.xml.");
        System.exit(-1);
    } else if (hostName != null && port != null) {
        conf.setVar(TajoConf.ConfVars.TAJO_MASTER_CLIENT_RPC_ADDRESS, hostName + ":" + port);
        client = new TajoClient(conf);
    } else {
        client = new TajoClient(conf);
    }

    PrintWriter writer = new PrintWriter(System.out);
    dump(client, userInfo, baseDatabaseName, isDumpingAllDatabases, true, true, writer);

    System.exit(0);
}

From source file:org.apache.tajo.client.TestTajoDump.java

License:Apache License

@Test
public void testDump1() throws Exception {
    if (!testingCluster.isHCatalogStoreRunning()) {
        executeString("CREATE TABLE \"" + getCurrentDatabase()
                + "\".\"TableName1\" (\"Age\" int, \"FirstName\" TEXT, lastname TEXT)");

        UserGroupInformation userInfo = UserGroupInformation.getCurrentUser();
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        PrintWriter printWriter = new PrintWriter(bos);
        TajoDump.dump(client, userInfo, getCurrentDatabase(), false, false, false, printWriter);
        printWriter.flush();
        printWriter.close();
        assertStrings(new String(bos.toByteArray()));
        bos.close();
    }
}

From source file:org.apache.tajo.master.querymaster.QueryMasterTask.java

License:Apache License

/**
 * It initializes the final output and staging directory and sets
 * them to variables.
 */
public static Path initStagingDir(TajoConf conf, String queryId, QueryContext context) throws IOException {

    String realUser;
    String currentUser;
    UserGroupInformation ugi;
    ugi = UserGroupInformation.getLoginUser();
    realUser = ugi.getShortUserName();
    currentUser = UserGroupInformation.getCurrentUser().getShortUserName();

    FileSystem fs;
    Path stagingDir;

    ////////////////////////////////////////////
    // Create Output Directory
    ////////////////////////////////////////////

    String outputPath = context.get(QueryVars.OUTPUT_TABLE_PATH, "");
    if (context.isCreateTable() || context.isInsert()) {
        if (outputPath == null || outputPath.isEmpty()) {
            // hbase
            stagingDir = new Path(TajoConf.getDefaultRootStagingDir(conf), queryId);
        } else {
            stagingDir = StorageUtil.concatPath(context.getOutputPath(), TMP_STAGING_DIR_PREFIX, queryId);
        }
    } else {
        stagingDir = new Path(TajoConf.getDefaultRootStagingDir(conf), queryId);
    }

    // initialize the file system for the staging directory
    fs = stagingDir.getFileSystem(conf);

    if (fs.exists(stagingDir)) {
        throw new IOException("The staging directory '" + stagingDir + "' already exists");
    }
    fs.mkdirs(stagingDir, new FsPermission(STAGING_DIR_PERMISSION));
    FileStatus fsStatus = fs.getFileStatus(stagingDir);
    String owner = fsStatus.getOwner();

    if (!owner.isEmpty() && !(owner.equals(currentUser) || owner.equals(realUser))) {
        throw new IOException("The ownership on the user's query " + "directory " + stagingDir
                + " is not as expected. " + "It is owned by " + owner + ". The directory must "
                + "be owned by the submitter " + currentUser + " or " + "by " + realUser);
    }

    if (!fsStatus.getPermission().equals(STAGING_DIR_PERMISSION)) {
        LOG.info("Permissions on staging directory " + stagingDir + " are " + "incorrect: "
                + fsStatus.getPermission() + ". Fixing permissions " + "to correct value "
                + STAGING_DIR_PERMISSION);
        fs.setPermission(stagingDir, new FsPermission(STAGING_DIR_PERMISSION));
    }

    Path stagingResultDir = new Path(stagingDir, TajoConstants.RESULT_DIR_NAME);
    fs.mkdirs(stagingResultDir);

    return stagingDir;
}