Example usage for org.apache.hadoop.security UserGroupInformation getLoginUser

List of usage examples for org.apache.hadoop.security UserGroupInformation getLoginUser

Introduction

On this page you can find example usages of org.apache.hadoop.security UserGroupInformation getLoginUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getLoginUser() throws IOException 

Document

Get the currently logged in user.
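
A minimal, self-contained sketch (the class name is illustrative): getLoginUser() returns the identity established at process login, whereas getCurrentUser() reflects the current access-control context and can differ inside a doAs() block.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public final class LoginUserExample {
    public static void main(String[] args) throws IOException {
        // Resolved once per process from the OS account, or from the
        // Kerberos credentials when security is enabled.
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        System.out.println("Login user:  " + loginUser.getUserName());
        System.out.println("Short name:  " + loginUser.getShortUserName());
        System.out.println("Auth method: " + loginUser.getAuthenticationMethod());
    }
}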

Usage

From source file:org.apache.falcon.entity.parser.ClusterEntityParser.java

License:Apache License

private void checkPathOwnerAndPermission(String clusterName, String location, FileSystem fs,
        FsPermission expectedPermission) throws ValidationException {

    Path locationPath = new Path(location);
    try {
        if (!fs.exists(locationPath)) {
            throw new ValidationException(
                    "Location " + location + " for cluster " + clusterName + " must exist.");
        }

        // falcon owns this path on each cluster
        final String loginUser = UserGroupInformation.getLoginUser().getShortUserName();
        FileStatus fileStatus = fs.getFileStatus(locationPath);
        final String locationOwner = fileStatus.getOwner();
        if (!locationOwner.equals(loginUser)) {
            LOG.error("Owner of the location {} is {} for cluster {}. Current user {} is not the owner of the "
                    + "location.", locationPath, locationOwner, clusterName, loginUser);
            throw new ValidationException("Path [" + locationPath + "] on the cluster [" + clusterName
                    + "] has " + "owner [" + locationOwner + "]. Current user [" + loginUser
                    + "] is not the owner of the " + "path");
        }
        String errorMessage = "Path " + locationPath + " has permissions: "
                + fileStatus.getPermission().toString() + ", should be " + expectedPermission;
        if (fileStatus.getPermission().toShort() != expectedPermission.toShort()) {
            LOG.error(errorMessage);
            throw new ValidationException(errorMessage);
        }
        // try to list to see if the user is able to write to this folder
        fs.listStatus(locationPath);
    } catch (IOException e) {
        throw new ValidationException("Unable to validate the location with path: " + location + " for cluster:"
                + clusterName + " due to transient failures ", e);
    }
}
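
A side note on the ownership comparison above: getShortUserName() applies Hadoop's auth_to_local rules to the login principal, producing the same form HDFS records as the file owner. A brief sketch (the principal in the comment is illustrative):

// For a Kerberos principal such as "falcon/host.example.com@EXAMPLE.COM",
// getShortUserName() typically resolves to "falcon", which is comparable
// to the owner name returned by FileStatus.getOwner().
UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
String shortName = loginUser.getShortUserName();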

From source file:org.apache.falcon.entity.parser.ClusterEntityParserTest.java

License:Apache License

/**
 * A lightweight unit test for a cluster where the location of type working is missing.
 * The working location should be generated automatically.
 * Extensive tests are found in ClusterEntityValidationIT.
 */
@Test
public void testClusterWithOnlyStaging() throws Exception {
    ClusterEntityParser clusterEntityParser = Mockito
            .spy((ClusterEntityParser) EntityParserFactory.getParser(EntityType.CLUSTER));
    Cluster cluster = (Cluster) this.dfsCluster.getCluster().copy();
    Locations locations = getClusterLocations("staging2", null);
    cluster.setLocations(locations);
    Mockito.doNothing().when(clusterEntityParser).validateWorkflowInterface(cluster);
    Mockito.doNothing().when(clusterEntityParser).validateMessagingInterface(cluster);
    Mockito.doNothing().when(clusterEntityParser).validateRegistryInterface(cluster);
    String stagingPath = ClusterHelper.getLocation(cluster, ClusterLocationType.STAGING).getPath();
    this.dfsCluster.getFileSystem().mkdirs(new Path(stagingPath), HadoopClientFactory.ALL_PERMISSION);
    clusterEntityParser.validate(cluster);
    String workingDirPath = cluster.getLocations().getLocations().get(0).getPath() + "/working";
    Assert.assertEquals(ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING).getPath(),
            workingDirPath);
    FileStatus workingDirStatus = this.dfsCluster.getFileSystem().getFileLinkStatus(new Path(workingDirPath));
    Assert.assertTrue(workingDirStatus.isDirectory());
    Assert.assertEquals(workingDirStatus.getPermission(), HadoopClientFactory.READ_EXECUTE_PERMISSION);
    Assert.assertEquals(workingDirStatus.getOwner(), UserGroupInformation.getLoginUser().getShortUserName());

    FileStatus emptyDirStatus = this.dfsCluster.getFileSystem()
            .getFileStatus(new Path(stagingPath + "/" + ClusterHelper.EMPTY_DIR_NAME));
    Assert.assertEquals(emptyDirStatus.getPermission(), HadoopClientFactory.READ_ONLY_PERMISSION);
    Assert.assertEquals(emptyDirStatus.getOwner(), UserGroupInformation.getLoginUser().getShortUserName());

    String stagingSubdirFeed = cluster.getLocations().getLocations().get(0).getPath()
            + "/falcon/workflows/feed";
    String stagingSubdirProcess = cluster.getLocations().getLocations().get(0).getPath()
            + "/falcon/workflows/process";
    FileStatus stagingSubdirFeedStatus = this.dfsCluster.getFileSystem()
            .getFileLinkStatus(new Path(stagingSubdirFeed));
    FileStatus stagingSubdirProcessStatus = this.dfsCluster.getFileSystem()
            .getFileLinkStatus(new Path(stagingSubdirProcess));
    Assert.assertTrue(stagingSubdirFeedStatus.isDirectory());
    Assert.assertEquals(stagingSubdirFeedStatus.getPermission(), HadoopClientFactory.ALL_PERMISSION);
    Assert.assertTrue(stagingSubdirProcessStatus.isDirectory());
    Assert.assertEquals(stagingSubdirProcessStatus.getPermission(), HadoopClientFactory.ALL_PERMISSION);
}

From source file:org.apache.falcon.entity.parser.EntityParser.java

License:Apache License

/**
 * Checks whether the ACL owner is a valid user by fetching the owner's groups.
 * Also checks whether the ACL group is one of those fetched groups, for membership.
 * The only limitation is that a user cannot add a group to the ACL that they do not belong to.
 *
 * @param acl entity ACL
 * @throws org.apache.falcon.entity.parser.ValidationException
 */
protected void validateACLOwnerAndGroup(AccessControlList acl) throws ValidationException {
    String aclOwner = acl.getOwner();
    String aclGroup = acl.getGroup();

    try {
        UserGroupInformation proxyACLUser = UserGroupInformation.createProxyUser(aclOwner,
                UserGroupInformation.getLoginUser());
        Set<String> groups = new HashSet<String>(Arrays.asList(proxyACLUser.getGroupNames()));
        if (!groups.contains(aclGroup)) {
            throw new AuthorizationException("Invalid group: " + aclGroup + " for user: " + aclOwner);
        }
    } catch (IOException e) {
        throw new ValidationException(
                "Invalid acl owner " + aclOwner + ", does not exist or does not belong to group: " + aclGroup);
    }
}
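
A note on the pattern above: createProxyUser() does not authenticate aclOwner; it only wraps the login user's credentials under the proxied identity. getGroupNames() then resolves the owner's groups through the server's configured Hadoop group mapping, so membership is evaluated from the Falcon server's point of view.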

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

/**
 * This method is only used by Falcon internally to talk to the config store on HDFS.
 *
 * @param uri file system URI for config store.
 * @return FileSystem created as the Falcon login user.
 * @throws org.apache.falcon.FalconException
 *          if the filesystem could not be created.
 */
public FileSystem createFalconFileSystem(final URI uri) throws FalconException {
    Validate.notNull(uri, "uri cannot be null");

    try {
        Configuration conf = new Configuration();
        if (UserGroupInformation.isSecurityEnabled()) {
            conf.set(SecurityUtil.NN_PRINCIPAL, StartupProperties.get().getProperty(SecurityUtil.NN_PRINCIPAL));
        }

        return createFileSystem(UserGroupInformation.getLoginUser(), uri, conf);
    } catch (IOException e) {
        throw new FalconException("Exception while getting FileSystem for: " + uri, e);
    }
}

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

/**
 * This method is only used by Falcon internally to talk to the config store on HDFS.
 *
 * @param conf configuration.
 * @return FileSystem created as the Falcon login user.
 * @throws org.apache.falcon.FalconException
 *          if the filesystem could not be created.
 */
public FileSystem createFalconFileSystem(final Configuration conf) throws FalconException {
    Validate.notNull(conf, "configuration cannot be null");

    String nameNode = getNameNode(conf);
    try {
        return createFileSystem(UserGroupInformation.getLoginUser(), new URI(nameNode), conf);
    } catch (URISyntaxException e) {
        throw new FalconException("Exception while getting FileSystem for: " + nameNode, e);
    } catch (IOException e) {
        throw new FalconException("Exception while getting FileSystem for: " + nameNode, e);
    }
}

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

/**
 * Return a FileSystem created with the provided user for the specified URI.
 *
 * @param ugi user group information
 * @param uri  file system URI.
 * @param conf Configuration with all necessary information to create the FileSystem.
 * @return FileSystem created with the provided user/group.
 * @throws org.apache.falcon.FalconException
 *          if the filesystem could not be created.
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
public FileSystem createFileSystem(UserGroupInformation ugi, final URI uri, final Configuration conf)
        throws FalconException {
    validateInputs(ugi, uri, conf);

    try {
        // prevent falcon impersonating falcon, no need to use doas
        final String proxyUserName = ugi.getShortUserName();
        if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
            LOG.trace("Creating FS for the login user {}, impersonation not required", proxyUserName);
            return FileSystem.get(uri, conf);
        }

        LOG.trace("Creating FS impersonating user {}", proxyUserName);
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws Exception {
                return FileSystem.get(uri, conf);
            }
        });
    } catch (InterruptedException | IOException ex) {
        throw new FalconException("Exception creating FileSystem:" + ex.getMessage(), ex);
    }
}
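
A hedged caller-side sketch of the method above; the singleton accessor HadoopClientFactory.get() and the end-user name are assumptions for illustration:

// Obtain a FileSystem on behalf of an end user. When the proxied user
// equals the login user, createFileSystem() above skips doAs() entirely.
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(
        "enduser", UserGroupInformation.getLoginUser());
FileSystem fs = HadoopClientFactory.get().createFileSystem(proxyUgi, uri, conf);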

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

/**
 * Return a DistributedFileSystem created with the provided user for the specified URI.
 *
 * @param ugi user group information
 * @param uri  file system URI.
 * @param conf Configuration with all necessary information to create the FileSystem.
 * @return DistributedFileSystem created with the provided user/group.
 * @throws org.apache.falcon.FalconException
 *          if the filesystem could not be created.
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
public DistributedFileSystem createDistributedFileSystem(UserGroupInformation ugi, final URI uri,
        final Configuration conf) throws FalconException {
    validateInputs(ugi, uri, conf);
    FileSystem returnFs;
    try {
        // prevent falcon impersonating falcon, no need to use doas
        final String proxyUserName = ugi.getShortUserName();
        if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
            LOG.info("Creating Distributed FS for the login user {}, impersonation not required",
                    proxyUserName);
            returnFs = DistributedFileSystem.get(uri, conf);
        } else {
            LOG.info("Creating FS impersonating user {}", proxyUserName);
            returnFs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                public FileSystem run() throws Exception {
                    return DistributedFileSystem.get(uri, conf);
                }
            });
        }

        return (DistributedFileSystem) returnFs;
    } catch (InterruptedException | IOException ex) {
        throw new FalconException("Exception creating FileSystem:" + ex.getMessage(), ex);
    }
}

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

private void validateInputs(UserGroupInformation ugi, final URI uri, final Configuration conf)
        throws FalconException {
    Validate.notNull(ugi, "ugi cannot be null");
    Validate.notNull(conf, "configuration cannot be null");

    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            LOG.debug("Revalidating Auth Token with auth method {}",
                    UserGroupInformation.getLoginUser().getAuthenticationMethod().name());
            UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
        }
    } catch (IOException ioe) {
        throw new FalconException(
                "Exception while getting FileSystem. Unable to check TGT for user " + ugi.getShortUserName(),
                ioe);
    }

    validateNameNode(uri, conf);
}
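
One note on the relogin call above: checkTGTAndReloginFromKeytab() renews the TGT only when the login user was authenticated from a keytab, so a service normally performs a keytab login once at startup. A minimal sketch (the principal and keytab path are illustrative):

// Typically executed once during service startup; subsequent
// checkTGTAndReloginFromKeytab() calls can then renew the ticket.
UserGroupInformation.loginUserFromKeytab(
        "falcon/host.example.com@EXAMPLE.COM",
        "/etc/security/keytabs/falcon.service.keytab");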

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

/**
 * This method validates that the execute URL can reach the MR endpoint.
 *
 * @param executeUrl JT URL or RM URL
 * @throws IOException
 */
public void validateJobClient(String executeUrl, String rmPrincipal) throws IOException {
    final JobConf jobConf = new JobConf();
    jobConf.set(MR_JT_ADDRESS_KEY, executeUrl);
    jobConf.set(YARN_RM_ADDRESS_KEY, executeUrl);
    /**
     * It is possible that the RM/JT principal can be different between clusters,
     * for example, the cluster is using a different KDC with cross-domain trust
     * with the Falcon KDC. In that case, we want to allow the user to provide
     * the RM principal, similar to the NN principal.
     */
    if (UserGroupInformation.isSecurityEnabled() && StringUtils.isNotEmpty(rmPrincipal)) {
        jobConf.set(SecurityUtil.RM_PRINCIPAL, rmPrincipal);
    }
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    try {
        JobClient jobClient = loginUser.doAs(new PrivilegedExceptionAction<JobClient>() {
            public JobClient run() throws Exception {
                return new JobClient(jobConf);
            }
        });

        jobClient.getClusterStatus().getMapTasks();
    } catch (InterruptedException e) {
        throw new IOException("Exception creating job client:" + e.getMessage(), e);
    }
}

From source file:org.apache.falcon.recipe.RecipeTool.java

License:Apache License

private FileSystem getFileSystemForHdfs(final Properties recipeProperties, final Configuration conf)
        throws Exception {
    String storageEndpoint = RecipeToolOptions.CLUSTER_HDFS_WRITE_ENDPOINT.getName();
    String nameNode = recipeProperties.getProperty(storageEndpoint);
    conf.set(FS_DEFAULT_NAME_KEY, nameNode);
    if (UserGroupInformation.isSecurityEnabled()) {
        String nameNodePrincipal = recipeProperties
                .getProperty(RecipeToolOptions.RECIPE_NN_PRINCIPAL.getName());
        conf.set(NN_PRINCIPAL, nameNodePrincipal);
    }
    return createFileSystem(UserGroupInformation.getLoginUser(), new URI(nameNode), conf);
}