Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

This page collects usage examples for org.apache.hadoop.security.UserGroupInformation#getCurrentUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
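
For reference, here is a minimal sketch of the call and of the doAs behavior described above. The user name "alice" is illustrative, and error handling is omitted:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

// Outside of any doAs block, this returns the login user.
UserGroupInformation current = UserGroupInformation.getCurrentUser();
System.out.println(current.getShortUserName());

// Inside a doAs block, getCurrentUser() returns the user being impersonated.
UserGroupInformation remote = UserGroupInformation.createRemoteUser("alice");
remote.doAs((PrivilegedExceptionAction<Void>) () -> {
    // prints "alice"
    System.out.println(UserGroupInformation.getCurrentUser().getShortUserName());
    return null;
});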

Usage

From source file:co.cask.cdap.data2.datafabric.dataset.DatasetServiceClient.java

License:Apache License

private HttpRequest.Builder addUserIdHeader(HttpRequest.Builder builder) throws DatasetManagementException {
    if (!securityEnabled || !authorizationEnabled) {
        return builder;
    }

    String currUserShortName;
    try {
        currUserShortName = UserGroupInformation.getCurrentUser().getShortUserName();
    } catch (IOException e) {
        throw new DatasetManagementException("Unable to get the current user", e);
    }

    // If the request originated from the router and was forwarded to a service other than the dataset service
    // before reaching the dataset service through this client, the userId may already be set in the
    // SecurityRequestContext, e.g. when deploying an app that contains a dataset.
    // For user datasets, if a dataset call originates from a program runtime, the userId is found via
    // UserGroupInformation#getCurrentUser()
    String userId = authenticationContext.getPrincipal().getName();
    if (NamespaceId.SYSTEM.equals(namespaceId)) {
        // For getting a system dataset like MDS, use the system principal, if the current user is the same as the
        // CDAP kerberos principal. If a user tries to access a system dataset from an app, either:
        // 1. The request will go through if the user is impersonating as the cdap principal - which means that
        // impersonation has specifically been configured in this namespace to use the cdap principal; or
        // 2. The request will fail, if kerberos is enabled and the user is impersonating as a non-cdap user; or
        // 3. The request will go through, if kerberos is disabled, since the impersonating user will be cdap

        // We compare short names because, in some YARN containers launched by CDAP, the current
        // username is not the full configured principal.
        if (!kerberosEnabled || currUserShortName.equals(masterShortUserName)) {
            LOG.trace(
                    "Accessing dataset in system namespace using the system principal because the current user "
                            + "{} is the same as the CDAP master user {}.",
                    currUserShortName, masterShortUserName);
            userId = currUserShortName;
        }
    }
    return builder.addHeader(Constants.Security.Headers.USER_ID, userId);
}
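
Note that getShortUserName() returns only the login portion of a Kerberos principal (for example, "cdap" for cdap/host@REALM, subject to the configured auth_to_local rules), which is why the snippet compares short names rather than full principal names.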

From source file:co.cask.cdap.data2.datafabric.dataset.service.executor.DatasetAdminService.java

License:Apache License

private static UserGroupInformation getUgiForDataset(Impersonator impersonator, DatasetId datasetInstanceId)
        throws IOException, NamespaceNotFoundException {
    // For system datasets, do not look up owner information in the store, since we know it will be null.
    // This is also required for CDAP to start: initially we don't want the owner lookup (which would
    // trigger its own dataset lookup) when SystemDatasetInitiator.getDataset is called at startup.
    UserGroupInformation ugi;
    if (NamespaceId.SYSTEM.equals(datasetInstanceId.getParent())) {
        ugi = UserGroupInformation.getCurrentUser();
    } else {
        ugi = impersonator.getUGI(datasetInstanceId);
    }
    LOG.debug("Using {} user for dataset {}", ugi.getUserName(), datasetInstanceId);
    return ugi;
}

From source file:co.cask.cdap.data2.dataset2.lib.file.FileSetAdmin.java

License:Apache License

/**
 * @param truncating whether this call to create() is part of a truncate() operation. If so, and
 *                   possessExisting is true, then truncate() has just dropped this dataset, which
 *                   deleted the base directory, so it must be recreated.
 */
private void create(boolean truncating) throws IOException {
    if (isExternal) {
        validateExists(FileSetProperties.DATA_EXTERNAL);
    } else if (useExisting) {
        validateExists(FileSetProperties.DATA_USE_EXISTING);
    } else if (!truncating && possessExisting) {
        validateExists(FileSetProperties.DATA_POSSESS_EXISTING);
    } else {
        if (exists()) {
            throw new IOException(String.format("Base location for file set '%s' at %s already exists",
                    spec.getName(), baseLocation));
        }
        String permissions = FileSetProperties.getFilePermissions(spec.getProperties());
        String group = FileSetProperties.getFileGroup(spec.getProperties());
        if (group == null) {
            String[] groups = UserGroupInformation.getCurrentUser().getGroupNames();
            if (groups.length > 0) {
                group = groups[0];
            }
        }

        // we can't simply mkdirs() the base location, because we need to set the group id on
        // every directory we create. Thus find the first ancestor of the base that does not exist:
        Location ancestor = baseLocation;
        Location firstDirToCreate = null;
        while (ancestor != null && !ancestor.exists()) {
            firstDirToCreate = ancestor;
            ancestor = Locations.getParent(ancestor);
        }
        // firstDirToCreate is unlikely to be null: it can only be null if the base location was created after the exists() check above
        if (firstDirToCreate != null) {
            if (null == permissions) {
                firstDirToCreate.mkdirs();
            } else {
                firstDirToCreate.mkdirs(permissions);
            }
            if (group != null) {
                try {
                    firstDirToCreate.setGroup(group);
                } catch (Exception e) {
                    LOG.warn(
                            "Failed to set group {} for base location {} of file set {}: {}. Please set it manually.",
                            group, firstDirToCreate.toURI().toString(), spec.getName(), e.getMessage());
                }
            }
            // all following directories are created with the same group id as their parent
            if (null == permissions) {
                baseLocation.mkdirs();
            } else {
                baseLocation.mkdirs(permissions);
            }
        }
    }
}

From source file:co.cask.cdap.data2.dataset2.lib.FileSetTest.java

License:Apache License

@Test
public void testPermissions() throws Exception {

    String fsPermissions = "rwxrwx--x";
    String customPermissions = "rwx--x--x";
    String group = UserGroupInformation.getCurrentUser().getPrimaryGroupName();

    // create one file set with default permission so that the namespace data dir exists
    dsFrameworkUtil.createInstance("fileSet", OTHER_NAMESPACE.dataset("dummy"), DatasetProperties.EMPTY);

    // determine the default permissions of created directories (we want to test with different perms)
    String defaultPermissions = dsFrameworkUtil.getInjector().getInstance(NamespacedLocationFactory.class)
            .get(OTHER_NAMESPACE.toId()).getPermissions();
    if (fsPermissions.equals(defaultPermissions)) {
        // swap the permissions so we can test with different file set permissions than the default
        customPermissions = "rwxrwx--x";
        fsPermissions = "rwx--x--x";
    }

    // create a dataset with configured permissions that are different from the default
    DatasetId datasetId = OTHER_NAMESPACE.dataset("testPermFS");
    dsFrameworkUtil.createInstance("fileSet", datasetId, FileSetProperties.builder()
            .setBasePath("perm/test/path").setFilePermissions(fsPermissions).setFileGroup(group).build());
    FileSet fs = dsFrameworkUtil.getInstance(datasetId);

    // validate that the entire hierarchy of directories was created with the correct permissions
    Location base = fs.getBaseLocation();
    Assert.assertEquals(group, base.getGroup());
    Assert.assertEquals(fsPermissions, base.getPermissions());
    Location parent = Locations.getParent(base);
    Assert.assertNotNull(parent);
    Assert.assertEquals(group, parent.getGroup());
    Assert.assertEquals(fsPermissions, parent.getPermissions());
    parent = Locations.getParent(parent);
    Assert.assertNotNull(parent);
    Assert.assertEquals(group, parent.getGroup());
    Assert.assertEquals(fsPermissions, parent.getPermissions());
    Location nsRoot = Locations.getParent(parent);
    Assert.assertNotNull(nsRoot);
    Assert.assertNotEquals(fsPermissions, nsRoot.getPermissions());

    // create an empty file and validate it is created with the fileset's permissions
    Location child = base.append("a");
    Location grandchild = child.append("b");
    grandchild.getOutputStream().close();
    Assert.assertEquals(group, child.getGroup());
    Assert.assertEquals(group, grandchild.getGroup());
    Assert.assertEquals(fsPermissions, child.getPermissions());
    Assert.assertEquals(fsPermissions, grandchild.getPermissions());

    // create an empty file with custom permissions and validate them
    child = base.append("x");
    grandchild = child.append("y");
    grandchild.getOutputStream(customPermissions).close();
    Assert.assertEquals(group, child.getGroup());
    Assert.assertEquals(group, grandchild.getGroup());
    Assert.assertEquals(customPermissions, child.getPermissions());
    Assert.assertEquals(customPermissions, grandchild.getPermissions());

    // instantiate the dataset with custom permissions in the runtime arguments
    fs = dsFrameworkUtil.getInstance(datasetId,
            ImmutableMap.of(FileSetProperties.PROPERTY_FILES_PERMISSIONS, customPermissions));

    // create an empty file with custom permissions and validate them
    base = fs.getBaseLocation();
    child = base.append("p");
    grandchild = child.append("q");
    grandchild.getOutputStream().close();
    Assert.assertEquals(group, child.getGroup());
    Assert.assertEquals(group, grandchild.getGroup());
    Assert.assertEquals(customPermissions, child.getPermissions());
    Assert.assertEquals(customPermissions, grandchild.getPermissions());
}

From source file:co.cask.cdap.data2.dataset2.lib.partitioned.PartitionedFileSetTest.java

License:Apache License

@BeforeClass
public static void setupPermissions() throws IOException {
    group = UserGroupInformation.getCurrentUser().getPrimaryGroupName();
    tablePermissions = ImmutableMap.of("@" + group, "RWX");
    // determine the default permissions of created directories (we want to test with different perms)
    Location loc = dsFrameworkUtil.getInjector().getInstance(NamespacedLocationFactory.class)
            .get(DatasetFrameworkTestUtil.NAMESPACE2_ID.toId());
    loc.mkdirs();
    loc = loc.append("permcheckfile");
    loc.createNew();
    String defaultPermissions = loc.getPermissions();
    fsPermissions = "rwxrwx--x";
    if (fsPermissions.equals(defaultPermissions)) {
        // swap the permissions so we can test with different file set permissions than the default
        fsPermissions = "rwx--x--x";
    }
}

From source file:co.cask.cdap.explore.security.HiveTokenUtils.java

License:Apache License

public static Credentials obtainToken(Credentials credentials) {
    ClassLoader hiveClassloader = ExploreServiceUtils.getExploreClassLoader();
    ClassLoader contextClassloader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(hiveClassloader);

    try {
        LOG.info("Obtaining delegation token for Hive");
        Class hiveConfClass = hiveClassloader.loadClass("org.apache.hadoop.hive.conf.HiveConf");
        Object hiveConf = hiveConfClass.newInstance();

        Class hiveClass = hiveClassloader.loadClass("org.apache.hadoop.hive.ql.metadata.Hive");
        @SuppressWarnings("unchecked")
        Method hiveGet = hiveClass.getMethod("get", hiveConfClass);
        Object hiveObject = hiveGet.invoke(null, hiveConf);

        String user = UserGroupInformation.getCurrentUser().getShortUserName();
        @SuppressWarnings("unchecked")
        Method getDelegationToken = hiveClass.getMethod("getDelegationToken", String.class, String.class);
        String tokenStr = (String) getDelegationToken.invoke(hiveObject, user, user);

        Token<DelegationTokenIdentifier> delegationToken = new Token<>();
        delegationToken.decodeFromUrlString(tokenStr);
        delegationToken.setService(new Text(HiveAuthFactory.HS2_CLIENT_TOKEN));
        LOG.info("Adding delegation token {} from MetaStore for service {} for user {}", delegationToken,
                delegationToken.getService(), user);
        credentials.addToken(delegationToken.getService(), delegationToken);
        return credentials;
    } catch (Exception e) {
        LOG.error("Exception when fetching delegation token from Hive MetaStore", e);
        throw Throwables.propagate(e);
    } finally {
        Thread.currentThread().setContextClassLoader(contextClassloader);
    }
}
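
One way a caller might use this method is to merge the returned tokens into the current user's credentials before submitting work. A minimal sketch, assuming the HiveTokenUtils class above is on the classpath:

// Start from the current user's existing credentials, add the Hive
// delegation token to them, and expose the combined set to the current UGI.
Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
HiveTokenUtils.obtainToken(credentials);
UserGroupInformation.getCurrentUser().addCredentials(credentials);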

From source file:co.cask.cdap.explore.service.hive.BaseHiveExploreService.java

License:Apache License

/**
 * Updates the token store to be used for the Hive job, based upon the Explore container's credentials.
 * This is needed because Twill doesn't update the container_tokens file upon token refresh.
 * See: https://issues.apache.org/jira/browse/TWILL-170
 */
private void updateTokenStore() throws IOException, ExploreException {
    String hadoopTokenFileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
    if (hadoopTokenFileLocation == null) {
        LOG.warn("Skipping update of token store due to failure to find environment variable '{}'.",
                UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        return;
    }

    Path credentialsFile = Paths.get(hadoopTokenFileLocation);

    FileAttribute<Set<PosixFilePermission>> originalPermissionAttributes = PosixFilePermissions
            .asFileAttribute(Files.getPosixFilePermissions(credentialsFile));

    Path tmpFile = Files.createTempFile(credentialsFile.getParent(), "credentials.store", null,
            originalPermissionAttributes);
    LOG.debug("Writing to temporary file: {}", tmpFile);

    try (DataOutputStream os = new DataOutputStream(Files.newOutputStream(tmpFile))) {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        credentials.writeTokenStorageToStream(os);
    }

    Files.move(tmpFile, credentialsFile, StandardCopyOption.ATOMIC_MOVE);
    LOG.debug("Secure store saved to {}", credentialsFile);
}
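
Because the temporary file is created with the original token file's permission attributes and then moved into place with StandardCopyOption.ATOMIC_MOVE, readers of the token file never observe a partially written credentials store.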

From source file:co.cask.cdap.explore.service.hive.OperationInfo.java

License:Apache License

OperationInfo(SessionHandle sessionHandle, OperationHandle operationHandle, Map<String, String> sessionConf,
        String statement, long timestamp, String hiveDatabase, boolean readOnly) {
    this.sessionHandle = sessionHandle;
    this.operationHandle = operationHandle;
    this.sessionConf = sessionConf;
    this.statement = statement;
    this.timestamp = timestamp;
    this.hiveDatabase = hiveDatabase;
    this.readOnly = readOnly;
    try {
        // retain the UGI that created this operation, to use for future operations
        this.ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file:co.cask.cdap.internal.app.namespace.AbstractStorageProviderNamespaceAdmin.java

License:Apache License

private void createLocation(NamespaceMeta namespaceMeta) throws IOException {
    NamespaceId namespaceId = namespaceMeta.getNamespaceId();
    boolean createdHome = false;
    Location namespaceHome;
    if (hasCustomLocation(namespaceMeta)) {
        namespaceHome = validateCustomLocation(namespaceMeta);
    } else {
        // no custom namespace location was provided, so one must be created by CDAP
        namespaceHome = namespacedLocationFactory.get(namespaceMeta);
        if (namespaceHome.exists()) {
            throw new FileAlreadyExistsException(namespaceHome.toString());
        }
        createdHome = createNamespaceDir(namespaceHome, "home", namespaceId);
    }
    Location dataLoc = namespaceHome.append(Constants.Dataset.DEFAULT_DATA_DIR); // data/
    Location tempLoc = namespaceHome.append(cConf.get(Constants.AppFabric.TEMP_DIR)); // tmp/
    Location streamsLoc = namespaceHome.append(cConf.get(Constants.Stream.BASE_DIR)); // streams/
    Location deletedLoc = streamsLoc.append(StreamUtils.DELETED); // streams/.deleted/
    String configuredGroupName = namespaceMeta.getConfig().getGroupName();
    boolean createdData = false;
    boolean createdTemp = false;
    boolean createdStreams = false;
    try {
        if (createdHome && SecurityUtil.isKerberosEnabled(cConf)) {
            // set the group of the namespace home to the configured group, or else to the current user's primary group
            String groupToSet = configuredGroupName;
            if (groupToSet == null) {
                // attempt to determine the current user's primary group. Note that we cannot use ugi.getPrimaryGroup()
                // because that is not implemented at least in Hadoop 2.0 and 2.2, possibly other versions. Also note
                // that there is no guarantee that getGroupNames() returns anything.
                String[] groups = UserGroupInformation.getCurrentUser().getGroupNames();
                if (groups != null && groups.length > 0) {
                    groupToSet = groups[0];
                }
            }
            // if groupToSet is still null at this point, the directory will keep whatever group HDFS assigned at creation
            if (groupToSet != null) {
                namespaceHome.setGroup(groupToSet);
            }
        }
        // create all the directories with default permissions
        createdData = createNamespaceDir(dataLoc, "data", namespaceId);
        createdTemp = createNamespaceDir(tempLoc, "temp", namespaceId);
        createdStreams = createNamespaceDir(streamsLoc, "streams", namespaceId);
        createNamespaceDir(deletedLoc, "deleted streams", namespaceId);

        // then set all these directories to be owned and group writable by the namespace group as follows:
        // if a group name is configured, then that group; otherwise the same group as the namespace home dir
        if (SecurityUtil.isKerberosEnabled(cConf)) {
            String groupToSet = configuredGroupName != null ? configuredGroupName : namespaceHome.getGroup();
            for (Location loc : new Location[] { dataLoc, tempLoc, streamsLoc, deletedLoc }) {
                loc.setGroup(groupToSet);
                // set the permissions to rwx for group, if a group name was configured for the namespace
                if (configuredGroupName != null) {
                    String permissions = loc.getPermissions();
                    loc.setPermissions(permissions.substring(0, 3) + "rwx" + permissions.substring(6));
                }
            }
        }
    } catch (Throwable t) {
        if (createdHome) {
            deleteDirSilently(namespaceHome, t, "home", namespaceMeta.getNamespaceId());
        } else {
            if (createdData) {
                deleteDirSilently(dataLoc, t, "data", namespaceMeta.getNamespaceId());
            }
            if (createdTemp) {
                deleteDirSilently(tempLoc, t, "temp", namespaceMeta.getNamespaceId());
            }
            if (createdStreams) {
                deleteDirSilently(streamsLoc, t, "streams", namespaceMeta.getNamespaceId());
            }
        }
        throw t;
    }
}

From source file:co.cask.cdap.internal.app.namespace.DefaultNamespaceAdmin.java

License:Apache License

/**
 * Creates a new namespace.
 *
 * @param metadata the {@link NamespaceMeta} for the new namespace to be created
 * @throws NamespaceAlreadyExistsException if the specified namespace already exists
 */
@Override
@AuthEnforce(entities = "instanceId", enforceOn = InstanceId.class, actions = Action.ADMIN)
public synchronized void create(final NamespaceMeta metadata) throws Exception {
    // TODO: CDAP-1427 - This should be transactional, but we don't support transactions on files yet
    Preconditions.checkArgument(metadata != null, "Namespace metadata should not be null.");
    NamespaceId namespace = metadata.getNamespaceId();
    if (exists(namespace)) {
        throw new NamespaceAlreadyExistsException(namespace);
    }

    // If this namespace has custom mapping then validate the given custom mapping
    if (hasCustomMapping(metadata)) {
        validateCustomMapping(metadata);
    }

    // check that the user has configured either both or neither of the following configurations: principal and keytab URI
    boolean hasValidKerberosConf = false;
    if (metadata.getConfig() != null) {
        String configuredPrincipal = metadata.getConfig().getPrincipal();
        String configuredKeytabURI = metadata.getConfig().getKeytabURI();
        if ((!Strings.isNullOrEmpty(configuredPrincipal) && Strings.isNullOrEmpty(configuredKeytabURI))
                || (Strings.isNullOrEmpty(configuredPrincipal)
                        && !Strings.isNullOrEmpty(configuredKeytabURI))) {
            throw new BadRequestException(String.format(
                    "Either neither or both of the following two configurations must be configured. "
                            + "Configured principal: %s, Configured keytabURI: %s",
                    configuredPrincipal, configuredKeytabURI));
        }
        hasValidKerberosConf = true;
    }

    // check that, if explore-as-principal is explicitly set to false, the user has a kerberos configuration
    if (!metadata.getConfig().isExploreAsPrincipal() && !hasValidKerberosConf) {
        throw new BadRequestException(
                String.format("No kerberos principal or keytab-uri was provided while '%s' was set to false.",
                        NamespaceConfig.EXPLORE_AS_PRINCIPAL));

    }

    // Namespace can be created. Grant all the permissions to the user.
    Principal principal = authenticationContext.getPrincipal();
    privilegesManager.grant(namespace, principal, EnumSet.allOf(Action.class));

    // Also grant the user who will execute programs in this namespace all privileges on the namespace
    String executionUserName;
    if (SecurityUtil.isKerberosEnabled(cConf) && !NamespaceId.SYSTEM.equals(namespace)) {
        String namespacePrincipal = metadata.getConfig().getPrincipal();
        if (Strings.isNullOrEmpty(namespacePrincipal)) {
            executionUserName = SecurityUtil.getMasterPrincipal(cConf);
        } else {
            executionUserName = new KerberosName(namespacePrincipal).getShortName();
        }
    } else {
        executionUserName = UserGroupInformation.getCurrentUser().getShortUserName();
    }
    Principal executionUser = new Principal(executionUserName, Principal.PrincipalType.USER);
    privilegesManager.grant(namespace, executionUser, EnumSet.allOf(Action.class));

    // store the meta first in the namespace store because namespacedLocationFactory needs to look up location
    // mapping from namespace config
    nsStore.create(metadata);
    UserGroupInformation ugi;
    if (NamespaceId.DEFAULT.equals(namespace)) {
        ugi = UserGroupInformation.getCurrentUser();
    } else {
        ugi = impersonator.getUGI(namespace);
    }
    try {
        ImpersonationUtils.doAs(ugi, new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                storageProviderNamespaceAdmin.get().create(metadata);
                return null;
            }
        });
    } catch (Throwable t) {
        // failed to create namespace in underlying storage so delete the namespace meta stored in the store earlier
        deleteNamespaceMeta(metadata.getNamespaceId());
        privilegesManager.revoke(namespace);
        throw new NamespaceCannotBeCreatedException(namespace, t);
    }
    LOG.info("Namespace {} created with meta {}", metadata.getNamespaceId(), metadata);
}