Example usage for org.apache.hadoop.security UserGroupInformation doAs

List of usage examples for org.apache.hadoop.security UserGroupInformation doAs

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.doAs.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException 

Document

Run the given action as the user, potentially throwing an exception.
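
For orientation, the following is a minimal, self-contained sketch of the call pattern. The remote user name and the HDFS path are placeholder values chosen for illustration; they are not taken from the examples below.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        // Build a UGI for a remote (non-Kerberos) user instead of the process user.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someuser");
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                // Everything inside run() executes as "someuser".
                FileSystem fs = FileSystem.get(conf);
                return fs.exists(new Path("/tmp/example"));
            }
        });
        System.out.println("exists: " + exists);
    }
}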

Usage

From source file:com.mellanox.r4h.TestReadWhileWriting.java

License:Apache License

/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // create cluster
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        //change the lease limits.
        cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

        //wait for the cluster
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        final Path p = new Path(DIR, "file1");
        final int half = BLOCK_SIZE / 2;

        //a. On Machine M1, Create file. Write half block of data.
        //   Invoke DFSOutputStream.hflush() on the dfs file handle.
        //   Do not close file yet.
        {
            final FSDataOutputStream out = fs.create(p, true,
                    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3,
                    BLOCK_SIZE);
            write(out, 0, half);

            //hflush
            ((DFSOutputStream) out.getWrappedStream()).hflush();
        }

        //b. On another machine M2, open file and verify that the half-block
        //   of data can be read successfully.
        checkFile(p, half, conf);
        MiniDFSClusterBridge.getAppendTestUtilLOG().info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem) fs).dfs.getLeaseRenewer().interruptAndJoin();

        //c. On M1, append another half block of data.  Close file on M1.
        {
            //sleep to let the lease expire.
            Thread.sleep(2 * SOFT_LEASE_LIMIT);

            final UserGroupInformation current = UserGroupInformation.getCurrentUser();
            final UserGroupInformation ugi = UserGroupInformation
                    .createUserForTesting(current.getShortUserName() + "x", new String[] { "supergroup" });
            final DistributedFileSystem dfs = ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
                @Override
                public DistributedFileSystem run() throws Exception {
                    return (DistributedFileSystem) FileSystem.newInstance(conf);
                }
            });
            final FSDataOutputStream out = append(dfs, p);
            write(out, 0, half);
            out.close();
        }

        //d. On M2, open file and read 1 block of data from it. Close file.
        checkFile(p, 2 * half, conf);
    } finally {
        cluster.shutdown();
    }
}

From source file:com.streamsets.pipeline.stage.lib.hive.HiveMetastoreUtil.java

License:Apache License

/**
 * Returns the HDFS path where the Avro schema is stored after serializing.
 * The path is suffixed with the current time so that schema files have an ordering.
 * @param rootTableLocation Root Table Location
 * @return HDFS path string.
 */
public static String serializeSchemaToHDFS(UserGroupInformation loginUGI, final FileSystem fs,
        final String rootTableLocation, final String schemaJson) throws StageException {
    final String folderPath = rootTableLocation + HiveMetastoreUtil.SEP
            + HiveMetastoreUtil.HDFS_SCHEMA_FOLDER_NAME;
    final Path schemasFolderPath = new Path(folderPath);
    final String path = folderPath + SEP + HiveMetastoreUtil.AVRO_SCHEMA
            + DateFormatUtils.format(new Date(System.currentTimeMillis()), "yyyy-MM-dd--HH_mm_ss");
    try {
        loginUGI.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                if (!fs.exists(schemasFolderPath)) {
                    fs.mkdirs(schemasFolderPath);
                }
                Path schemaFilePath = new Path(path);
                // This should never happen unless two HMS targets are writing concurrently;
                // error out and let the user handle it via error record handling.
                if (!fs.exists(schemaFilePath)) {
                    try (FSDataOutputStream os = fs.create(schemaFilePath)) {
                        os.writeChars(schemaJson);
                    }
                } else {
                    LOG.error(Utils.format("Schema file {} already exists in HDFS", path));
                    throw new IOException("Schema file already exists");
                }
                return null;
            }
        });
    } catch (Exception e) {
        LOG.error("Error in Writing Schema to HDFS: " + e.toString(), e);
        throw new StageException(Errors.HIVE_18, path, e.getMessage());
    }
    return path;
}
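
A hypothetical call site for the method above might look like the sketch below. The login UGI, file system, table location, and schema JSON are illustrative placeholders, not values from the original source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

import com.streamsets.pipeline.stage.lib.hive.HiveMetastoreUtil;

public class SchemaWriterSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative setup only; these values are placeholders.
        UserGroupInformation loginUGI = UserGroupInformation.getLoginUser();
        FileSystem fs = FileSystem.get(new Configuration());
        String schemaJson = "{\"type\":\"record\",\"name\":\"sample\",\"fields\":[]}";

        // serializeSchemaToHDFS performs the write inside loginUGI.doAs(...).
        String schemaPath = HiveMetastoreUtil.serializeSchemaToHDFS(
                loginUGI, fs, "/user/hive/warehouse/sample_table", schemaJson);
        System.out.println("Avro schema written to " + schemaPath);
    }
}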

From source file:com.telefonica.iot.cygnus.backends.hdfs.HDFSBackendImplBinary.java

License:Open Source License

@Override
public void createDir(String dirPath) throws Exception {
    CreateDirPEA pea = new CreateDirPEA(dirPath);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
    ugi.doAs(pea);
}

From source file:com.telefonica.iot.cygnus.backends.hdfs.HDFSBackendImplBinary.java

License:Open Source License

@Override
public void createFile(String filePath, String data) throws Exception {
    CreateFilePEA pea = new CreateFilePEA(filePath, data);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
    ugi.doAs(pea);
}

From source file:com.telefonica.iot.cygnus.backends.hdfs.HDFSBackendImplBinary.java

License:Open Source License

@Override
public void append(String filePath, String data) throws Exception {
    AppendPEA pea = new AppendPEA(filePath, data);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
    ugi.doAs(pea);
}

From source file:com.telefonica.iot.cygnus.backends.hdfs.HDFSBackendImplBinary.java

License:Open Source License

@Override
public boolean exists(String filePath) throws Exception {
    ExistsPEA pea = new ExistsPEA(filePath);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
    ugi.doAs(pea);
    return pea.exists();
}
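
The CreateDirPEA, CreateFilePEA, AppendPEA, and ExistsPEA helpers used above are not shown in these excerpts. A plausible shape for one of them, assuming it simply implements PrivilegedExceptionAction and records its result, might be the following sketch; the real Cygnus classes may differ in how they obtain their Configuration and FileSystem.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of an ExistsPEA-like helper; not the actual Cygnus class.
class ExistsPEASketch implements PrivilegedExceptionAction<Void> {

    private final String filePath;
    private boolean exists;

    ExistsPEASketch(String filePath) {
        this.filePath = filePath;
    }

    @Override
    public Void run() throws Exception {
        // Runs with the identity passed to ugi.doAs(pea).
        FileSystem fs = FileSystem.get(new Configuration());
        exists = fs.exists(new Path(filePath));
        return null;
    }

    boolean exists() {
        return exists;
    }
}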

From source file:com.thinkbiganalytics.datalake.authorization.SentryAuthorizationService.java

License:Apache License

@Override
public void createOrUpdateReadOnlyHivePolicy(String categoryName, String feedName,
        List<String> hadoopAuthorizationGroups, String databaseName, List<String> tableNames) {

    if (this.sentryConnection.getKerberosTicketConfiguration().isKerberosEnabled()) {
        try {
            UserGroupInformation ugi = authenticatePolicyCreatorWithKerberos();
            if (ugi == null) {
                log.error(UserGroupObjectError);
            } else {
                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {
                        String sentryPolicyName = getHivePolicyName(categoryName, feedName);
                        if (!(sentryClientObject.checkIfRoleExists(sentryPolicyName))) {
                            createReadOnlyHivePolicy(categoryName, feedName, hadoopAuthorizationGroups,
                                    databaseName, tableNames);
                        } else {
                            try {
                                updateReadOnlyHivePolicy(categoryName, feedName, hadoopAuthorizationGroups,
                                        databaseName, tableNames);
                            } catch (Exception e) {
                                log.error("Failed to update Hive Policy: " + e.getMessage());
                                throw new RuntimeException(e);
                            }
                        }
                        return null;
                    }
                });
            }
        } catch (Exception e) {
            log.error("Error Creating Sentry Hive Policy using Kerberos Authentication" + e.getMessage());
            throw new RuntimeException(e);
        }
    } else {
        String sentryPolicyName = getHivePolicyName(categoryName, feedName);
        if (!(sentryClientObject.checkIfRoleExists(sentryPolicyName))) {
            createReadOnlyHivePolicy(categoryName, feedName, hadoopAuthorizationGroups, databaseName,
                    tableNames);
        } else {
            try {
                updateReadOnlyHivePolicy(categoryName, feedName, hadoopAuthorizationGroups, databaseName,
                        tableNames);
            } catch (Exception e) {
                log.error("Failed to update Hive Policy" + e.getMessage());
                throw new RuntimeException(e);
            }
        }
    }

}

From source file:com.thinkbiganalytics.datalake.authorization.SentryAuthorizationService.java

License:Apache License

@Override
public void createOrUpdateReadOnlyHdfsPolicy(String categoryName, String feedName,
        List<String> hadoopAuthorizationGroups, List<String> hdfsPaths) {

    if (this.sentryConnection.getKerberosTicketConfiguration().isKerberosEnabled()) {
        try {
            UserGroupInformation ugi = authenticatePolicyCreatorWithKerberos();
            if (ugi == null) {
                log.error(UserGroupObjectError);
            } else {
                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {
                        createReadOnlyHdfsPolicy(categoryName, feedName, hadoopAuthorizationGroups, hdfsPaths);
                        return null;
                    }
                });
            }
        } catch (Exception e) {
            log.error("Error Creating Sentry HDFS Policy using Kerberos Authentication" + e.getMessage());
            throw new RuntimeException(e);
        }
    } else {
        createReadOnlyHdfsPolicy(categoryName, feedName, hadoopAuthorizationGroups, hdfsPaths);
    }
}

From source file:com.thinkbiganalytics.datalake.authorization.SentryAuthorizationService.java

License:Apache License

/**
 * If no security policy exists it will be created. If a policy exists it will be updated.
 */
@Override
public void updateSecurityGroupsForAllPolicies(String categoryName, String feedName,
        List<String> securityGroupNames, Map<String, Object> feedProperties) {
    if (this.sentryConnection.getKerberosTicketConfiguration().isKerberosEnabled()) {
        try {
            UserGroupInformation ugi = authenticatePolicyCreatorWithKerberos();
            if (ugi == null) {
                log.error(UserGroupObjectError);
            } else {
                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {

                        if (securityGroupNames == null || securityGroupNames.isEmpty()) {

                            // Only delete if the policies exist. It's possible that someone adds a security group right after feed creation and before initial ingestion.
                            String sentryPolicyName = getHivePolicyName(categoryName, feedName);
                            if ((sentryClientObject.checkIfRoleExists(sentryPolicyName))) {
                                deleteHivePolicy(categoryName, feedName);
                            }
                            if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))) {
                                String hdfsFoldersWithCommas = ((String) feedProperties
                                        .get(REGISTRATION_HDFS_FOLDERS)).replace("\n", ",");
                                List<String> hdfsFolders = Arrays.asList(hdfsFoldersWithCommas.split(","))
                                        .stream().collect(Collectors.toList());
                                deleteHdfsPolicy(categoryName, feedName, hdfsFolders);
                            }

                        } else {

                            if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))) {
                                String hdfsFoldersWithCommas = ((String) feedProperties
                                        .get(REGISTRATION_HDFS_FOLDERS)).replace("\n", ",");
                                List<String> hdfsFolders = Arrays.asList(hdfsFoldersWithCommas.split(","))
                                        .stream().collect(Collectors.toList());
                                createReadOnlyHdfsPolicy(categoryName, feedName, securityGroupNames,
                                        hdfsFolders);
                            }

                            String sentryHivePolicyName = getHivePolicyName(categoryName, feedName);
                            if (!sentryClientObject.checkIfRoleExists(sentryHivePolicyName)) {
                                if (!StringUtils
                                        .isEmpty((String) feedProperties.get(REGISTRATION_HIVE_TABLES))) {
                                    String hiveTablesWithCommas = ((String) feedProperties
                                            .get(REGISTRATION_HIVE_TABLES)).replace("\n", ",");
                                    List<String> hiveTables = Arrays.asList(hiveTablesWithCommas.split(","))
                                            .stream().collect(Collectors.toList()); //Stream.of(hiveTablesWithCommas).collect(Collectors.toList());
                                    String hiveSchema = ((String) feedProperties.get(REGISTRATION_HIVE_SCHEMA));
                                    createOrUpdateReadOnlyHivePolicy(categoryName, feedName, securityGroupNames,
                                            hiveSchema, hiveTables);
                                }

                            } else {

                                if (!StringUtils
                                        .isEmpty((String) feedProperties.get(REGISTRATION_HIVE_TABLES))) {
                                    try {
                                        sentryClientObject.dropRole(sentryHivePolicyName);
                                    } catch (SentryClientException e) {
                                        log.error("Unable to delete Hive policy  " + sentryHivePolicyName
                                                + " in Sentry   " + e.getMessage());
                                        throw new RuntimeException(e);
                                    }

                                    String hiveTablesWithCommas = ((String) feedProperties
                                            .get(REGISTRATION_HIVE_TABLES)).replace("\n", ",");
                                    List<String> hiveTables = Arrays.asList(hiveTablesWithCommas.split(","))
                                            .stream().collect(Collectors.toList());
                                    String hiveSchema = ((String) feedProperties.get(REGISTRATION_HIVE_SCHEMA));
                                    List<String> hivePermissions = new ArrayList<>();
                                    hivePermissions.add(HIVE_READ_ONLY_PERMISSION);
                                    createOrUpdateReadOnlyHivePolicy(categoryName, feedName, securityGroupNames,
                                            hiveSchema, hiveTables);
                                }
                            }
                        }
                        return null;
                    }
                });
            }
        } catch (Exception e) {
            log.error("Error Creating Sentry Policy using Kerberos Authentication" + e.getMessage());
            throw new RuntimeException(e);
        }
    } else {
        if (securityGroupNames == null || securityGroupNames.isEmpty()) {

            String sentryPolicyName = getHivePolicyName(categoryName, feedName);
            if ((sentryClientObject.checkIfRoleExists(sentryPolicyName))) {
                deleteHivePolicy(categoryName, feedName);
            }

            if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))) {
                String hdfsFoldersWithCommas = ((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))
                        .replace("\n", ",");
                List<String> hdfsFolders = Arrays.asList(hdfsFoldersWithCommas.split(",")).stream()
                        .collect(Collectors.toList());
                deleteHdfsPolicy(categoryName, feedName, hdfsFolders);
            }
        } else {

            if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))) {
                String hdfsFoldersWithCommas = ((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))
                        .replace("\n", ",");
                List<String> hdfsFolders = Arrays.asList(hdfsFoldersWithCommas.split(",")).stream()
                        .collect(Collectors.toList());
                createReadOnlyHdfsPolicy(categoryName, feedName, securityGroupNames, hdfsFolders);
            }

            String sentryHivePolicyName = getHivePolicyName(categoryName, feedName);
            if (!sentryClientObject.checkIfRoleExists(sentryHivePolicyName)) {

                if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HIVE_TABLES))) {
                    String hiveTablesWithCommas = ((String) feedProperties.get(REGISTRATION_HIVE_TABLES))
                            .replace("\n", ",");
                    List<String> hiveTables = Arrays.asList(hiveTablesWithCommas.split(",")).stream()
                            .collect(Collectors.toList()); //Stream.of(hiveTablesWithCommas).collect(Collectors.toList());
                    String hiveSchema = ((String) feedProperties.get(REGISTRATION_HIVE_SCHEMA));

                    createOrUpdateReadOnlyHivePolicy(categoryName, feedName, securityGroupNames, hiveSchema,
                            hiveTables);
                }
            } else {

                if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HIVE_TABLES))) {
                    try {
                        sentryClientObject.dropRole(sentryHivePolicyName);
                    } catch (SentryClientException e) {
                        log.error("Unable to delete Hive policy  " + sentryHivePolicyName + " in Sentry   "
                                + e.getMessage());
                        throw new RuntimeException(e);
                    }
                    String hiveTablesWithCommas = ((String) feedProperties.get(REGISTRATION_HIVE_TABLES))
                            .replace("\n", ",");
                    List<String> hiveTables = Arrays.asList(hiveTablesWithCommas.split(",")).stream()
                            .collect(Collectors.toList()); //Stream.of(hiveTablesWithCommas).collect(Collectors.toList());
                    String hiveSchema = ((String) feedProperties.get(REGISTRATION_HIVE_SCHEMA));
                    List<String> hivePermissions = new ArrayList<>();
                    hivePermissions.add(HIVE_READ_ONLY_PERMISSION);
                    createOrUpdateReadOnlyHivePolicy(categoryName, feedName, securityGroupNames, hiveSchema,
                            hiveTables);
                }
            }
        }

    }
}

From source file:com.thinkbiganalytics.datalake.authorization.SentryAuthorizationService.java

License:Apache License

@Override
public void deleteHivePolicy(String categoryName, String feedName) {
    if (this.sentryConnection.getKerberosTicketConfiguration().isKerberosEnabled()) {
        try {

            UserGroupInformation ugi = authenticatePolicyCreatorWithKerberos();
            if (ugi == null) {
                log.error(UserGroupObjectError);
            } else {
                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {
                        String sentryPolicyName = getHivePolicyName(categoryName, feedName);
                        if (sentryClientObject.checkIfRoleExists(sentryPolicyName)) {
                            try {
                                sentryClientObject.dropRole(sentryPolicyName);
                            } catch (SentryClientException e) {
                                log.error("Unable to delete policy  " + sentryPolicyName + " in Sentry  "
                                        + e.getMessage());
                                throw new RuntimeException(e);
                            }
                        }
                        return null;
                    }
                });
            }
        } catch (Exception e) {
            log.error("Failed to Delete Hive Policy With Kerberos" + e.getMessage());
            throw new RuntimeException(e);
        }
    } else {
        String sentryPolicyName = getHivePolicyName(categoryName, feedName);
        if (sentryClientObject.checkIfRoleExists(sentryPolicyName)) {
            try {
                sentryClientObject.dropRole(sentryPolicyName);
            } catch (SentryClientException e) {
                log.error("Unable to delete policy  " + sentryPolicyName + " in Sentry  " + e.getMessage());
                throw new RuntimeException(e);
            }
        }

    }

}