Example usage for org.apache.hadoop.security UserGroupInformation getUserName

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.getUserName(), collected from open-source projects.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public String getUserName() 

Document

Get the user's full principal name.

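Before the project examples below, here is a minimal, self-contained sketch of the call. It assumes only a Hadoop client classpath; the class name CurrentUserExample is illustrative and not part of any of the projects quoted on this page.

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserExample {
    public static void main(String[] args) throws IOException {
        // getCurrentUser() resolves the effective user (the login user, or a proxy user inside doAs()).
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // getUserName() returns the full principal name, e.g. "alice@EXAMPLE.COM" when Kerberos is enabled.
        System.out.println("Current principal: " + ugi.getUserName());
    }
}

With simple authentication the returned value is typically the OS login name; with Kerberos it is the full principal.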
Usage

From source file:com.thinkbiganalytics.kerberos.TestKerberosKinit.java

License:Apache License

private void testHdfsWithUserImpersonation(final String configResources, final String keytab,
        final String principal, String proxyUser, final String environment, final String hdfsUrl) {
    final String path = "/user";
    try {
        final Configuration configuration = TestKerberosKinit.createConfigurationFromList(configResources);
        UserGroupInformation realugi = TestKerberosKinit.generateKerberosTicket(configuration, keytab,
                principal);
        System.out.println(" ");
        System.out.println("Sucessfully got a kerberos ticket in the JVM");
        System.out.println("current user is: " + realugi.getUserName());

        UserGroupInformation ugiProxy = UserGroupInformation.createProxyUser(proxyUser, realugi);
        System.out.println("proxy user is: " + ugiProxy.getUserName());
        ugiProxy.doAs(new PrivilegedExceptionAction<Object>() {
            public Object run() {
                try {
                    searchHDFS(configuration, environment, path, hdfsUrl);
                } catch (Exception e) {
                    throw new RuntimeException("Error testing HDFS with Kerberos Hive Impersonation", e);
                }
                return null;
            }
        });

    } catch (Exception e) {
        System.out.println("Error testing HDFS\n\n");
        e.printStackTrace();
    }
}

From source file:com.thinkbiganalytics.kerberos.TestKerberosKinit.java

License:Apache License

private void testHdfsAsKerberosUser(final String configResources, final String keytab, final String principal,
        final String environment, final String hdfsUrl) {
    final String path = "/user";
    try {
        final Configuration configuration = TestKerberosKinit.createConfigurationFromList(configResources);
        UserGroupInformation realugi = TestKerberosKinit.generateKerberosTicket(configuration, keytab,
                principal);
        System.out.println(" ");
        System.out.println("Sucessfully got a kerberos ticket in the JVM");
        System.out.println("current user is: " + realugi.getUserName());

        realugi.doAs(new PrivilegedExceptionAction<Object>() {
            public Object run() {
                try {
                    searchHDFS(configuration, environment, path, hdfsUrl);
                } catch (Exception e) {
                    throw new RuntimeException("Error testing HDFS with Kerberos", e);
                }
                return null;
            }
        });

    } catch (Exception e) {
        System.out.println("Error testing HDFS\n\n");
        e.printStackTrace();
    }
}

From source file:com.thinkbiganalytics.nifi.v2.hdfs.AbstractHadoopProcessor.java

License:Apache License

protected void tryKerberosRelogin(UserGroupInformation ugi) {
    try {
        getLog().info(
                "Kerberos ticket age exceeds threshold [{} seconds] "
                        + "attempting to renew ticket for user {}",
                new Object[] { kerberosReloginThreshold, ugi.getUserName() });
        ugi.checkTGTAndReloginFromKeytab();
        lastKerberosReloginTime = System.currentTimeMillis() / 1000;
        getLog().info("Kerberos relogin successful or ticket still valid");
    } catch (IOException e) {
        // Most likely case of this happening is ticket is expired and error getting a new one,
        // meaning dfs operations would fail
        getLog().error("Kerberos relogin failed", e);
        throw new ProcessException("Unable to renew kerberos ticket", e);
    }
}

From source file:com.wandisco.s3hdfs.rewrite.filter.S3HdfsFilter.java

License:Apache License

public String getUserName(HttpServletRequest request) {
    UserGroupInformation ugi;
    try {
        ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        LOG.warn("Current user is not available.", e);
        return null;
    }
    if (fakeName == null) {
        String name = request.getParameter("user.name");
        if (name == null) {
            name = ugi.getUserName();
        }
        return name;
    } else {
        return fakeName;
    }
}

From source file:com.wandisco.s3hdfs.rewrite.filter.S3HdfsTestUtil.java

License:Apache License

S3Service configureS3Service(String host, int proxy) throws IOException, S3ServiceException {
    // configure the service
    Jets3tProperties props = new Jets3tProperties();
    props.setProperty("s3service.disable-dns-buckets", String.valueOf(true));
    props.setProperty("s3service.s3-endpoint", host);
    props.setProperty("s3service.s3-endpoint-http-port", String.valueOf(proxy));
    props.setProperty("s3service.https-only", String.valueOf(false));
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    AWSCredentials creds = new AWSCredentials(ugi.getShortUserName(), "SomeSecretKey", ugi.getUserName());
    return new RestS3Service(creds, null, null, props);
}

From source file:common.NameNode.java

License:Apache License

/** {@inheritDoc} */
public void createSymlink(String target, String link, FsPermission dirPerms, boolean createParent)
        throws IOException {
    myMetrics.numcreateSymlinkOps.inc();
    /* We enforce the MAX_PATH_LENGTH limit even though a symlink target 
     * URI may refer to a non-HDFS file system. 
     */
    if (!checkPathLength(link)) {
        throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH + " character limit");

    }
    if ("".equals(target)) {
        throw new IOException("Invalid symlink target");
    }
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    namesystem.createSymlink(target, link, new PermissionStatus(ugi.getUserName(), null, dirPerms),
            createParent);
}

From source file:de.tiqsolutions.hdfs.HadoopFileSystem.java

License:Apache License

void checkAccess(Path path, AccessMode... modes) throws IOException {
    try {
        FileStatus fileStatus = getFileContext().getFileStatus(((HadoopFileSystemPath) path).getPath());
        if (modes == null || modes.length == 0)
            return;

        String group = fileStatus.getGroup();
        String owner = fileStatus.getOwner();
        UserGroupInformation userGroupInformation = getFileContext().getUgi();

        boolean checkuser = false;
        boolean checkgroup = false;

        if (owner.equals(userGroupInformation.getUserName())) {
            checkuser = true;
        } else {
            for (String g : userGroupInformation.getGroupNames()) {
                if (group.equals(g)) {
                    checkgroup = true;
                    break;
                }

            }

        }

        PosixFileAttributeView view = provider().getFileAttributeView(path, PosixFileAttributeView.class);
        PosixFileAttributes attributes = view.readAttributes();
        Set<PosixFilePermission> permissions = attributes.permissions();

        getFileContext().getUgi().getGroupNames();
        for (AccessMode accessMode : modes) {
            switch (accessMode) {
            case READ:
                if (!permissions.contains(checkuser ? PosixFilePermission.OWNER_READ
                        : (checkgroup ? PosixFilePermission.GROUP_READ : PosixFilePermission.OTHERS_READ)))
                    throw new AccessDeniedException(path.toString());
                break;
            case WRITE:
                if (!permissions.contains(checkuser ? PosixFilePermission.OWNER_WRITE
                        : (checkgroup ? PosixFilePermission.GROUP_WRITE : PosixFilePermission.OTHERS_WRITE)))
                    throw new AccessDeniedException(path.toString());
                break;
            case EXECUTE:
                if (!permissions.contains(checkuser ? PosixFilePermission.OWNER_EXECUTE
                        : (checkgroup ? PosixFilePermission.GROUP_EXECUTE
                                : PosixFilePermission.OTHERS_EXECUTE)))
                    throw new AccessDeniedException(path.toString());
                break;
            }
        }

    } catch (FileNotFoundException e) {
        throw new NoSuchFileException(path.toString());
    }
}

From source file:hydrograph.engine.utilities.HiveMetastoreTokenProvider.java

License:Apache License

public static void obtainTokenForHiveMetastore(Configuration conf) throws TException, IOException {
    conf.addResource(new Path(HiveConfigurationMapping.getHiveConf("path_to_hive_site_xml")));
    HiveConf hiveConf = new HiveConf();
    hiveConf.addResource(conf);
    try {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        HiveMetaStoreClient hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf);

        if (UserGroupInformation.isSecurityEnabled()) {
            String metastore_uri = conf.get("hive.metastore.uris");

            LOG.trace("Metastore URI:" + metastore_uri);

            // Check for local metastore
            if (metastore_uri != null && metastore_uri.length() > 0) {
                String principal = conf.get("hive.metastore.kerberos.principal");
                String username = ugi.getUserName();

                if (principal != null && username != null) {
                    LOG.debug("username: " + username);
                    LOG.debug("principal: " + principal);

                    String tokenStr;
                    try {
                        // Get a delegation token from the Metastore.
                        tokenStr = hiveMetaStoreClient.getDelegationToken(username, principal);
                        // LOG.debug("Token String: " + tokenStr);
                    } catch (TException e) {
                        LOG.error(e.getMessage(), e);
                        throw new RuntimeException(e);
                    }

                    // Create the token from the token string.
                    Token<DelegationTokenIdentifier> hmsToken = new Token<DelegationTokenIdentifier>();
                    hmsToken.decodeFromUrlString(tokenStr);
                    // LOG.debug("Hive Token: " + hmsToken);

                    // Add the token to the credentials.
                    ugi.addToken(new Text("hive.metastore.delegation.token"), hmsToken);
                    LOG.trace("Added hive.metastore.delegation.token to conf.");
                } else {
                    LOG.debug("Username or principal == NULL");
                    LOG.debug("username= " + username);
                    LOG.debug("principal= " + principal);
                    throw new IllegalArgumentException("username and/or principal is equal to null!");
                }

            } else {
                LOG.info("HiveMetaStore configured in local mode");
            }
        }
    } catch (IOException e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    } catch (MetaException e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    }
}

From source file:io.confluent.connect.hdfs.DataWriter.java

License:Apache License

public DataWriter(HdfsSinkConnectorConfig connectorConfig, SinkTaskContext context, AvroData avroData) {
    try {
        String hadoopHome = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_HOME_CONFIG);
        System.setProperty("hadoop.home.dir", hadoopHome);

        this.connectorConfig = connectorConfig;
        this.avroData = avroData;
        this.context = context;

        String hadoopConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
        log.info("Hadoop configuration directory {}", hadoopConfDir);
        conf = new Configuration();
        if (!hadoopConfDir.equals("")) {
            conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
            conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
        }

        boolean secureHadoop = connectorConfig
                .getBoolean(HdfsSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
        if (secureHadoop) {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
            String principalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
            String keytab = connectorConfig.getString(HdfsSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

            if (principalConfig == null || keytab == null) {
                throw new ConfigException(
                        "Hadoop is using Kerboros for authentication, you need to provide both a connect principal and "
                                + "the path to the keytab of the principal.");
            }

            conf.set("hadoop.security.authentication", "kerberos");
            conf.set("hadoop.security.authorization", "true");
            String hostname = InetAddress.getLocalHost().getCanonicalHostName();
            // replace the _HOST specified in the principal config to the actual host
            String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
            String namenodePrincipalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);

            String namenodePrincipal = SecurityUtil.getServerPrincipal(namenodePrincipalConfig, hostname);
            // namenode principal is needed for multi-node hadoop cluster
            if (conf.get("dfs.namenode.kerberos.principal") == null) {
                conf.set("dfs.namenode.kerberos.principal", namenodePrincipal);
            }
            log.info("Hadoop namenode principal: " + conf.get("dfs.namenode.kerberos.principal"));

            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
            final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            log.info("Login as: " + ugi.getUserName());

            final long renewPeriod = connectorConfig
                    .getLong(HdfsSinkConnectorConfig.KERBEROS_TICKET_RENEW_PERIOD_MS_CONFIG);

            isRunning = true;
            ticketRenewThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    synchronized (DataWriter.this) {
                        while (isRunning) {
                            try {
                                DataWriter.this.wait(renewPeriod);
                                if (isRunning) {
                                    ugi.reloginFromKeytab();
                                }
                            } catch (IOException e) {
                                // We ignore this exception during relogin as each successful relogin gives
                                // additional 24 hours of authentication in the default config. In normal
                                // situations, the probability of failing relogin 24 times is low and if
                                // that happens, the task will fail eventually.
                                log.error("Error renewing the ticket", e);
                            } catch (InterruptedException e) {
                                // ignored
                            }
                        }
                    }
                }
            });
            log.info("Starting the Kerberos ticket renew thread with period {}ms.", renewPeriod);
            ticketRenewThread.start();
        }

        url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
        topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
        String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);

        @SuppressWarnings("unchecked")
        Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
                .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
        storage = StorageFactory.createStorage(storageClass, conf, url);

        createDir(topicsDir);
        createDir(topicsDir + HdfsSinkConnectorConstants.TEMPFILE_DIRECTORY);
        createDir(logsDir);

        format = getFormat();
        writerProvider = format.getRecordWriterProvider();
        schemaFileReader = format.getSchemaFileReader(avroData);

        partitioner = createPartitioner(connectorConfig);

        assignment = new HashSet<>(context.assignment());
        offsets = new HashMap<>();

        hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
        if (hiveIntegration) {
            hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
            hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
            hive = format.getHiveUtil(connectorConfig, avroData, hiveMetaStore);
            executorService = Executors.newSingleThreadExecutor();
            hiveUpdateFutures = new LinkedList<>();
        }

        topicPartitionWriters = new HashMap<>();
        for (TopicPartition tp : assignment) {
            TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(tp, storage, writerProvider,
                    partitioner, connectorConfig, context, avroData, hiveMetaStore, hive, schemaFileReader,
                    executorService, hiveUpdateFutures);
            topicPartitionWriters.put(tp, topicPartitionWriter);
        }
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        throw new ConnectException("Reflection exception: ", e);
    } catch (IOException e) {
        throw new ConnectException(e);
    }
}

From source file:nl.surfsara.warcexamples.hdfs.Headers.java

License:Apache License

@Override
public void run() {
    // PropertyConfigurator.configure("log4jconfig.properties");
    final Configuration conf = new Configuration();
    // The core-site.xml and hdfs-site.xml are cluster specific. If you wish to use this on other clusters adapt the files as needed.
    conf.addResource(
            Headers.class.getResourceAsStream("/nl/surfsara/warcexamples/hdfs/resources/core-site.xml"));
    conf.addResource(
            Headers.class.getResourceAsStream("/nl/surfsara/warcexamples/hdfs/resources/hdfs-site.xml"));

    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hadoop.security.authorization", "true");

    System.setProperty("java.security.krb5.realm", "CUA.SURFSARA.NL");
    System.setProperty("java.security.krb5.kdc", "kdc.hathi.surfsara.nl");

    UserGroupInformation.setConfiguration(conf);

    UserGroupInformation loginUser;
    try {
        loginUser = UserGroupInformation.getLoginUser();
        System.out.println("Logged in as: " + loginUser.getUserName());
        PrintHeaders printHeaders = new PrintHeaders(conf, path);
        loginUser.doAs(printHeaders);
    } catch (IOException e) {
        // Just dump the error..
        e.printStackTrace();
    }
}