Example usage for org.apache.hadoop.security UserGroupInformation getLoginUser

List of usage examples for org.apache.hadoop.security UserGroupInformation getLoginUser

Introduction

On this page you can find example usage for org.apache.hadoop.security UserGroupInformation getLoginUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getLoginUser() throws IOException 

Document

Get the currently logged in user.
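A minimal sketch of calling getLoginUser(), assuming a Hadoop login has already been established (for example via UserGroupInformation.setConfiguration() and loginUserFromKeytab(), as several of the examples below do); without an explicit login, the method falls back to the current OS user. The class name GetLoginUserExample is illustrative only.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class GetLoginUserExample {
    public static void main(String[] args) throws IOException {
        // Returns the UGI established at login time (keytab, ticket cache,
        // or the operating-system user when no explicit login was performed).
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        System.out.println("Login user: " + loginUser.getUserName());
        System.out.println("Short name: " + loginUser.getShortUserName());
        System.out.println("Has Kerberos credentials: " + loginUser.hasKerberosCredentials());
    }
}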

Usage

From source file:gobblin.yarn.YarnContainerSecurityManager.java

License:Apache License

@VisibleForTesting
void addDelegationTokens(Collection<Token<? extends TokenIdentifier>> tokens) throws IOException {
    for (Token<? extends TokenIdentifier> token : tokens) {
        if (!UserGroupInformation.getCurrentUser().addToken(token)) {
            LOGGER.error(String.format("Failed to add token %s to user %s", token.toString(),
                    UserGroupInformation.getLoginUser().getShortUserName()));
        }
    }
}

From source file:gobblin.yarn.YarnSecurityManagerTest.java

License:Apache License

@BeforeClass
public void setUp() throws Exception {
    // Use a random ZK port
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

    this.curatorFramework = this.closer.register(
            CuratorFrameworkFactory.newClient(testingZKServer.getConnectString(), new RetryOneTime(2000)));
    this.curatorFramework.start();

    String resourceName = YarnSecurityManagerTest.class.getSimpleName() + ".conf";
    URL url = YarnSecurityManagerTest.class.getClassLoader().getResource(resourceName);
    Assert.assertNotNull(url, "Could not find resource " + resourceName);

    Config config = ConfigFactory.parseURL(url).withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString())).resolve();

    String zkConnectingString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    String helixClusterName = config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY);

    HelixUtils.createGobblinHelixCluster(zkConnectingString, helixClusterName);

    this.helixManager = HelixManagerFactory.getZKHelixManager(helixClusterName,
            TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.SPECTATOR, zkConnectingString);
    this.helixManager.connect();

    this.configuration = new Configuration();
    this.localFs = Mockito.spy(FileSystem.getLocal(this.configuration));

    this.token = new Token<>();
    this.token.setKind(new Text("test"));
    this.token.setService(new Text("test"));
    Mockito.<Token<?>>when(
            this.localFs.getDelegationToken(UserGroupInformation.getLoginUser().getShortUserName()))
            .thenReturn(this.token);

    this.baseDir = new Path(YarnSecurityManagerTest.class.getSimpleName());
    this.tokenFilePath = new Path(this.baseDir, GobblinYarnConfigurationKeys.TOKEN_FILE_NAME);
    this.yarnAppSecurityManager = new YarnAppSecurityManager(config, this.helixManager, this.localFs,
            this.tokenFilePath);
    this.yarnContainerSecurityManager = new YarnContainerSecurityManager(config, this.localFs, new EventBus());
}

From source file:io.confluent.connect.hdfs.DataWriter.java

License:Apache License

public DataWriter(HdfsSinkConnectorConfig connectorConfig, SinkTaskContext context, AvroData avroData) {
    try {
        String hadoopHome = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_HOME_CONFIG);
        System.setProperty("hadoop.home.dir", hadoopHome);

        this.connectorConfig = connectorConfig;
        this.avroData = avroData;
        this.context = context;

        String hadoopConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
        log.info("Hadoop configuration directory {}", hadoopConfDir);
        conf = new Configuration();
        if (!hadoopConfDir.equals("")) {
            conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
            conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
        }

        boolean secureHadoop = connectorConfig
                .getBoolean(HdfsSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
        if (secureHadoop) {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
            String principalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
            String keytab = connectorConfig.getString(HdfsSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

            if (principalConfig == null || keytab == null) {
                throw new ConfigException(
                        "Hadoop is using Kerboros for authentication, you need to provide both a connect principal and "
                                + "the path to the keytab of the principal.");
            }

            conf.set("hadoop.security.authentication", "kerberos");
            conf.set("hadoop.security.authorization", "true");
            String hostname = InetAddress.getLocalHost().getCanonicalHostName();
            // replace the _HOST specified in the principal config with the actual host
            String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
            String namenodePrincipalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);

            String namenodePrincipal = SecurityUtil.getServerPrincipal(namenodePrincipalConfig, hostname);
            // namenode principal is needed for multi-node hadoop cluster
            if (conf.get("dfs.namenode.kerberos.principal") == null) {
                conf.set("dfs.namenode.kerberos.principal", namenodePrincipal);
            }
            log.info("Hadoop namenode principal: " + conf.get("dfs.namenode.kerberos.principal"));

            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
            final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            log.info("Login as: " + ugi.getUserName());

            final long renewPeriod = connectorConfig
                    .getLong(HdfsSinkConnectorConfig.KERBEROS_TICKET_RENEW_PERIOD_MS_CONFIG);

            isRunning = true;
            ticketRenewThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    synchronized (DataWriter.this) {
                        while (isRunning) {
                            try {
                                DataWriter.this.wait(renewPeriod);
                                if (isRunning) {
                                    ugi.reloginFromKeytab();
                                }
                            } catch (IOException e) {
                                // We ignore this exception during relogin as each successful relogin gives
                                // additional 24 hours of authentication in the default config. In normal
                                // situations, the probability of failing relogin 24 times is low and if
                                // that happens, the task will fail eventually.
                                log.error("Error renewing the ticket", e);
                            } catch (InterruptedException e) {
                                // ignored
                            }
                        }
                    }
                }
            });
            log.info("Starting the Kerberos ticket renew thread with period {}ms.", renewPeriod);
            ticketRenewThread.start();
        }

        url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
        topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
        String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);

        @SuppressWarnings("unchecked")
        Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
                .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
        storage = StorageFactory.createStorage(storageClass, conf, url);

        createDir(topicsDir);
        createDir(topicsDir + HdfsSinkConnectorConstants.TEMPFILE_DIRECTORY);
        createDir(logsDir);

        format = getFormat();
        writerProvider = format.getRecordWriterProvider();
        schemaFileReader = format.getSchemaFileReader(avroData);

        partitioner = createPartitioner(connectorConfig);

        assignment = new HashSet<>(context.assignment());
        offsets = new HashMap<>();

        hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
        if (hiveIntegration) {
            hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
            hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
            hive = format.getHiveUtil(connectorConfig, avroData, hiveMetaStore);
            executorService = Executors.newSingleThreadExecutor();
            hiveUpdateFutures = new LinkedList<>();
        }

        topicPartitionWriters = new HashMap<>();
        for (TopicPartition tp : assignment) {
            TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(tp, storage, writerProvider,
                    partitioner, connectorConfig, context, avroData, hiveMetaStore, hive, schemaFileReader,
                    executorService, hiveUpdateFutures);
            topicPartitionWriters.put(tp, topicPartitionWriter);
        }
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        throw new ConnectException("Reflection exception: ", e);
    } catch (IOException e) {
        throw new ConnectException(e);
    }
}

From source file:io.hops.hopsworks.common.security.BaseHadoopClientsService.java

License:Open Source License

@PostConstruct
public void init() {
    String confDir = settings.getHadoopConfDir();
    File coreSite = new File(confDir, "core-site.xml");
    if (!coreSite.exists()) {
        handleMissingConf("core-site.xml", confDir);
    }

    Configuration conf = new Configuration();
    conf.addResource(new Path(coreSite.getAbsolutePath()));

    sslConf = new Configuration(false);
    String hadoopConfDir = settings.getHadoopConfDir();
    File serverSSLConf = new File(hadoopConfDir, conf.get(SSLFactory.SSL_SERVER_CONF_KEY, "ssl-server.xml"));
    sslConf.addResource(new Path(serverSSLConf.getAbsolutePath()));
    superKeystorePath = sslConf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY));
    superKeystorePassword = sslConf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY));
    superTrustStorePath = sslConf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY));
    superTrustStorePassword = sslConf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY));
    try {
        superuser = UserGroupInformation.getLoginUser().getUserName();
    } catch (IOException ex) {
        throw new IllegalStateException("Could not identify login user");
    }
}

From source file:io.prestosql.plugin.hive.authentication.SimpleHadoopAuthentication.java

License:Apache License

@Override
public UserGroupInformation getUserGroupInformation() {
    try {
        return UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

From source file:joshelser.Server.java

License:Apache License

public static void main(String[] args) throws Exception {
    Opts opts = new Opts();

    opts.parseArgs(Server.class, args);

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Parse out the primary/instance@DOMAIN from the principal
    String principal = SecurityUtil.getServerPrincipal(opts.principal,
            InetAddress.getLocalHost().getCanonicalHostName());
    HadoopKerberosName name = new HadoopKerberosName(principal);
    String primary = name.getServiceName();
    String instance = name.getHostName();

    // Log in using the keytab
    UserGroupInformation.loginUserFromKeytab(principal, opts.keytab);

    // Get the info from our login
    UserGroupInformation serverUser = UserGroupInformation.getLoginUser();
    log.info("Current user: {}", serverUser);

    // Open the server using the provided port
    TServerSocket serverTransport = new TServerSocket(opts.port);

    // Wrap our implementation with the interface's processor
    HdfsService.Processor<Iface> processor = new HdfsService.Processor<Iface>(new HdfsServiceImpl(fs));

    // Use authorization and confidentiality
    Map<String, String> saslProperties = new HashMap<String, String>();
    saslProperties.put(Sasl.QOP, "auth-conf");

    // Creating the server definition
    TSaslServerTransport.Factory saslTransportFactory = new TSaslServerTransport.Factory();
    saslTransportFactory.addServerDefinition("GSSAPI", // tell SASL to use GSSAPI, which supports Kerberos
            primary, // kerberos primary for server - "myprincipal" in myprincipal/my.server.com@MY.REALM
            instance, // kerberos instance for server - "my.server.com" in myprincipal/my.server.com@MY.REALM
            saslProperties, // Properties set, above
            new SaslRpcServer.SaslGssCallbackHandler()); // Ensures that authenticated user is the same as the authorized user

    // Make sure the TTransportFactory is performing a UGI.doAs
    TTransportFactory ugiTransportFactory = new TUGIAssumingTransportFactory(saslTransportFactory, serverUser);

    // Processor which takes the UGI for the RPC call, proxies that user on the server login, and then runs as the proxied user
    TUGIAssumingProcessor ugiProcessor = new TUGIAssumingProcessor(processor);

    // Make a simple TThreadPoolServer with the processor and transport factory
    TServer server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport)
            .transportFactory(ugiTransportFactory).processor(ugiProcessor));

    // Start the thrift server
    server.serve();
}

From source file:joshelser.TUGIAssumingProcessor.java

License:Apache License

@Override
public boolean process(final TProtocol inProt, final TProtocol outProt) throws TException {
    TTransport trans = inProt.getTransport();
    if (!(trans instanceof TSaslServerTransport)) {
        throw new TException("Unexpected non-SASL transport " + trans.getClass());
    }
    TSaslServerTransport saslTrans = (TSaslServerTransport) trans;
    SaslServer saslServer = saslTrans.getSaslServer();
    String authId = saslServer.getAuthorizationID();
    String endUser = authId;

    UserGroupInformation clientUgi = null;
    try {
        clientUgi = UserGroupInformation.createProxyUser(endUser, UserGroupInformation.getLoginUser());
        final String remoteUser = clientUgi.getShortUserName();
        log.debug("Executing action as {}", remoteUser);
        return clientUgi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() {
                try {
                    return wrapped.process(inProt, outProt);
                } catch (TException te) {
                    throw new RuntimeException(te);
                }
            }
        });
    } catch (RuntimeException rte) {
        if (rte.getCause() instanceof TException) {
            log.error("Failed to invoke wrapped processor", rte.getCause());
            throw (TException) rte.getCause();
        }
        throw rte;
    } catch (InterruptedException | IOException e) {
        log.error("Failed to invoke wrapped processor", e);
        throw new RuntimeException(e);
    } finally {
        if (clientUgi != null) {
            try {
                FileSystem.closeAllForUGI(clientUgi);
            } catch (IOException exception) {
                log.error("Could not clean up file-system handles for UGI: {}", clientUgi, exception);
            }
        }
    }
}

From source file:nl.surfsara.newsreader.loader.Loader.java

License:Apache License

private void init() throws IOException {
    conf = new Configuration();
    conf.addResource(new Path("core-site.xml"));
    conf.addResource(new Path("hdfs-site.xml"));

    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hadoop.security.authorization", "true");

    System.setProperty("java.security.krb5.realm", "CUA.SURFSARA.NL");
    System.setProperty("java.security.krb5.kdc", "kerberos1.osd.surfsara.nl");

    UserGroupInformation.setConfiguration(conf);

    loginUser = UserGroupInformation.getLoginUser();
    logger.info("Logged in as: " + loginUser.getUserName());
}

From source file:nl.surfsara.warcexamples.hdfs.Headers.java

License:Apache License

@Override
public void run() {
    // PropertyConfigurator.configure("log4jconfig.properties");
    final Configuration conf = new Configuration();
    // The core-site.xml and hdfs-site.xml are cluster specific. If you wish to use this on other clusters adapt the files as needed.
    conf.addResource(
            Headers.class.getResourceAsStream("/nl/surfsara/warcexamples/hdfs/resources/core-site.xml"));
    conf.addResource(
            Headers.class.getResourceAsStream("/nl/surfsara/warcexamples/hdfs/resources/hdfs-site.xml"));

    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hadoop.security.authorization", "true");

    System.setProperty("java.security.krb5.realm", "CUA.SURFSARA.NL");
    System.setProperty("java.security.krb5.kdc", "kdc.hathi.surfsara.nl");

    UserGroupInformation.setConfiguration(conf);

    UserGroupInformation loginUser;
    try {
        loginUser = UserGroupInformation.getLoginUser();
        System.out.println("Logged in as: " + loginUser.getUserName());
        PrintHeaders printHeaders = new PrintHeaders(conf, path);
        loginUser.doAs(printHeaders);
    } catch (IOException e) {
        // Just dump the error..
        e.printStackTrace();
    }
}

From source file:org.apache.accumulo.core.rpc.ThriftUtil.java

License:Apache License

/**
 * Some wonderful snippets of documentation from HBase on performing the re-login client-side (as well as server-side) in the following paragraph. We want to
 * attempt a re-login to automatically refresh the client's Krb "credentials" (remember, a server might also be a client, master sending RPC to tserver), but
 * we have to take care to avoid Kerberos' replay attack protection.
 * <p>
 * If multiple clients with the same principal try to connect to the same server at the same time, the server assumes a replay attack is in progress. This is
 * a feature of kerberos. In order to work around this, what is done is that the client backs off randomly and tries to initiate the connection again. The
 * other problem is to do with ticket expiry. To handle that, a relogin is attempted.
 */
static void attemptClientReLogin() {
    try {
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        if (null == loginUser || !loginUser.hasKerberosCredentials()) {
            // We should have already checked that we're logged in and have credentials. A precondition-like check.
            throw new RuntimeException("Expected to find Kerberos UGI credentials, but did not");
        }
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        // A Proxy user is the "effective user" (in name only), riding on top of the "real user"'s Krb credentials.
        UserGroupInformation realUser = currentUser.getRealUser();

        // re-login only in case it is the login user or superuser.
        if (loginUser.equals(currentUser) || loginUser.equals(realUser)) {
            if (UserGroupInformation.isLoginKeytabBased()) {
                log.info("Performing keytab-based Kerberos re-login");
                loginUser.reloginFromKeytab();
            } else {
                log.info("Performing ticket-cache-based Kerberos re-login");
                loginUser.reloginFromTicketCache();
            }

            // Avoid the replay attack protection, sleep 1 to 5000ms
            try {
                Thread.sleep((SASL_BACKOFF_RAND.nextInt(RELOGIN_MAX_BACKOFF) + 1));
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        } else {
            log.debug("Not attempting Kerberos re-login: loginUser={}, currentUser={}, realUser={}", loginUser,
                    currentUser, realUser);
        }
    } catch (IOException e) {
        // The inability to check is worrisome and deserves a RuntimeException instead of a propagated IO-like Exception.
        log.warn("Failed to check (and/or perform) Kerberos client re-login", e);
        throw new RuntimeException(e);
    }
}