Example usage for org.apache.hadoop.security.SecurityUtil.setAuthenticationMethod

List of usage examples for org.apache.hadoop.security.SecurityUtil.setAuthenticationMethod

Introduction

On this page you can find example usages of org.apache.hadoop.security.SecurityUtil.setAuthenticationMethod.

Prototype

public static void setAuthenticationMethod(AuthenticationMethod authenticationMethod, Configuration conf) 
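
Before the full examples, here is a minimal sketch of the typical call pattern: set the authentication method on a Hadoop Configuration, then hand that configuration to UserGroupInformation so later logins use it. The choice of KERBEROS and the class name AuthSetupSketch are illustrative assumptions, not part of the API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class AuthSetupSketch {
    public static void main(String[] args) {
        // Hadoop configuration (loads core-site.xml from the classpath if present)
        Configuration conf = new Configuration();

        // Switch the authentication method to Kerberos in this configuration
        SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);

        // Make UserGroupInformation pick up the security settings before any login
        UserGroupInformation.setConfiguration(conf);
    }
}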

Usage

From source file: com.github.sakserv.minicluster.impl.KdcLocalCluster.java

License: Apache License

protected void prepareSecureConfiguration(String username) throws Exception {
    baseConf = new Configuration(false);
    SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
    baseConf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
    //baseConf.set(CommonConfigurationKeys.HADOOP_RPC_PROTECTION, "authentication");

    String sslConfigDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
    KeyStoreTestUtil.setupSSLConfig(baseDir, sslConfigDir, baseConf, false);

    // User
    baseConf.set("hadoop.proxyuser." + username + ".hosts", "*");
    baseConf.set("hadoop.proxyuser." + username + ".groups", "*");

    // HTTP
    String spnegoPrincipal = getKrbPrincipalWithRealm(SPNEGO_USER_NAME);
    baseConf.set("hadoop.proxyuser." + SPNEGO_USER_NAME + ".groups", "*");
    baseConf.set("hadoop.proxyuser." + SPNEGO_USER_NAME + ".hosts", "*");

    // Oozie
    String ooziePrincipal = getKrbPrincipalWithRealm(OOZIE_USER_NAME);
    baseConf.set("hadoop.proxyuser." + OOZIE_USER_NAME + ".hosts", "*");
    baseConf.set("hadoop.proxyuser." + OOZIE_USER_NAME + ".groups", "*");
    baseConf.set("hadoop.user.group.static.mapping.overrides", OOZIE_PROXIED_USER_NAME + "=oozie");
    baseConf.set("oozie.service.HadoopAccessorService.keytab.file", getKeytabForPrincipal(OOZIE_USER_NAME));
    baseConf.set("oozie.service.HadoopAccessorService.kerberos.principal", ooziePrincipal);
    baseConf.setBoolean("oozie.service.HadoopAccessorService.kerberos.enabled", true);

    // HDFS
    String hdfsPrincipal = getKrbPrincipalWithRealm(HDFS_USER_NAME);
    baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, getKeytabForPrincipal(HDFS_USER_NAME));
    baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, getKeytabForPrincipal(HDFS_USER_NAME));
    baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
    baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, getKeytabForPrincipal(SPNEGO_USER_NAME));
    baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
    baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
    baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
    baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

    // HBase
    String hbasePrincipal = getKrbPrincipalWithRealm(HBASE_USER_NAME);
    baseConf.set("hbase.security.authentication", "kerberos");
    baseConf.setBoolean("hbase.security.authorization", true);
    baseConf.set("hbase.regionserver.kerberos.principal", hbasePrincipal);
    baseConf.set("hbase.regionserver.keytab.file", getKeytabForPrincipal(HBASE_USER_NAME));
    baseConf.set("hbase.master.kerberos.principal", hbasePrincipal);
    baseConf.set("hbase.master.keytab.file", getKeytabForPrincipal(HBASE_USER_NAME));
    baseConf.set("hbase.coprocessor.region.classes", "org.apache.hadoop.hbase.security.token.TokenProvider");
    baseConf.set("hbase.rest.authentication.kerberos.keytab", getKeytabForPrincipal(SPNEGO_USER_NAME));
    baseConf.set("hbase.rest.authentication.kerberos.principal", spnegoPrincipal);
    baseConf.set("hbase.rest.kerberos.principal", hbasePrincipal);
    baseConf.set("hadoop.proxyuser." + HBASE_USER_NAME + ".groups", "*");
    baseConf.set("hadoop.proxyuser." + HBASE_USER_NAME + ".hosts", "*");

    //hbase.coprocessor.master.classes -> org.apache.hadoop.hbase.security.access.AccessController
    //hbase.coprocessor.region.classes -> org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController

    // Storm
    //String stormPrincipal = getKrbPrincipalWithRealm(STORM_USER_NAME);

    // Yarn
    String yarnPrincipal = getKrbPrincipalWithRealm(YARN_USER_NAME);
    baseConf.set("yarn.resourcemanager.keytab", getKeytabForPrincipal(YARN_USER_NAME));
    baseConf.set("yarn.resourcemanager.principal", yarnPrincipal);
    baseConf.set("yarn.nodemanager.keytab", getKeytabForPrincipal(YARN_USER_NAME));
    baseConf.set("yarn.nodemanager.principal", yarnPrincipal);

    // Mapreduce
    String mrv2Principal = getKrbPrincipalWithRealm(MRV2_USER_NAME);
    baseConf.set("mapreduce.jobhistory.keytab", getKeytabForPrincipal(MRV2_USER_NAME));
    baseConf.set("mapreduce.jobhistory.principal", mrv2Principal);
}

From source file: com.redsqirl.workflow.server.connect.ServerMain.java

License: Open Source License

public static void main(String[] arg) throws RemoteException {
    String userName = System.getProperty("user.name");
    int port = 2001;
    if (arg.length > 0) {
        try {
            port = Integer.valueOf(arg[0]);
        } catch (Exception e) {
            port = 2001;
        }
    }

    //Loads preferences
    WorkflowPrefManager runner = WorkflowPrefManager.getInstance();

    if (runner.isInit()) {
        // Loads in the log settings.
        BasicConfigurator.configure();

        try {

            if (WorkflowPrefManager.getSysProperty("core.workflow_lib_path") != null) {
                Logger.getRootLogger().setLevel(Level.DEBUG);
            } else {
                Logger.getRootLogger().setLevel(Level.INFO);
            }

            Logger.getRootLogger().addAppender(
                    new FileAppender(new PatternLayout("[%d{MMM dd HH:mm:ss}] %-5p (%F:%L) - %m%n"),
                            WorkflowPrefManager.getPathuserpref() + "/redsqirl-workflow.log"));
        } catch (Exception e) {
            logger.error("Fail to write log in temporary folder");
        }
        logger = Logger.getLogger(ServerMain.class);

        //Setup the user home if not setup yet
        WorkflowPrefManager.setupHome();
        WorkflowPrefManager.createUserFooter();

        NameNodeVar.set(WorkflowPrefManager.getSysProperty(WorkflowPrefManager.sys_namenode));
        NameNodeVar.setJobTracker(WorkflowPrefManager.getSysProperty(WorkflowPrefManager.sys_jobtracker));
        logger.debug("sys_namenode Path: " + NameNodeVar.get());

        //Login on kerberos if necessary
        if (WorkflowPrefManager.isSecEnable()) {
            logger.info("Security enabled");
            String hostname = WorkflowPrefManager.getProperty(WorkflowPrefManager.sys_sec_hostname);
            String keytabTemplate = WorkflowPrefManager
                    .getProperty(WorkflowPrefManager.sys_keytab_pat_template);
            String realm = WorkflowPrefManager.getProperty(WorkflowPrefManager.sys_kerberos_realm);
            if (keytabTemplate != null) {
                try {
                    String keytab = keytabTemplate.replaceAll("_USER", userName);
                    try {
                        //Update Hadoop security configurations
                        NameNodeVar.addToDefaultConf(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());
                        NameNodeVar.addToDefaultConf(NameNodeVar.SERVER_KEYTAB_KEY, keytab);
                        NameNodeVar.addToDefaultConf(NameNodeVar.SERVER_PRINCIPAL_KEY,
                                userName + "/_HOST@" + realm);
                    } catch (Exception e) {
                        logger.error(e, e);
                    }
                    Configuration conf = NameNodeVar.getConf();

                    logger.info(NameNodeVar.getConfStr(conf));

                    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
                    logger.info("Keytab: " + keytab);
                    logger.info("user: " + userName);
                    Process p = Runtime.getRuntime().exec("kinit -k -t " + keytab + " " + SecurityUtil
                            .getServerPrincipal(conf.get(NameNodeVar.SERVER_PRINCIPAL_KEY), hostname));
                    p.waitFor();
                } catch (Exception e) {
                    logger.error("Fail to register to on kerberos: " + e, e);
                }
            }
        } else {
            logger.info("Security disabled");
        }

        try {

            logger.debug("start server main");

            String nameWorkflow = userName + "@wfm";

            String nameHDFS = userName + "@hdfs";
            String nameHDFSBrowser = userName + "@hdfsbrowser";
            String nameHcat = userName + "@hcat";
            String nameJdbc = userName + "@jdbc";
            String nameSshArray = userName + "@ssharray";

            String nameOozie = userName + "@oozie";
            String namePrefs = userName + "@prefs";
            String nameSuperActionManager = userName + "@samanager";

            try {
                registry = LocateRegistry.createRegistry(port);
                logger.debug(" ---------------- create registry");
            } catch (Exception e) {
                registry = LocateRegistry.getRegistry(port);
                logger.debug(" ---------------- Got registry");
            }

            int i = 0;

            DataFlowInterface dfi = (DataFlowInterface) WorkflowInterface.getInstance();
            while (i < 40) {
                try {
                    registry.rebind(nameWorkflow, dfi);
                    break;
                } catch (Exception e) {
                    ++i;
                    Thread.sleep(1000);
                    logger.debug("Sleep " + i);
                }
            }

            logger.debug("nameWorkflow: " + nameWorkflow);

            registry.rebind(nameJdbc, (DataStore) new JdbcStore());

            logger.debug("nameJdbc: " + nameJdbc);

            registry.rebind(nameHcat, (DataStore) new HCatStore());

            logger.debug("nameJdbc: " + nameJdbc);

            registry.rebind(nameOozie, (JobManager) OozieManager.getInstance());

            logger.debug("nameOozie: " + nameOozie);

            registry.rebind(nameSshArray, (SSHDataStoreArray) SSHInterfaceArray.getInstance());

            logger.debug("nameSshArray: " + nameSshArray);

            registry.rebind(nameHDFS, (DataStore) new HDFSInterface());

            logger.debug("nameHDFS: " + nameHDFS);

            registry.rebind(nameHDFSBrowser, (DataStore) new HDFSInterface());

            logger.debug("nameHDFSBrowser: " + nameHDFSBrowser);

            registry.rebind(namePrefs, (PropertiesManager) WorkflowPrefManager.getProps());

            logger.debug("namePrefs: " + namePrefs);

            logger.debug("nameHDFS: " + nameSuperActionManager);

            registry.rebind(nameSuperActionManager, (ModelManagerInt) new ModelManager());

            logger.debug("end server main");

        } catch (IOException e) {
            logger.error(e.getMessage(), e);

            System.exit(1);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            System.exit(1);

        }
    }
}

From source file: io.confluent.connect.hdfs.DataWriter.java

License: Apache License

public DataWriter(HdfsSinkConnectorConfig connectorConfig, SinkTaskContext context, AvroData avroData) {
    try {
        String hadoopHome = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_HOME_CONFIG);
        System.setProperty("hadoop.home.dir", hadoopHome);

        this.connectorConfig = connectorConfig;
        this.avroData = avroData;
        this.context = context;

        String hadoopConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
        log.info("Hadoop configuration directory {}", hadoopConfDir);
        conf = new Configuration();
        if (!hadoopConfDir.equals("")) {
            conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
            conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
        }

        boolean secureHadoop = connectorConfig
                .getBoolean(HdfsSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
        if (secureHadoop) {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
            String principalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
            String keytab = connectorConfig.getString(HdfsSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

            if (principalConfig == null || keytab == null) {
                throw new ConfigException(
                        "Hadoop is using Kerboros for authentication, you need to provide both a connect principal and "
                                + "the path to the keytab of the principal.");
            }

            conf.set("hadoop.security.authentication", "kerberos");
            conf.set("hadoop.security.authorization", "true");
            String hostname = InetAddress.getLocalHost().getCanonicalHostName();
            // replace the _HOST specified in the principal config to the actual host
            String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
            String namenodePrincipalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);

            String namenodePrincipal = SecurityUtil.getServerPrincipal(namenodePrincipalConfig, hostname);
            // namenode principal is needed for multi-node hadoop cluster
            if (conf.get("dfs.namenode.kerberos.principal") == null) {
                conf.set("dfs.namenode.kerberos.principal", namenodePrincipal);
            }
            log.info("Hadoop namenode principal: " + conf.get("dfs.namenode.kerberos.principal"));

            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
            final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            log.info("Login as: " + ugi.getUserName());

            final long renewPeriod = connectorConfig
                    .getLong(HdfsSinkConnectorConfig.KERBEROS_TICKET_RENEW_PERIOD_MS_CONFIG);

            isRunning = true;
            ticketRenewThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    synchronized (DataWriter.this) {
                        while (isRunning) {
                            try {
                                DataWriter.this.wait(renewPeriod);
                                if (isRunning) {
                                    ugi.reloginFromKeytab();
                                }
                            } catch (IOException e) {
                                // We ignore this exception during relogin as each successful relogin gives
                                // additional 24 hours of authentication in the default config. In normal
                                // situations, the probability of failing relogin 24 times is low and if
                                // that happens, the task will fail eventually.
                                log.error("Error renewing the ticket", e);
                            } catch (InterruptedException e) {
                                // ignored
                            }
                        }
                    }
                }
            });
            log.info("Starting the Kerberos ticket renew thread with period {}ms.", renewPeriod);
            ticketRenewThread.start();
        }

        url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
        topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
        String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);

        @SuppressWarnings("unchecked")
        Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
                .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
        storage = StorageFactory.createStorage(storageClass, conf, url);

        createDir(topicsDir);
        createDir(topicsDir + HdfsSinkConnectorConstants.TEMPFILE_DIRECTORY);
        createDir(logsDir);

        format = getFormat();
        writerProvider = format.getRecordWriterProvider();
        schemaFileReader = format.getSchemaFileReader(avroData);

        partitioner = createPartitioner(connectorConfig);

        assignment = new HashSet<>(context.assignment());
        offsets = new HashMap<>();

        hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
        if (hiveIntegration) {
            hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
            hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
            hive = format.getHiveUtil(connectorConfig, avroData, hiveMetaStore);
            executorService = Executors.newSingleThreadExecutor();
            hiveUpdateFutures = new LinkedList<>();
        }

        topicPartitionWriters = new HashMap<>();
        for (TopicPartition tp : assignment) {
            TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(tp, storage, writerProvider,
                    partitioner, connectorConfig, context, avroData, hiveMetaStore, hive, schemaFileReader,
                    executorService, hiveUpdateFutures);
            topicPartitionWriters.put(tp, topicPartitionWriter);
        }
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        throw new ConnectException("Reflection exception: ", e);
    } catch (IOException e) {
        throw new ConnectException(e);
    }
}

From source file: io.confluent.connect.hdfs.TestWithSecureMiniDFSCluster.java

License: Apache License

private Configuration createSecureConfig(String dataTransferProtection) throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
    conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
    conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
    conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
    conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
    conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
    conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
    conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
    conf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY, "true");//https://issues.apache.org/jira/browse/HDFS-7431
    String keystoresDir = baseDir.getAbsolutePath();
    String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
    return conf;
}

From source file: org.apache.atlas.hook.AtlasTopicCreator.java

License: Apache License

@VisibleForTesting
protected boolean handleSecurity(Configuration atlasProperties) {
    if (AuthenticationUtil.isKerberosAuthenticationEnabled(atlasProperties)) {
        String kafkaPrincipal = atlasProperties.getString("atlas.notification.kafka.service.principal");
        String kafkaKeyTab = atlasProperties.getString("atlas.notification.kafka.keytab.location");
        org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
        SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, hadoopConf);
        try {
            String serverPrincipal = SecurityUtil.getServerPrincipal(kafkaPrincipal, (String) null);
            UserGroupInformation.setConfiguration(hadoopConf);
            UserGroupInformation.loginUserFromKeytab(serverPrincipal, kafkaKeyTab);
        } catch (IOException e) {
            LOG.warn("Could not login as {} from keytab file {}", kafkaPrincipal, kafkaKeyTab, e);
            return false;
        }
    }
    return true;
}

From source file: org.apache.atlas.web.listeners.LoginProcessor.java

License: Apache License

protected void setupHadoopConfiguration(Configuration hadoopConfig,
        org.apache.commons.configuration.Configuration configuration) {
    String authMethod = configuration != null ? configuration.getString(AUTHENTICATION_METHOD) : null;
    // getString may return null, and would like to log the nature of the default setting
    if (authMethod == null) {
        LOG.info("No authentication method configured.  Defaulting to simple authentication");
        authMethod = "simple";
    }
    SecurityUtil.setAuthenticationMethod(
            UserGroupInformation.AuthenticationMethod.valueOf(authMethod.toUpperCase()), hadoopConfig);
}

From source file: org.apache.flink.streaming.connectors.fs.RollingSinkSecuredITCase.java

License: Apache License

private static void populateSecureConfigurations() {

    String dataTransferProtection = "authentication";

    SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
    conf.set(DFS_NAMENODE_USER_NAME_KEY, SecureTestEnvironment.getHadoopServicePrincipal());
    conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, SecureTestEnvironment.getTestKeytab());
    conf.set(DFS_DATANODE_USER_NAME_KEY, SecureTestEnvironment.getHadoopServicePrincipal());
    conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, SecureTestEnvironment.getTestKeytab());
    conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SecureTestEnvironment.getHadoopServicePrincipal());

    conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);

    conf.set("dfs.data.transfer.protection", dataTransferProtection);

    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTP_ONLY.name());

    conf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY, "false");

    conf.setInt("dfs.datanode.socket.write.timeout", 0);

    /*
     * We are setting the port number to a privileged port - see HDFS-9213
     * This requires the user to have root privilege to bind to the port
     * Use below command (ubuntu) to set privilege to java process for the
     * bind() to work if the java process is not running as root.
     * setcap 'cap_net_bind_service=+ep' /path/to/java
     */
    conf.set(DFS_DATANODE_ADDRESS_KEY, "localhost:1002");
    conf.set(DFS_DATANODE_HOST_NAME_KEY, "localhost");
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:1003");
}

From source file: org.apache.hoya.tools.HoyaUtils.java

License: Apache License

/**
 * Turn on security. This is setup to only run once.
 * @param conf configuration to build up security
 * @return true if security was initialized in this call
 * @throws IOException IO/Net problems
 * @throws BadConfigException the configuration and system state are inconsistent
 */
public static boolean initProcessSecurity(Configuration conf) throws IOException, BadConfigException {

    if (processSecurityAlreadyInitialized.compareAndSet(true, true)) {
        //security is already inited
        return false;
    }

    log.info("JVM initialized into secure mode with kerberos realm {}", HoyaUtils.getKerberosRealm());
    //this gets UGI to reset its previous world view (i.e simple auth)
    //security
    log.debug("java.security.krb5.realm={}", System.getProperty("java.security.krb5.realm", ""));
    log.debug("java.security.krb5.kdc={}", System.getProperty("java.security.krb5.kdc", ""));
    SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation authUser = UserGroupInformation.getCurrentUser();
    log.debug("Authenticating as " + authUser.toString());
    log.debug("Login user is {}", UserGroupInformation.getLoginUser());
    if (!UserGroupInformation.isSecurityEnabled()) {
        throw new BadConfigException("Although secure mode is enabled,"
                + "the application has already set up its user as an insecure entity %s", authUser);
    }
    if (authUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.SIMPLE) {
        throw new BadConfigException("Auth User is not Kerberized %s"
                + " -security has already been set up with the wrong authentication method", authUser);

    }

    HoyaUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
    HoyaUtils.verifyPrincipalSet(conf, DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
    return true;
}

From source file: org.elasticsearch.repositories.hdfs.HaHdfsFailoverTestSuiteIT.java

License: Apache License

public void testHAFailoverWithRepository() throws Exception {
    RestClient client = client();
    Map<String, String> emptyParams = Collections.emptyMap();
    Header contentHeader = new BasicHeader("Content-Type", "application/json");

    String esKerberosPrincipal = System.getProperty("test.krb5.principal.es");
    String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs");
    String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs");
    boolean securityEnabled = hdfsKerberosPrincipal != null;

    Configuration hdfsConfiguration = new Configuration();
    hdfsConfiguration.set("dfs.nameservices", "ha-hdfs");
    hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2");
    hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001");
    hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002");
    hdfsConfiguration.set("dfs.client.failover.proxy.provider.ha-hdfs",
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

    AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
        if (securityEnabled) {
            // ensure that keytab exists
            Path kt = PathUtils.get(kerberosKeytabLocation);
            if (Files.exists(kt) == false) {
                throw new IllegalStateException("Could not locate keytab at " + kerberosKeytabLocation);
            }
            if (Files.isReadable(kt) != true) {
                throw new IllegalStateException("Could not read keytab at " + kerberosKeytabLocation);
            }
            logger.info("Keytab Length: " + Files.readAllBytes(kt).length);

            // set principal names
            hdfsConfiguration.set("dfs.namenode.kerberos.principal", hdfsKerberosPrincipal);
            hdfsConfiguration.set("dfs.datanode.kerberos.principal", hdfsKerberosPrincipal);
            hdfsConfiguration.set("dfs.data.transfer.protection", "authentication");

            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS,
                    hdfsConfiguration);
            UserGroupInformation.setConfiguration(hdfsConfiguration);
            UserGroupInformation.loginUserFromKeytab(hdfsKerberosPrincipal, kerberosKeytabLocation);
        } else {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE,
                    hdfsConfiguration);
            UserGroupInformation.setConfiguration(hdfsConfiguration);
            UserGroupInformation.getCurrentUser();
        }
        return null;
    });

    // Create repository
    {
        Response response = client.performRequest("PUT", "/_snapshot/hdfs_ha_repo_read", emptyParams,
                new NStringEntity("{" + "\"type\":\"hdfs\"," + "\"settings\":{"
                        + "\"uri\": \"hdfs://ha-hdfs/\",\n"
                        + "\"path\": \"/user/elasticsearch/existing/readonly-repository\","
                        + "\"readonly\": \"true\"," + securityCredentials(securityEnabled, esKerberosPrincipal)
                        + "\"conf.dfs.nameservices\": \"ha-hdfs\","
                        + "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\","
                        + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\","
                        + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\","
                        + "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": "
                        + "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" + "}"
                        + "}", Charset.defaultCharset()),
                contentHeader);

        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }

    // Get repository
    {
        Response response = client.performRequest("GET", "/_snapshot/hdfs_ha_repo_read/_all", emptyParams);
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }

    // Failover the namenode to the second.
    failoverHDFS("nn1", "nn2", hdfsConfiguration);

    // Get repository again
    {
        Response response = client.performRequest("GET", "/_snapshot/hdfs_ha_repo_read/_all", emptyParams);
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }
}

From source file: rpc.TestRPC.java

License: Apache License

@Test
public void testErrorMsgForInsecureClient() throws IOException {
    Configuration serverConf = new Configuration(conf);
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, serverConf);
    UserGroupInformation.setConfiguration(serverConf);

    final Server server = new RPC.Builder(serverConf).setProtocol(TestProtocol.class)
            .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
            .build();
    server.start();

    UserGroupInformation.setConfiguration(conf);
    boolean succeeded = false;
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    TestProtocol proxy = null;
    try {
        proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
        proxy.echo("");
    } catch (RemoteException e) {
        LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
        assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
        succeeded = true;
    } finally {
        server.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
    }
    assertTrue(succeeded);

    conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);

    UserGroupInformation.setConfiguration(serverConf);
    final Server multiServer = new RPC.Builder(serverConf).setProtocol(TestProtocol.class)
            .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
            .build();
    multiServer.start();
    succeeded = false;
    final InetSocketAddress multiServerAddr = NetUtils.getConnectAddress(multiServer);
    proxy = null;
    try {
        UserGroupInformation.setConfiguration(conf);
        proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, multiServerAddr, conf);
        proxy.echo("");
    } catch (RemoteException e) {
        LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
        assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
        succeeded = true;
    } finally {
        multiServer.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
    }
    assertTrue(succeeded);
}