List of usage examples for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY
String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY
To view the source code for org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, click the Source link.
From source file:org.apache.phoenix.end2end.HttpParamImpersonationQueryServerIT.java
License:Apache License
/** * Setup the security configuration for hdfs. *///w w w .j av a2s. c om private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception { // Set principal+keytab configuration for HDFS conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SPNEGO_PRINCIPAL + "@" + KDC.getRealm()); // Enable token access for HDFS blocks conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); // Only use HTTPS (required because we aren't using "secure" ports) conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); // Bind on localhost for spnego to have a chance at working conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); // Generate SSL certs File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath()); keystoresDir.mkdirs(); String sslConfDir = KeyStoreTestUtil.getClasspathDir(HttpParamImpersonationQueryServerIT.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false); // Magic flag to tell hdfs to not fail on using ports above 1024 conf.setBoolean("ignore.secure.ports.for.testing", true); }
From source file:org.apache.phoenix.end2end.SecureQueryServerIT.java
License:Apache License
/** * Setup the security configuration for hdfs. *//*from ww w. jav a2 s .c o m*/ private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception { // Set principal+keytab configuration for HDFS conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, SERVICE_PRINCIPAL + "@" + KDC.getRealm()); conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SPNEGO_PRINCIPAL + "@" + KDC.getRealm()); // Enable token access for HDFS blocks conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); // Only use HTTPS (required because we aren't using "secure" ports) conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); // Bind on localhost for spnego to have a chance at working conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); // Generate SSL certs File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath()); keystoresDir.mkdirs(); String sslConfDir = KeyStoreTestUtil.getClasspathDir(SecureQueryServerIT.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false); // Magic flag to tell hdfs to not fail on using ports above 1024 conf.setBoolean("ignore.secure.ports.for.testing", true); }
From source file:org.apache.slider.client.SliderClient.java
License:Apache License
/**
 * Builds and submits the Slider Application Master for a cluster instance.
 * The statement order here is load-bearing: the instance definition must be
 * resolved before conf trees are read, site configs are only valid after
 * {@link #propagatePrincipals}, and preflight validation runs after the
 * generated conf dir is populated.
 *
 * @param clustername name of the cluster
 * @param clusterDirectory cluster directory path
 * @param instanceDefinition the instance definition
 * @param debugAM enable debug AM options
 * @return the launched application
 * @throws YarnException on YARN submission problems
 * @throws IOException on filesystem/network problems
 */
public LaunchedApplication launchApplication(String clustername, Path clusterDirectory,
    AggregateConf instanceDefinition, boolean debugAM) throws YarnException, IOException {
  deployedClusterName = clustername;
  SliderUtils.validateClusterName(clustername);
  verifyNoLiveClusters(clustername, "Launch");
  Configuration config = getConfig();
  lookupZKQuorum();
  boolean clusterSecure = SliderUtils.isHadoopClusterSecure(config);
  //create the Slider AM provider -this helps set up the AM
  SliderAMClientProvider sliderAM = new SliderAMClientProvider(config);
  instanceDefinition.resolve();
  launchedInstanceDefinition = instanceDefinition;

  // Extract the three conf trees (internal/resources/app) from the resolved definition
  ConfTreeOperations internalOperations = instanceDefinition.getInternalOperations();
  MapOperations internalOptions = internalOperations.getGlobalOptions();
  ConfTreeOperations resourceOperations = instanceDefinition.getResourceOperations();
  ConfTreeOperations appOperations = instanceDefinition.getAppConfOperations();
  Path generatedConfDirPath = createPathThatMustExist(
      internalOptions.getMandatoryOption(InternalKeys.INTERNAL_GENERATED_CONF_PATH));
  Path snapshotConfPath = createPathThatMustExist(
      internalOptions.getMandatoryOption(InternalKeys.INTERNAL_SNAPSHOT_CONF_PATH));
  // cluster Provider
  AbstractClientProvider provider = createClientProvider(
      internalOptions.getMandatoryOption(InternalKeys.INTERNAL_PROVIDER_NAME));
  // make sure the conf dir is valid;
  if (log.isDebugEnabled()) {
    log.debug(instanceDefinition.toString());
  }
  MapOperations sliderAMResourceComponent = resourceOperations.getOrAddComponent(SliderKeys.COMPONENT_AM);
  MapOperations resourceGlobalOptions = resourceOperations.getGlobalOptions();

  // add the tags if available
  Set<String> applicationTags =
      provider.getApplicationTags(sliderFileSystem, appOperations.getGlobalOptions().get(AgentKeys.APP_DEF));
  AppMasterLauncher amLauncher = new AppMasterLauncher(clustername, SliderKeys.APP_TYPE, config,
      sliderFileSystem, yarnClient, clusterSecure, sliderAMResourceComponent, resourceGlobalOptions,
      applicationTags);

  ApplicationId appId = amLauncher.getApplicationId();
  // set the application name;
  amLauncher.setKeepContainersOverRestarts(true);

  int maxAppAttempts = config.getInt(KEY_AM_RESTART_LIMIT, 0);
  amLauncher.setMaxAppAttempts(maxAppAttempts);

  // Stage the AM's temp area under <instance temp>/<appId>/am
  sliderFileSystem.purgeAppInstanceTempFiles(clustername);
  Path tempPath = sliderFileSystem.createAppInstanceTempPath(clustername, appId.toString() + "/am");
  String libdir = "lib";
  Path libPath = new Path(tempPath, libdir);
  sliderFileSystem.getFileSystem().mkdirs(libPath);
  log.debug("FS={}, tempPath={}, libdir={}", sliderFileSystem.toString(), tempPath, libPath);

  // set local resources for the application master
  // local files or archives as needed
  // In this scenario, the jar file for the application master is part of the local resources
  Map<String, LocalResource> localResources = amLauncher.getLocalResources();

  // look for the configuration directory named on the command line
  boolean hasServerLog4jProperties = false;
  Path remoteConfPath = null;
  String relativeConfDir = null;
  String confdirProp = System.getProperty(SliderKeys.PROPERTY_CONF_DIR);
  if (confdirProp == null || confdirProp.isEmpty()) {
    log.debug("No local configuration directory provided as system property");
  } else {
    File confDir = new File(confdirProp);
    if (!confDir.exists()) {
      throw new BadConfigException(E_CONFIGURATION_DIRECTORY_NOT_FOUND, confDir);
    }
    // Copy the local conf dir to the cluster's submitted-conf dir on HDFS
    Path localConfDirPath = SliderUtils.createLocalPath(confDir);
    remoteConfPath = new Path(clusterDirectory, SliderKeys.SUBMITTED_CONF_DIR);
    log.debug("Slider configuration directory is {}; remote to be {}", localConfDirPath, remoteConfPath);
    SliderUtils.copyDirectory(config, localConfDirPath, remoteConfPath, null);

    // A server-side log4j file, if present, switches AM logging later on
    File log4jserver = new File(confDir, SliderKeys.LOG4J_SERVER_PROP_FILENAME);
    hasServerLog4jProperties = log4jserver.isFile();
  }
  // the assumption here is that minimr cluster => this is a test run
  // and the classpath can look after itself
  boolean usingMiniMRCluster = getUsingMiniMRCluster();
  if (!usingMiniMRCluster) {
    log.debug("Destination is not a MiniYARNCluster -copying full classpath");
    // insert conf dir first
    if (remoteConfPath != null) {
      relativeConfDir = SliderKeys.SUBMITTED_CONF_DIR;
      Map<String, LocalResource> submittedConfDir =
          sliderFileSystem.submitDirectory(remoteConfPath, relativeConfDir);
      SliderUtils.mergeMaps(localResources, submittedConfDir);
    }
  }
  // build up the configuration
  // IMPORTANT: it is only after this call that site configurations
  // will be valid.
  propagatePrincipals(config, instanceDefinition);
  // validate security data
  /* // turned off until tested
  SecurityConfiguration securityConfiguration = new SecurityConfiguration(config, instanceDefinition, clustername);
  */
  Configuration clientConfExtras = new Configuration(false);

  // then build up the generated path.
  FsPermission clusterPerms = getClusterDirectoryPermissions(config);
  SliderUtils.copyDirectory(config, snapshotConfPath, generatedConfDirPath, clusterPerms);

  // standard AM resources
  sliderAM.prepareAMAndConfigForLaunch(sliderFileSystem, config, amLauncher, instanceDefinition,
      snapshotConfPath, generatedConfDirPath, clientConfExtras, libdir, tempPath, usingMiniMRCluster);
  //add provider-specific resources
  provider.prepareAMAndConfigForLaunch(sliderFileSystem, config, amLauncher, instanceDefinition,
      snapshotConfPath, generatedConfDirPath, clientConfExtras, libdir, tempPath, usingMiniMRCluster);

  // now that the site config is fully generated, the provider gets
  // to do a quick review of them.
  log.debug("Preflight validation of cluster configuration");
  sliderAM.preflightValidateClusterConfiguration(sliderFileSystem, clustername, config,
      instanceDefinition, clusterDirectory, generatedConfDirPath, clusterSecure);
  provider.preflightValidateClusterConfiguration(sliderFileSystem, clustername, config,
      instanceDefinition, clusterDirectory, generatedConfDirPath, clusterSecure);

  // TODO: consider supporting apps that don't have an image path
  Path imagePath = SliderUtils.extractImagePath(sliderFileSystem, internalOptions);
  if (sliderFileSystem.maybeAddImagePath(localResources, imagePath)) {
    log.debug("Registered image path {}", imagePath);
  }

  // build the environment
  amLauncher.putEnv(SliderUtils.buildEnvMap(sliderAMResourceComponent));
  ClasspathConstructor classpath =
      SliderUtils.buildClasspath(relativeConfDir, libdir, getConfig(), usingMiniMRCluster);
  amLauncher.setClasspath(classpath);
  if (log.isDebugEnabled()) {
    log.debug("AM classpath={}", classpath);
    log.debug("Environment Map:\n{}", SliderUtils.stringifyMap(amLauncher.getEnv()));
    log.debug("Files in lib path\n{}", sliderFileSystem.listFSDir(libPath));
  }

  // rm address
  InetSocketAddress rmSchedulerAddress;
  try {
    rmSchedulerAddress = SliderUtils.getRmSchedulerAddress(config);
  } catch (IllegalArgumentException e) {
    throw new BadConfigException("%s Address invalid: %s", YarnConfiguration.RM_SCHEDULER_ADDRESS,
        config.get(YarnConfiguration.RM_SCHEDULER_ADDRESS));
  }
  String rmAddr = NetUtils.getHostPortString(rmSchedulerAddress);

  JavaCommandLineBuilder commandLine = new JavaCommandLineBuilder();
  // insert any JVM options
  sliderAM.addJVMOptions(instanceDefinition, commandLine);
  // enable asserts if the text option is set
  commandLine.enableJavaAssertions();
  // if the conf dir has a log4j-server.properties, switch to that
  if (hasServerLog4jProperties) {
    commandLine.sysprop(SYSPROP_LOG4J_CONFIGURATION, LOG4J_SERVER_PROP_FILENAME);
    commandLine.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
  }
  // add the AM service entry point
  commandLine.add(SliderAppMaster.SERVICE_CLASSNAME);

  // create action and the cluster name
  commandLine.add(ACTION_CREATE, clustername);

  // debug
  if (debugAM) {
    commandLine.add(Arguments.ARG_DEBUG);
  }

  // set the cluster directory path
  commandLine.add(Arguments.ARG_CLUSTER_URI, clusterDirectory.toUri());

  if (!isUnset(rmAddr)) {
    commandLine.add(Arguments.ARG_RM_ADDR, rmAddr);
  }

  if (serviceArgs.getFilesystemBinding() != null) {
    commandLine.add(Arguments.ARG_FILESYSTEM, serviceArgs.getFilesystemBinding());
  }

  // pass the registry binding
  addConfOptionToCLI(commandLine, config, REGISTRY_PATH, DEFAULT_REGISTRY_PATH);
  addMandatoryConfOptionToCLI(commandLine, config, RegistryConstants.KEY_REGISTRY_ZK_QUORUM);

  if (clusterSecure) {
    // if the cluster is secure, make sure that
    // the relevant security settings go over
    /*
    addConfOptionToCLI(commandLine, config, KEY_SECURITY);
    */
    addConfOptionToCLI(commandLine, config, DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY);
  }
  // write out the path output
  commandLine.addOutAndErrFiles(STDOUT_AM, STDERR_AM);

  String cmdStr = commandLine.build();
  log.debug("Completed setting up app master command {}", cmdStr);
  amLauncher.addCommandLine(commandLine);

  // the Slider AM gets to configure the AM requirements, not the custom provider
  sliderAM.prepareAMResourceRequirements(sliderAMResourceComponent, amLauncher.getResource());

  // Set the priority for the application master
  int amPriority = config.getInt(KEY_YARN_QUEUE_PRIORITY, DEFAULT_YARN_QUEUE_PRIORITY);
  amLauncher.setPriority(amPriority);

  // Set the queue to which this application is to be submitted in the RM
  // Queue for App master
  String amQueue = config.get(KEY_YARN_QUEUE, DEFAULT_YARN_QUEUE);
  String suppliedQueue = internalOperations.getGlobalOptions().get(InternalKeys.INTERNAL_QUEUE);
  if (!SliderUtils.isUnset(suppliedQueue)) {
    // a queue named in the instance definition overrides the configured default
    amQueue = suppliedQueue;
    log.info("Using queue {} for the application instance.", amQueue);
  }

  if (amQueue != null) {
    amLauncher.setQueue(amQueue);
  }

  // submit the application
  LaunchedApplication launchedApplication = amLauncher.submitApplication();
  return launchedApplication;
}
From source file:org.apache.slider.client.SliderClient.java
License:Apache License
/** * Propagate any critical principals from the current site config down to the HBase one. * @param config config to read from/*ww w . j ava 2 s .co m*/ * @param clusterSpec cluster spec */ private void propagatePrincipals(Configuration config, AggregateConf clusterSpec) { String dfsPrincipal = config.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY); if (dfsPrincipal != null) { String siteDfsPrincipal = OptionKeys.SITE_XML_PREFIX + DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY; clusterSpec.getAppConfOperations().getGlobalOptions().putIfUnset(siteDfsPrincipal, dfsPrincipal); } }
From source file:org.apache.slider.common.tools.SliderUtils.java
License:Apache License
/** * Turn on security. This is setup to only run once. * @param conf configuration to build up security * @return true if security was initialized in this call * @throws IOException IO/Net problems/*from w w w . j a va 2 s . co m*/ * @throws BadConfigException the configuration and system state are inconsistent */ public static boolean initProcessSecurity(Configuration conf) throws IOException, BadConfigException { if (processSecurityAlreadyInitialized.compareAndSet(true, true)) { //security is already inited return false; } log.info("JVM initialized into secure mode with kerberos realm {}", SliderUtils.getKerberosRealm()); //this gets UGI to reset its previous world view (i.e simple auth) //security log.debug("java.security.krb5.realm={}", System.getProperty(JAVA_SECURITY_KRB5_REALM, "")); log.debug("java.security.krb5.kdc={}", System.getProperty(JAVA_SECURITY_KRB5_KDC, "")); log.debug("hadoop.security.authentication={}", conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION)); log.debug("hadoop.security.authorization={}", conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION)); /* SecurityUtil.setAuthenticationMethod( UserGroupInformation.AuthenticationMethod.KERBEROS, conf);*/ UserGroupInformation.setConfiguration(conf); UserGroupInformation authUser = UserGroupInformation.getCurrentUser(); log.debug("Authenticating as " + authUser.toString()); log.debug("Login user is {}", UserGroupInformation.getLoginUser()); if (!UserGroupInformation.isSecurityEnabled()) { throw new BadConfigException("Although secure mode is enabled," + "the application has already set up its user as an insecure entity %s", authUser); } if (authUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.SIMPLE) { throw new BadConfigException("Auth User is not Kerberized %s" + " -security has already been set up with the wrong authentication method. 
" + "This can occur if a file system has already been created prior to the loading of " + "the security configuration.", authUser); } SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL); SliderUtils.verifyPrincipalSet(conf, DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY); return true; }
From source file:org.apache.slider.core.build.InstanceBuilder.java
License:Apache License
/** * Propagate any critical principals from the current site config down to the HBase one. *///from w w w. j a v a 2 s . c o m public void propagatePrincipals() { String dfsPrincipal = conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY); if (dfsPrincipal != null) { String siteDfsPrincipal = OptionKeys.SITE_XML_PREFIX + DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY; instanceDescription.getAppConfOperations().set(siteDfsPrincipal, dfsPrincipal); } }
From source file:org.apache.slider.server.appmaster.SliderAppMaster.java
License:Apache License
@Override //AbstractService
public synchronized void serviceInit(Configuration conf) throws Exception {
    // Initializes the AM service: merge client/server config resources,
    // apply command-line overrides, set up Kerberos security if enabled,
    // validate the server environment and start the executor services.
    // The merge order (custom <- server, then conf <- custom) is deliberate.

    // slider client config, if found on the classpath
    Configuration customConf = SliderUtils.loadClientConfigurationResource();
    // Load in the server configuration - if it is actually on the Classpath
    Configuration serverConf = ConfigHelper.loadFromResource(SERVER_RESOURCE);
    ConfigHelper.mergeConfigurations(customConf, serverConf, SERVER_RESOURCE, true);
    serviceArgs.applyDefinitions(customConf);
    serviceArgs.applyFileSystemBinding(customConf);
    // conf now contains all customizations

    AbstractActionArgs action = serviceArgs.getCoreAction();
    SliderAMCreateAction createAction = (SliderAMCreateAction) action;
    // sort out the location of the AM
    String rmAddress = createAction.getRmAddress();
    if (rmAddress != null) {
        log.debug("Setting rm address from the command line: {}", rmAddress);
        SliderUtils.setRmSchedulerAddress(customConf, rmAddress);
    }

    log.info("AM configuration:\n{}", ConfigHelper.dumpConfigToString(customConf));
    ConfigHelper.mergeConfigurations(conf, customConf, CLIENT_RESOURCE, true);
    //init security with our conf
    if (SliderUtils.isHadoopClusterSecure(conf)) {
        log.info("Secure mode with kerberos realm {}", SliderUtils.getKerberosRealm());
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        log.debug("Authenticating as {}", ugi);
        // a secure cluster must name the HDFS namenode principal
        SliderUtils.verifyPrincipalSet(conf, DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY);
    } else {
        log.info("Cluster is insecure");
    }
    log.info("Login user is {}", UserGroupInformation.getLoginUser());

    //look at settings of Hadoop Auth, to pick up a problem seen once
    checkAndWarnForAuthTokenProblems();

    // validate server env; dependency checks can be disabled via configuration
    boolean dependencyChecks = !conf.getBoolean(KEY_SLIDER_AM_DEPENDENCY_CHECKS_DISABLED, false);
    SliderUtils.validateSliderServerEnvironment(log, dependencyChecks);

    // two-thread executor backing the AM's asynchronous actions
    executorService = new WorkflowExecutorService<ExecutorService>("AmExecutor",
        Executors.newFixedThreadPool(2, new ServiceThreadFactory("AmExecutor", true)));
    addService(executorService);

    addService(actionQueues);

    //init all child services
    super.serviceInit(conf);
}
From source file:org.apache.slider.server.services.security.FsDelegationTokenManager.java
License:Apache License
private void createRemoteUser(Configuration configuration) throws IOException { Configuration loginConfig = new Configuration(configuration); loginConfig.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); // using HDFS principal... this.remoteUser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( SecurityUtil.getServerPrincipal(loginConfig.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY), InetAddress.getLocalHost().getCanonicalHostName()), loginConfig.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); log.info("Created remote user {}. UGI reports current user is {}", this.remoteUser, UserGroupInformation.getCurrentUser()); }