Usage examples for org.apache.hadoop.security.UserGroupInformation#isSecurityEnabled

public static boolean isSecurityEnabled()

Returns true when Hadoop security is enabled for the process, i.e. when the UGI configuration sets hadoop.security.authentication to something other than simple (in practice, kerberos).
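A minimal sketch of the pattern the examples below share: consult isSecurityEnabled() and, when it returns true, log in from a keytab and run privileged work under doAs. The principal, keytab path, and doWork() here are hypothetical placeholders, not taken from any of the projects quoted below.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecureStartup {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // isSecurityEnabled() consults the static UGI configuration, so set it first.
        UserGroupInformation.setConfiguration(conf);

        if (UserGroupInformation.isSecurityEnabled()) {
            // Hypothetical principal and keytab; in practice these come from your config.
            UserGroupInformation.loginUserFromKeytab("service/host@EXAMPLE.COM",
                    "/etc/security/keytabs/service.keytab");
            UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    doWork(); // runs with the Kerberos login credentials
                    return null;
                }
            });
        } else {
            doWork(); // simple authentication: no login needed
        }
    }

    private static void doWork() {
        // application logic goes here
    }
}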
From source file:org.apache.accumulo.tserver.TabletServer.java
License:Apache License
public static void main(String[] args) throws IOException {
    try {
        final String app = "tserver";
        Accumulo.setupLogging(app);
        SecurityUtil.serverLogin(SiteConfiguration.getInstance());
        ServerOpts opts = new ServerOpts();
        opts.parseArgs(app, args);
        String hostname = opts.getAddress();
        ServerConfigurationFactory conf = new ServerConfigurationFactory(HdfsZooInstance.getInstance());
        VolumeManager fs = VolumeManagerImpl.get();
        Accumulo.init(fs, conf, app);
        final TabletServer server = new TabletServer(conf, fs);
        server.config(hostname);
        DistributedTrace.enable(hostname, app, conf.getConfiguration());

        if (UserGroupInformation.isSecurityEnabled()) {
            UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
            loginUser.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() {
                    server.run();
                    return null;
                }
            });
        } else {
            server.run();
        }
    } catch (Exception ex) {
        log.error("Uncaught exception in TabletServer.main, exiting", ex);
        System.exit(1);
    } finally {
        DistributedTrace.disable();
    }
}
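When security is enabled, the server's run loop executes inside loginUser.doAs(...), so all Hadoop RPCs issued by the tablet server carry the Kerberos login credentials; otherwise the server runs directly.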
From source file:org.apache.atlas.web.listeners.LoginProcessorIT.java
License:Apache License
@Test
public void testDefaultSimpleLogin() throws Exception {
    LoginProcessor processor = new LoginProcessor() {
        @Override
        protected org.apache.commons.configuration.Configuration getApplicationConfiguration() {
            return new PropertiesConfiguration();
        }
    };
    processor.login();

    Assert.assertNotNull(UserGroupInformation.getCurrentUser());
    Assert.assertFalse(UserGroupInformation.isLoginKeytabBased());
    Assert.assertFalse(UserGroupInformation.isSecurityEnabled());
}
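With an empty application configuration, login falls back to simple (non-Kerberos) authentication, so the test asserts that both isLoginKeytabBased() and isSecurityEnabled() return false.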
From source file:org.apache.atlas.web.listeners.LoginProcessorIT.java
License:Apache License
@Test
public void testKerberosLogin() throws Exception {
    final File keytab = setupKDCAndPrincipals();

    LoginProcessor processor = new LoginProcessor() {
        @Override
        protected org.apache.commons.configuration.Configuration getApplicationConfiguration() {
            PropertiesConfiguration config = new PropertiesConfiguration();
            config.setProperty("atlas.authentication.method", "kerberos");
            config.setProperty("atlas.authentication.principal", "dgi@EXAMPLE.COM");
            config.setProperty("atlas.authentication.keytab", keytab.getAbsolutePath());
            return config;
        }

        @Override
        protected Configuration getHadoopConfiguration() {
            Configuration config = new Configuration(false);
            config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            config.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
            config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, kerberosRule);
            return config;
        }

        @Override
        protected boolean isHadoopCluster() {
            return true;
        }
    };
    processor.login();

    Assert.assertTrue(UserGroupInformation.getLoginUser().getShortUserName().endsWith("dgi"));
    Assert.assertNotNull(UserGroupInformation.getCurrentUser());
    Assert.assertTrue(UserGroupInformation.isLoginKeytabBased());
    Assert.assertTrue(UserGroupInformation.isSecurityEnabled());

    kdc.stop();
}
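Here the Hadoop configuration is explicitly switched to Kerberos before login, so after processor.login() the UGI reports a keytab-based login and isSecurityEnabled() returns true.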
From source file:org.apache.drill.exec.server.rest.auth.SpnegoConfig.java
License:Apache License
private UserGroupInformation loginAndReturnUgi() throws DrillException {
    validateSpnegoConfig();

    UserGroupInformation ugi;
    try {
        // Check if security is not enabled and try to set the security parameter to login the
        // principal. After the login is performed, reset the static UGI state.
        if (!UserGroupInformation.isSecurityEnabled()) {
            final Configuration newConfig = new Configuration();
            newConfig.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.KERBEROS.toString());

            if (clientNameMapping != null) {
                newConfig.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTH_TO_LOCAL, clientNameMapping);
            }

            UserGroupInformation.setConfiguration(newConfig);
            ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);

            // Reset the original configuration for static UGI
            UserGroupInformation.setConfiguration(new Configuration());
        } else {
            // Let's not overwrite the rules here since it might be possible that CUSTOM security is
            // configured for JDBC/ODBC with default rules. If Kerberos was enabled then the correct
            // rules must already be set.
            ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
        }
    } catch (Exception e) {
        throw new DrillException(String.format("Login failed for %s with given keytab", principal), e);
    }
    return ugi;
}
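Worth noting is the workaround for a non-secure cluster: a Kerberos configuration is temporarily forced onto the static UGI state to allow the keytab login, then a fresh Configuration is restored so the rest of the process is unaffected.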
From source file:org.apache.druid.indexer.JobHelper.java
License:Apache License
/**
 * Authenticates against a secured Hadoop cluster.
 * If you fix a bug here, make sure to fix the code at HdfsStorageAuthentication#authenticate as well.
 *
 * @param config containing the principal name and keytab path.
 */
public static void authenticate(HadoopDruidIndexerConfig config) {
    String principal = config.HADOOP_KERBEROS_CONFIG.getPrincipal();
    String keytab = config.HADOOP_KERBEROS_CONFIG.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        Configuration conf = new Configuration();
        UserGroupInformation.setConfiguration(conf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                        || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    log.info("trying to authenticate user [%s] with keytab [%s]", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }
            } catch (IOException e) {
                throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab);
            }
        }
    }
}
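The keytab login is skipped when the current user already holds Kerberos credentials for the requested principal, which avoids a redundant re-login on repeated calls.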
From source file:org.apache.falcon.catalog.HiveCatalogService.java
License:Apache License
/**
 * This is used from within an Oozie job.
 *
 * @param conf conf object
 * @param metastoreUrl metastore uri
 * @return hive metastore client handle
 * @throws FalconException
 */
private static HiveMetaStoreClient createClient(Configuration conf, String metastoreUrl) throws FalconException {
    try {
        LOG.info("Creating HCatalog client object for metastore {} using conf {}", metastoreUrl, conf.toString());
        final Credentials credentials = getCredentials(conf);
        Configuration jobConf = credentials != null ? copyCredentialsToConf(conf, credentials) : conf;
        HiveConf hcatConf = createHiveConf(jobConf, metastoreUrl);

        if (UserGroupInformation.isSecurityEnabled()) {
            hcatConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname,
                    conf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));
            hcatConf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            ugi.addCredentials(credentials); // credentials cannot be null
        }

        return new HiveMetaStoreClient(hcatConf);
    } catch (Exception e) {
        throw new FalconException("Exception creating HiveMetaStoreClient: " + e.getMessage(), e);
    }
}
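On a secure cluster, the metastore principal and the Thrift SASL flag are copied into the HiveConf and the job's credentials are attached to the current UGI before the metastore client is constructed.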
From source file:org.apache.falcon.catalog.HiveCatalogService.java
License:Apache License
private static void addSecureCredentialsAndToken(Configuration conf, HiveConf hcatConf,
        UserGroupInformation proxyUGI) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        String metaStoreServicePrincipal = conf.get(SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL);
        hcatConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname, metaStoreServicePrincipal);
        hcatConf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");

        Token<DelegationTokenIdentifier> delegationTokenId = getDelegationToken(hcatConf,
                metaStoreServicePrincipal);
        proxyUGI.addToken(delegationTokenId);
    }
}
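This variant attaches a metastore delegation token to a proxy UGI rather than the current user, the typical arrangement when acting on behalf of an end user.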
From source file:org.apache.falcon.entity.parser.ClusterEntityParser.java
License:Apache License
private void validateFileSystem(Cluster cluster, String storageUrl) throws ValidationException {
    try {
        Configuration conf = new Configuration();
        conf.set(HadoopClientFactory.FS_DEFAULT_NAME_KEY, storageUrl);
        conf.setInt("ipc.client.connect.max.retries", 10);

        if (UserGroupInformation.isSecurityEnabled()) {
            String nameNodePrincipal = ClusterHelper.getPropertyValue(cluster, SecurityUtil.NN_PRINCIPAL);
            Validate.notEmpty(nameNodePrincipal,
                    "Cluster definition missing required namenode credential property: "
                            + SecurityUtil.NN_PRINCIPAL);
            conf.set(SecurityUtil.NN_PRINCIPAL, nameNodePrincipal);
        }

        FileSystem fs = HadoopClientFactory.get().createProxiedFileSystem(conf);
        fs.exists(new Path("/"));
    } catch (Exception e) {
        throw new ValidationException("Invalid storage server or port: " + storageUrl + ", " + e.getMessage(), e);
    }
}
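The namenode principal is required and validated only when security is enabled; the check then probes the root path to confirm the filesystem is reachable.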
From source file:org.apache.falcon.entity.parser.ClusterEntityParser.java
License:Apache License
protected void validateRegistryInterface(Cluster cluster) throws ValidationException {
    final boolean isCatalogRegistryEnabled = CatalogServiceFactory.isEnabled();
    if (!isCatalogRegistryEnabled) {
        return; // ignore the registry interface for backwards compatibility
    }

    // continue validation only if a catalog service is provided
    final Interface catalogInterface = ClusterHelper.getInterface(cluster, Interfacetype.REGISTRY);
    if (catalogInterface == null) {
        LOG.info("Catalog service is not enabled for cluster: {}", cluster.getName());
        return;
    }

    final String catalogUrl = catalogInterface.getEndpoint();
    LOG.info("Validating catalog registry interface: {}", catalogUrl);

    try {
        Configuration clusterConf = ClusterHelper.getConfiguration(cluster);
        if (UserGroupInformation.isSecurityEnabled()) {
            String metaStorePrincipal = clusterConf.get(SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL);
            Validate.notEmpty(metaStorePrincipal,
                    "Cluster definition missing required metastore credential property: "
                            + SecurityUtil.HIVE_METASTORE_KERBEROS_PRINCIPAL);
        }

        if (!CatalogServiceFactory.getCatalogService().isAlive(clusterConf, catalogUrl)) {
            throw new ValidationException("Unable to reach Catalog server:" + catalogUrl);
        }
    } catch (FalconException e) {
        throw new ValidationException("Invalid Catalog server or port: " + catalogUrl, e);
    }
}
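As with the filesystem validation above, the metastore principal is checked only on secure clusters before the catalog endpoint is probed for liveness.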
From source file:org.apache.falcon.hadoop.HadoopClientFactory.java
License:Apache License
/**
 * This method is only used by Falcon internally to talk to the config store on HDFS.
 *
 * @param uri file system URI for config store.
 * @return FileSystem created with the provided proxyUser/group.
 * @throws org.apache.falcon.FalconException
 *          if the filesystem could not be created.
 */
public FileSystem createFalconFileSystem(final URI uri) throws FalconException {
    Validate.notNull(uri, "uri cannot be null");

    try {
        Configuration conf = new Configuration();
        if (UserGroupInformation.isSecurityEnabled()) {
            conf.set(SecurityUtil.NN_PRINCIPAL, StartupProperties.get().getProperty(SecurityUtil.NN_PRINCIPAL));
        }

        return createFileSystem(UserGroupInformation.getLoginUser(), uri, conf);
    } catch (IOException e) {
        throw new FalconException("Exception while getting FileSystem for: " + uri, e);
    }
}
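The filesystem is created as the login user; on secure clusters the namenode principal from the startup properties is injected into the Configuration first.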