List of usage examples for org.apache.hadoop.security.UserGroupInformation.doAs
@InterfaceAudience.Public @InterfaceStability.Evolving public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException
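Every example below follows the same pattern: obtain a UserGroupInformation (via createRemoteUser, createProxyUser, or getCurrentUser), optionally attach delegation or client-to-AM tokens, then run the privileged work inside doAs so it executes under that identity. Here is a minimal self-contained sketch of the pattern; the user name "alice" and the printed result are illustrative assumptions, not taken from any source file below:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsExample {
    public static void main(String[] args) throws Exception {
        // Build a UGI for a remote user; "alice" is a placeholder name.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");

        // Everything inside run() executes as "alice". doAs re-throws
        // IOException and InterruptedException from run(); other checked
        // exceptions come back wrapped in UndeclaredThrowableException.
        String name = ugi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });

        System.out.println(name); // prints "alice"
    }
}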
From source file:org.apache.sentry.tests.e2e.metastore.TestMetastoreEndToEnd.java
License:Apache License
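This Sentry end-to-end test runs each HiveMetaStoreClient call inside doAs under a different remote-user UGI, then asserts that the set of tables each user can list matches that user's metastore privileges.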
@Test
public void testListTables() throws Exception {
    List<String> tableNames;
    HashSet<String> expectedTableNames = new HashSet<>(Arrays.asList(tabName1, tabName2));

    // Create databases and tables, and verify the admin can list the table names
    final HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
    dropMetastoreDBIfExists(client, dbName);
    createMetastoreDB(client, dbName);
    createMetastoreTable(client, dbName, tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    createMetastoreTable(client, dbName, tabName2, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    createMetastoreTable(client, dbName, tabName3, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser(ADMIN1);
    tableNames = clientUgi.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(3);

    dropMetastoreDBIfExists(client, dbName2);
    createMetastoreDB(client, dbName2);
    createMetastoreTable(client, dbName2, tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    createMetastoreTable(client, dbName2, tabName2, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    tableNames = clientUgi.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(2);
    client.close();

    // Verify a user with ALL privileges on a database can list its tables,
    // and sees no tables in a database it has no privilege on.
    // USER1_1 has ALL on dbName
    final HiveMetaStoreClient client_USER1_1 = context.getMetaStoreClient(USER1_1);
    UserGroupInformation clientUgi_USER1_1 = UserGroupInformation.createRemoteUser(USER1_1);
    tableNames = clientUgi_USER1_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER1_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(3);
    tableNames = clientUgi_USER1_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER1_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(0);

    // USER2_1 has SELECT on dbName
    final HiveMetaStoreClient client_USER2_1 = context.getMetaStoreClient(USER2_1);
    UserGroupInformation clientUgi_USER2_1 = UserGroupInformation.createRemoteUser(USER2_1);
    tableNames = clientUgi_USER2_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER2_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(3);
    tableNames = clientUgi_USER2_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER2_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(0);

    // USER3_1 has SELECT on dbName.tabName1 and dbName.tabName2
    final HiveMetaStoreClient client_USER3_1 = context.getMetaStoreClient(USER3_1);
    UserGroupInformation clientUgi_USER3_1 = UserGroupInformation.createRemoteUser(USER3_1);
    tableNames = clientUgi_USER3_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER3_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(2);
    assertThat(expectedTableNames).contains(tableNames.get(0));
    assertThat(expectedTableNames).contains(tableNames.get(1));
    tableNames = clientUgi_USER3_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER3_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(0);
    client_USER3_1.close();

    // USER4_1 has ALL on dbName.tabName1 and on dbName2
    final HiveMetaStoreClient client_USER4_1 = context.getMetaStoreClient(USER4_1);
    UserGroupInformation clientUgi_USER4_1 = UserGroupInformation.createRemoteUser(USER4_1);
    tableNames = clientUgi_USER4_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER4_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(1); // only has access to tabName1 in dbName
    assertThat(tableNames.get(0)).isEqualToIgnoringCase(tabName1);
    tableNames = clientUgi_USER4_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER4_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(2);
    client_USER4_1.close();

    // USER5_1 has CREATE on the server
    final HiveMetaStoreClient client_USER5_1 = context.getMetaStoreClient(USER5_1);
    UserGroupInformation clientUgi_USER5_1 = UserGroupInformation.createRemoteUser(USER5_1);
    tableNames = clientUgi_USER5_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER5_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(3);
    tableNames = clientUgi_USER5_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER5_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(2);
    client_USER5_1.close();
}
From source file:org.apache.slider.server.appmaster.rpc.RpcBinder.java
License:Apache License
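Slider builds a remote-user UGI for the current user, attaches the client-to-AM token when security is enabled, and opens the RPC connection to the application master inside doAs.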
public static SliderClusterProtocol getProxy(final Configuration conf, ApplicationReport application,
        final int rpcTimeout) throws IOException, SliderException, InterruptedException {
    String host = application.getHost();
    int port = application.getRpcPort();
    String address = host + ":" + port;
    if (host == null || 0 == port) {
        throw new SliderException(SliderExitCodes.EXIT_CONNECTIVITY_PROBLEM,
                "Slider instance " + application.getName() + " isn't providing a valid address for the"
                        + " Slider RPC protocol: " + address);
    }
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    final UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(currentUser.getUserName());
    final InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(application.getHost(),
            application.getRpcPort());
    SliderClusterProtocol realProxy;
    log.debug("Connecting to {}", serviceAddr);
    if (UserGroupInformation.isSecurityEnabled()) {
        org.apache.hadoop.yarn.api.records.Token clientToAMToken = application.getClientToAMToken();
        Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
        newUgi.addToken(token);
        realProxy = newUgi.doAs(new PrivilegedExceptionAction<SliderClusterProtocol>() {
            @Override
            public SliderClusterProtocol run() throws IOException {
                return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
            }
        });
    } else {
        return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
    }
    return realProxy;
}
From source file:org.apache.storm.hdfs.security.AutoHDFS.java
License:Apache License
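Storm's AutoHDFS obtains HDFS delegation tokens on behalf of the topology submitter: it calls FileSystem.addDelegationTokens inside doAs on a proxy-user UGI, then serializes the resulting Credentials for distribution.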
@SuppressWarnings("unchecked")
private byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            login(configuration);
            final String topologySubmitterUser = (String) conf.get(Config.TOPOLOGY_SUBMITTER_PRINCIPAL);
            final URI nameNodeURI = conf.containsKey(TOPOLOGY_HDFS_URI)
                    ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString())
                    : FileSystem.getDefaultUri(configuration);
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser, ugi);
            Credentials creds = (Credentials) proxyUser.doAs(new PrivilegedAction<Object>() {
                @Override
                public Object run() {
                    try {
                        FileSystem fileSystem = FileSystem.get(nameNodeURI, configuration);
                        Credentials credential = proxyUser.getCredentials();
                        if (configuration.get(STORM_USER_NAME_KEY) == null) {
                            configuration.set(STORM_USER_NAME_KEY, hdfsPrincipal);
                        }
                        fileSystem.addDelegationTokens(configuration.get(STORM_USER_NAME_KEY), credential);
                        LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser);
                        return credential;
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            });
            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);
            creds.write(out);
            out.flush();
            out.close();
            return bao.toByteArray();
        } else {
            throw new RuntimeException("Security is not enabled for HDFS");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}
From source file:org.apache.storm.hdfs.security.AutoHDFSNimbus.java
License:Apache License
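AutoHDFSNimbus performs the same proxy-user token acquisition as AutoHDFS above, with the topology submitter passed in as a method parameter instead of read from the topology configuration.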
@SuppressWarnings("unchecked")
private byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration,
        final String topologySubmitterUser) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            login(configuration);
            final URI nameNodeURI = conf.containsKey(TOPOLOGY_HDFS_URI)
                    ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString())
                    : FileSystem.getDefaultUri(configuration);
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser, ugi);
            Credentials creds = (Credentials) proxyUser.doAs(new PrivilegedAction<Object>() {
                @Override
                public Object run() {
                    try {
                        FileSystem fileSystem = FileSystem.get(nameNodeURI, configuration);
                        Credentials credential = proxyUser.getCredentials();
                        if (configuration.get(STORM_USER_NAME_KEY) == null) {
                            configuration.set(STORM_USER_NAME_KEY, hdfsPrincipal);
                        }
                        fileSystem.addDelegationTokens(configuration.get(STORM_USER_NAME_KEY), credential);
                        LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser);
                        return credential;
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            });
            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);
            creds.write(out);
            out.flush();
            out.close();
            return bao.toByteArray();
        } else {
            throw new RuntimeException("Security is not enabled for HDFS");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}
From source file:org.apache.streams.hdfs.WebHdfsPersistReader.java
License:Apache License
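The WebHDFS reader creates a remote-user UGI from its configuration and calls FileSystem.get inside doAs, so the cached filesystem handle is bound to that user.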
private synchronized void connectToWebHDFS() {
    try {
        LOGGER.info("User : {}", this.hdfsConfiguration.getUser());
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(this.hdfsConfiguration.getUser());
        ugi.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE);
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                Configuration conf = new Configuration();
                conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
                LOGGER.info("WebURI : {}", getURI().toString());
                client = FileSystem.get(getURI(), conf);
                LOGGER.info("Connected to WebHDFS");

                /*
                 * This code is an example of how you would work with HDFS if you
                 * weren't going over the webHDFS protocol.
                 *
                 * Smashew: 2013-10-01
                 *
                 * conf.set("fs.defaultFS", "hdfs://hadoop.mdigitallife.com:8020/user/" + userName);
                 * conf.set("namenode.host", "0.0.0.0");
                 * conf.set("hadoop.job.ugi", userName);
                 * conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "runner");
                 * fileSystem.createNewFile(new Path("/user/" + userName + "/test"));
                 * FileStatus[] status = fs.listStatus(new Path("/user/" + userName));
                 * for (int i = 0; i < status.length; i++) {
                 *     LOGGER.info("Directory: {}", status[i].getPath());
                 * }
                 */
                return null;
            }
        });
    } catch (Exception e) {
        LOGGER.error("There was an error connecting to WebHDFS, please check your settings and try again");
        e.printStackTrace();
    }
}
From source file:org.apache.streams.hdfs.WebHdfsPersistWriter.java
License:Apache License
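The writer's connection logic mirrors the reader's; the only difference is that a connection failure is rethrown as a RuntimeException rather than just logged.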
private synchronized void connectToWebHDFS() {
    try {
        LOGGER.info("User : {}", this.hdfsConfiguration.getUser());
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(this.hdfsConfiguration.getUser());
        ugi.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE);
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                Configuration conf = new Configuration();
                conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
                LOGGER.info("WebURI : {}", getURI().toString());
                client = FileSystem.get(getURI(), conf);
                LOGGER.info("Connected to WebHDFS");

                /*
                 * This code is an example of how you would work with HDFS if you
                 * weren't going over the webHDFS protocol.
                 *
                 * Smashew: 2013-10-01
                 *
                 * conf.set("fs.defaultFS", "hdfs://hadoop.mdigitallife.com:8020/user/" + userName);
                 * conf.set("namenode.host", "0.0.0.0");
                 * conf.set("hadoop.job.ugi", userName);
                 * conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "runner");
                 * fileSystem.createNewFile(new Path("/user/" + userName + "/test"));
                 * FileStatus[] status = fs.listStatus(new Path("/user/" + userName));
                 * for (int i = 0; i < status.length; i++) {
                 *     LOGGER.info("Directory: {}", status[i].getPath());
                 * }
                 */
                return null;
            }
        });
    } catch (Exception e) {
        LOGGER.error("There was an error connecting to WebHDFS, please check your settings and try again", e);
        throw new RuntimeException(e);
    }
}
From source file:org.apache.tajo.master.rm.YarnTajoResourceManager.java
License:Apache License
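Tajo creates its ApplicationMasterProtocol proxy to the YARN resource manager inside doAs on the current user's UGI, so the RPC client picks up that user's credentials.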
@Override
public void init(Configuration conf) {
    this.conf = conf;
    connectYarnClient();

    final YarnConfiguration yarnConf = new YarnConfiguration(conf);
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);

    UserGroupInformation currentUser;
    try {
        currentUser = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new YarnRuntimeException(e);
    }

    rmClient = currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
        @Override
        public ApplicationMasterProtocol run() {
            return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class, rmAddress, yarnConf);
        }
    });
}
From source file:org.apache.tajo.master.TaskRunnerLauncherImpl.java
License:Apache License
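When security is enabled, the container-manager proxy must be built as a remote user named after the ContainerId, with the container token added to the UGI before doAs creates the proxy.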
protected ContainerManager getCMProxy(ContainerId containerID, final String containerManagerBindAddr,
        ContainerToken containerToken) throws IOException {
    String[] hosts = containerManagerBindAddr.split(":");
    final InetSocketAddress cmAddr = new InetSocketAddress(hosts[0], Integer.parseInt(hosts[1]));
    UserGroupInformation user = UserGroupInformation.getCurrentUser();

    if (UserGroupInformation.isSecurityEnabled()) {
        Token<ContainerTokenIdentifier> token = ProtoUtils.convertFromProtoFormat(containerToken, cmAddr);
        // the user in createRemoteUser in this context has to be ContainerID
        user = UserGroupInformation.createRemoteUser(containerID.toString());
        user.addToken(token);
    }

    ContainerManager proxy = user.doAs(new PrivilegedAction<ContainerManager>() {
        @Override
        public ContainerManager run() {
            return (ContainerManager) yarnRPC.getProxy(ContainerManager.class, cmAddr, getConfig());
        }
    });
    return proxy;
}
From source file:org.apache.tajo.master.YarnContainerProxy.java
License:Apache License
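YarnContainerProxy applies the same ContainerId-named remote-user pattern against the newer ContainerManagementProtocol API.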
protected ContainerManagementProtocol getCMProxy(ContainerId containerID,
        final String containerManagerBindAddr, Token containerToken) throws IOException {
    String[] hosts = containerManagerBindAddr.split(":");
    final InetSocketAddress cmAddr = new InetSocketAddress(hosts[0], Integer.parseInt(hosts[1]));
    UserGroupInformation user = UserGroupInformation.getCurrentUser();

    if (UserGroupInformation.isSecurityEnabled()) {
        org.apache.hadoop.security.token.Token<ContainerTokenIdentifier> token = ConverterUtils
                .convertFromYarn(containerToken, cmAddr);
        // the user in createRemoteUser in this context has to be ContainerID
        user = UserGroupInformation.createRemoteUser(containerID.toString());
        user.addToken(token);
    }

    ContainerManagementProtocol proxy = user.doAs(new PrivilegedAction<ContainerManagementProtocol>() {
        @Override
        public ContainerManagementProtocol run() {
            return (ContainerManagementProtocol) yarnRPC.getProxy(ContainerManagementProtocol.class, cmAddr, conf);
        }
    });
    return proxy;
}
From source file:org.apache.tez.client.TezClientUtils.java
License:Apache License
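Tez connects the DAG client to its application master under a remote-user UGI, adding the client-to-AM token when one is supplied; the InterruptedException that doAs may throw is converted to an IOException.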
@Private
public static DAGClientAMProtocolBlockingPB getAMProxy(final Configuration conf, String amHost, int amRpcPort,
        org.apache.hadoop.yarn.api.records.Token clientToAMToken) throws IOException {
    final InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(amHost, amRpcPort);
    UserGroupInformation userUgi = UserGroupInformation
            .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName());
    if (clientToAMToken != null) {
        Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
        userUgi.addToken(token);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to Tez AM at " + serviceAddr);
    }
    DAGClientAMProtocolBlockingPB proxy = null;
    try {
        proxy = userUgi.doAs(new PrivilegedExceptionAction<DAGClientAMProtocolBlockingPB>() {
            @Override
            public DAGClientAMProtocolBlockingPB run() throws IOException {
                RPC.setProtocolEngine(conf, DAGClientAMProtocolBlockingPB.class, ProtobufRpcEngine.class);
                return (DAGClientAMProtocolBlockingPB) RPC.getProxy(DAGClientAMProtocolBlockingPB.class, 0,
                        serviceAddr, conf);
            }
        });
    } catch (InterruptedException e) {
        throw new IOException("Failed to connect to AM", e);
    }
    return proxy;
}