List of usage examples for org.apache.hadoop.security.UserGroupInformation#createRemoteUser
@InterfaceAudience.Public @InterfaceStability.Evolving public static UserGroupInformation createRemoteUser(String user)
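Before the project examples below, here is a minimal sketch of the basic pattern (the user name "alice" and the path are illustrative, not taken from any of the sources): createRemoteUser builds a UGI for a user without attaching any credentials, and doAs runs Hadoop calls under that identity.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateRemoteUserSketch {
    public static void main(String[] args) throws Exception {
        // Build a UGI for "alice" without credentials; on a secure (Kerberos)
        // cluster this identity is not authenticated by itself.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");

        // Run filesystem work as that user; NameNode permission checks
        // are evaluated against "alice".
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                FileSystem fs = FileSystem.get(new Configuration());
                fs.listStatus(new Path("/")); // illustrative path
                return null;
            }
        });
    }
}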
From source file:org.apache.atlas.web.resources.EntityJerseyResourceIT.java
License:Apache License
@Test
public void testRequestUser() throws Exception {
    Referenceable entity = new Referenceable(DATABASE_TYPE);
    entity.set("name", randomString());
    entity.set("description", randomString());

    String user = "testuser";
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    AtlasClient localClient = new AtlasClient(ugi, null, baseUrl);
    String entityId = localClient.createEntity(entity).get(0);

    List<EntityAuditEvent> events = serviceClient.getEntityAuditEvents(entityId, (short) 10);
    assertEquals(events.size(), 1);
    assertEquals(events.get(0).getUser(), user);
}
From source file:org.apache.atlas.web.security.AtlasAbstractAuthenticationProvider.java
License:Apache License
public static List<GrantedAuthority> getAuthoritiesFromUGI(String userName) {
    List<GrantedAuthority> grantedAuths = new ArrayList<GrantedAuthority>();
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userName);
    if (ugi != null) {
        String[] userGroups = ugi.getGroupNames();
        if (userGroups != null) {
            for (String group : userGroups) {
                grantedAuths.add(new SimpleGrantedAuthority(group));
            }
        }
    }
    // if the UGI returned no groups, fall back to Hadoop's configured
    // group mapping service (e.g. LDAP-based group mapping)
    if (grantedAuths.isEmpty()) {
        try {
            Configuration config = new Configuration();
            Groups gp = new Groups(config);
            List<String> userGroups = gp.getGroups(userName);
            if (userGroups != null) {
                for (String group : userGroups) {
                    grantedAuths.add(new SimpleGrantedAuthority(group));
                }
            }
        } catch (java.io.IOException e) {
            LOG.error("Exception while fetching groups ", e);
        }
    }
    return grantedAuths;
}
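A hypothetical call site for the helper above (the wiring is illustrative and not part of Atlas; UsernamePasswordAuthenticationToken is standard Spring Security, and "alice" is a placeholder):

import java.util.List;
import org.apache.atlas.web.security.AtlasAbstractAuthenticationProvider;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.GrantedAuthority;

public class GroupLookupSketch {
    public static Authentication authenticate(String userName) {
        // Map the user's Hadoop groups onto Spring Security authorities.
        List<GrantedAuthority> authorities =
                AtlasAbstractAuthenticationProvider.getAuthoritiesFromUGI(userName);
        // Build an authenticated token carrying those authorities (credentials omitted).
        return new UsernamePasswordAuthenticationToken(userName, null, authorities);
    }
}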
From source file:org.apache.blur.command.ExportCommand.java
License:Apache License
@Override
public Long execute(final IndexContext context) throws IOException, InterruptedException {
    // get our blurQuery back
    blurQuery = mapper.readValue(blurQueryString, BlurQuery.class);
    final TableContext tableContext = context.getTableContext();
    final FieldManager fieldManager = tableContext.getFieldManager();
    final org.apache.blur.thrift.generated.Query simpleQuery = blurQuery.query;
    final boolean rowQuery = simpleQuery.rowQuery;
    final Term defaultPrimeDocTerm = tableContext.getDefaultPrimeDocTerm();
    // TODO: get filters working
    Filter queryFilter = null;
    // TODO: get columnFetch to work
    final ScoreType scoreType = ScoreType.CONSTANT;

    // have a query to run, setup file to output to:
    String shard = context.getShard().getShard();
    String uuid = blurQuery.uuid;
    final Path path = new Path(destUri, uuid + "-" + shard + ".json.gz");
    final byte[] newLine = "\n".getBytes();
    final AtomicLong exported = new AtomicLong(0);
    LOG.info("start shard: " + shard);

    UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(user);
    remoteUser.doAs(new PrivilegedExceptionAction<Long>() {
        public Long run() throws Exception {
            // setup query
            Query query;
            try {
                query = QueryParserUtil.parseQuery(simpleQuery.query, simpleQuery.rowQuery, fieldManager,
                        null, null, scoreType, tableContext);
            } catch (ParseException e) {
                throw new IOException("query could not be parsed correctly", e);
            }

            // setup storage with existing conf
            FileSystem fs = FileSystem.get(tableContext.getConfiguration());
            final OutputStream outputStream = new GZIPOutputStream(fs.create(path, true));

            IndexSearcherCloseable indexSearcher = context.getIndexSearcher();
            indexSearcher.search(query, new Collector() {
                private AtomicReader _reader;
                private OpenBitSet _primeDocBitSet;
                private Bits _liveDocs;

                @Override
                public void collect(int doc) throws IOException {
                    // doc equals primedoc in super query
                    Row row = null;
                    if (rowQuery) {
                        int nextPrimeDoc = _primeDocBitSet.nextSetBit(doc + 1);
                        for (int d = doc; d < nextPrimeDoc; d++) {
                            // was our document marked for deletion?
                            if (_liveDocs != null && !_liveDocs.get(d)) {
                                continue;
                            }
                            Document document = _reader.document(d);
                            BlurThriftRecord record = new BlurThriftRecord();
                            String rowId = RowDocumentUtil.readRecord(document, record);
                            row = new Row(rowId, record);
                        }
                    } else {
                        Document document = _reader.document(doc);
                        BlurThriftRecord record = new BlurThriftRecord();
                        String rowId = RowDocumentUtil.readRecord(document, record);
                        row = new Row(rowId, record);
                    }
                    // record has now been populated...
                    String json = mapper.writeValueAsString(row);
                    outputStream.write(json.getBytes());
                    outputStream.write(newLine);
                    exported.incrementAndGet();
                }

                @Override
                public void setNextReader(AtomicReaderContext context) throws IOException {
                    _reader = context.reader();
                    _liveDocs = _reader.getLiveDocs();
                    _primeDocBitSet = PrimeDocCache.getPrimeDocBitSet(defaultPrimeDocTerm, _reader);
                }

                @Override
                public void setScorer(Scorer scorer) throws IOException {
                }

                @Override
                public boolean acceptsDocsOutOfOrder() {
                    return false;
                }
            });
            outputStream.flush();
            outputStream.close();
            return exported.get(); // unused; the doAs result is discarded
        }
    });
    LOG.info("complete shard: " + shard + " exported: " + exported.get());
    return exported.get();
}
From source file:org.apache.blur.command.TableCopyCommand.java
License:Apache License
@Override
public Long execute(IndexContext context) throws IOException {
    final Configuration configuration = context.getTableContext().getConfiguration();
    final IndexReader indexReader = context.getIndexReader();
    final Shard shard = context.getShard();
    UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(user);
    try {
        return remoteUser.doAs(new PrivilegedExceptionAction<Long>() {
            @Override
            public Long run() throws Exception {
                Path path = new Path(destUri);
                Directory srcDirectory = getDiretory(indexReader);
                HdfsDirectory destDirectory = new HdfsDirectory(configuration, new Path(path, shard.getShard()));
                long total = 0;
                for (String srcFile : srcDirectory.listAll()) {
                    if (destDirectory.fileExists(srcFile)) {
                        LOG.info("File [{0}] already exists in dest directory.", srcFile);
                        long srcFileLength = srcDirectory.fileLength(srcFile);
                        long destFileLength = destDirectory.fileLength(srcFile);
                        if (srcFileLength != destFileLength) {
                            LOG.info("Deleting file [{0}]; length of [{1}] is not the same as source [{2}].",
                                    srcFile, srcFileLength, destFileLength);
                            destDirectory.deleteFile(srcFile);
                        } else {
                            continue;
                        }
                    }
                    LOG.info("Copying file [{0}] to dest directory.", srcFile);
                    total += copy(srcFile, srcDirectory, destDirectory);
                }
                return total;
            }
        });
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}
From source file:org.apache.blur.hive.BlurHiveOutputFormat.java
License:Apache License
public static UserGroupInformation getUGI(final Configuration configuration) throws IOException {
    String user = getBlurUser(configuration);
    UserGroupInformation userGroupInformation;
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    if (user.equals(currentUser.getUserName())) {
        userGroupInformation = currentUser;
    } else {
        if (BlurHiveOutputFormat.isBlurUserAsProxy(configuration)) {
            userGroupInformation = UserGroupInformation.createProxyUser(user, currentUser);
        } else {
            userGroupInformation = UserGroupInformation.createRemoteUser(user);
        }
    }
    return userGroupInformation;
}
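The helper above picks between the current user, a proxy user, and a plain remote user. A minimal sketch of how a caller might use it (hypothetical call site; the commented write step is a placeholder, not Blur API):

import java.security.PrivilegedExceptionAction;
import org.apache.blur.hive.BlurHiveOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class GetUgiSketch {
    public static void runAsBlurUser(final Configuration conf) throws Exception {
        // Resolve the UGI once, then do the Hadoop work as that user.
        UserGroupInformation ugi = BlurHiveOutputFormat.getUGI(conf);
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                // All Hadoop calls here execute as the resolved user.
                FileSystem fs = FileSystem.get(conf);
                // ... write output via fs ...
                return null;
            }
        });
    }
}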
From source file:org.apache.blur.store.MessingWithPermissions.java
License:Apache License
public static void main(String[] args) throws IOException, InterruptedException {
    UserGroupInformation blur = UserGroupInformation.createRemoteUser("blur");
    final Path path = new Path("/permission_test/tables");
    final Configuration configuration = new Configuration();
    blur.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            FileSystem fileSystem = path.getFileSystem(configuration);
            FileStatus[] listStatus = fileSystem.listStatus(path);
            for (FileStatus status : listStatus) {
                System.out.println(status.getPath());
            }
            return null;
        }
    });
}
From source file:org.apache.coheigea.bigdata.hdfs.HDFSAccessControlEnforcerTest.java
License:Apache License
@org.junit.Test
public void customPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Now try to read the file as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as "eve" - this should not be allowed
    ugi = UserGroupInformation.createRemoteUser("eve");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });

    // Write to the file as the owner - this should be allowed
    out = fileSystem.append(file);
    out.write(("new data\n").getBytes("UTF-8"));
    out.flush();
    out.close();

    // Now try to write to the file as "eve" (ugi still refers to "eve" here) -
    // this should not be allowed
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.coheigea.bigdata.hdfs.HDFSTest.java
License:Apache License
@org.junit.Test
public void defaultPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Now try to read the file as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Write to the file as the owner - this should be allowed
    out = fileSystem.append(file);
    out.write(("new data\n").getBytes("UTF-8"));
    out.flush();
    out.close();

    // Now try to write to the file as "bob" - this should not be allowed
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.coheigea.bigdata.hdfs.HDFSTest.java
License:Apache License
@org.junit.Test
public void testChangedPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file3");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Now try to read the file as "bob" - this should fail
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                FSDataInputStream in = fs.open(file);
                ByteArrayOutputStream output = new ByteArrayOutputStream();
                IOUtils.copy(in, output);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.coheigea.bigdata.hdfs.HDFSTest.java
License:Apache License
@org.junit.Test
public void testDirectoryPermissions() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file4");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Try to read the directory as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Change permissions so that the directory can't be read by "other"
    fileSystem.setPermission(file.getParent(), new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE));

    // Try to read the base directory as the file owner
    RemoteIterator<LocatedFileStatus> iter = fileSystem.listFiles(file.getParent(), false);
    Assert.assertTrue(iter.hasNext());

    // Now try to read the directory as "bob" again - this should fail
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);

            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}