Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usage of org.apache.hadoop.security UserGroupInformation getCurrentUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
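
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed) of how getCurrentUser reflects a doAs context; the user name "alice" is a hypothetical value used only for illustration.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class GetCurrentUserSketch {
    public static void main(String[] args) throws Exception {
        // Outside of any doAs block, getCurrentUser() falls back to the login user.
        System.out.println("login user: " + UserGroupInformation.getCurrentUser().getShortUserName());

        // "alice" is a hypothetical user name used only for this sketch.
        UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
        alice.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                // Inside doAs, getCurrentUser() returns the user on the current stack ("alice").
                System.out.println("doAs user: " + UserGroupInformation.getCurrentUser().getShortUserName());
                return null;
            }
        });
    }
}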

Usage

From source file:com.trendmicro.hdfs.webdav.test.TestMoveSimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/owner"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FSDataOutputStream os = fs.create(new Path("/test/owner/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/public/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1, 65536,
                    null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.test.TestPropfindSimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            for (Path dir : publicDirPaths) {
                assertTrue(
                        fs.mkdirs(dir, new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            }
            for (Path dir : privateDirPaths) {
                assertTrue(fs.mkdirs(dir, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)));
            }
            for (Path path : publicFilePaths) {
                FSDataOutputStream os = fs.create(path,
                        new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                        65536, null);
                assertNotNull(os);
                os.write(testPublicData.getBytes());
                os.close();
            }
            for (Path path : privateFilePaths) {
                FSDataOutputStream os = fs.create(path,
                        new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                        65536, null);
                assertNotNull(os);
                os.write(testPrivateData.getBytes());
                os.close();
            }
            return null;
        }
    });

}

From source file:com.trendmicro.hdfs.webdav.test.TestPutSimple.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/rw"),
                    new FsPermission(FsAction.ALL, FsAction.WRITE_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/ro"),
                    new FsPermission(FsAction.READ_EXECUTE, FsAction.NONE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            return null;
        }
    });
}

From source file:com.twitter.hraven.hadoopJobMonitor.rpc.ClientCache.java

License:Apache License

protected MRClientProtocol instantiateHistoryProxy() throws IOException {
    final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
    if (StringUtils.isEmpty(serviceAddr)) {
        return null;
    }
    LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
    final YarnRPC rpc = YarnRPC.create(conf);
    LOG.debug("Connected to HistoryServer at: " + serviceAddr);
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
        @Override
        public MRClientProtocol run() {
            return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
                    NetUtils.createSocketAddr(serviceAddr), conf);
        }
    });
}

From source file:com.wandisco.s3hdfs.rewrite.filter.S3HdfsFilter.java

License:Apache License

public String getUserName(HttpServletRequest request) {
    UserGroupInformation ugi;
    try {
        ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        LOG.warn("Current user is not available.", e);
        return null;
    }
    if (fakeName == null) {
        String name = request.getParameter("user.name");
        if (name == null) {
            name = ugi.getUserName();
        }
        return name;
    } else
        return fakeName;
}

From source file:com.wandisco.s3hdfs.rewrite.filter.S3HdfsTestUtil.java

License:Apache License

S3HdfsPath setUpS3HdfsPath(String bucketName, String objectName, String userName, String version,
        String partNumber) throws IOException {
    String user = (userName == null) ? UserGroupInformation.getCurrentUser().getShortUserName() : userName;

    String vers = (version == null) ? DEFAULT_VERSION : version;

    return new S3HdfsPath(s3Directory, user, bucketName, objectName, vers, partNumber);
}

From source file:com.wandisco.s3hdfs.rewrite.filter.S3HdfsTestUtil.java

License:Apache License

S3Service configureS3Service(String host, int proxy) throws IOException, S3ServiceException {
    // configure the service
    Jets3tProperties props = new Jets3tProperties();
    props.setProperty("s3service.disable-dns-buckets", String.valueOf(true));
    props.setProperty("s3service.s3-endpoint", host);
    props.setProperty("s3service.s3-endpoint-http-port", String.valueOf(proxy));
    props.setProperty("s3service.https-only", String.valueOf(false));
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    AWSCredentials creds = new AWSCredentials(ugi.getShortUserName(), "SomeSecretKey", ugi.getUserName());
    return new RestS3Service(creds, null, null, props);
}

From source file:com.yahoo.omid.committable.hbase.HBaseLogin.java

License:Apache License

public static UserGroupInformation loginIfNeeded(Config config) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        LOG.info("Security is enabled, logging in with principal={}, keytab={}", config.getPrincipal(),
                config.getKeytab());
        UserGroupInformation.loginUserFromKeytab(config.getPrincipal(), config.getKeytab());
    }
    return UserGroupInformation.getCurrentUser();
}

From source file:com.yahoo.storm.yarn.StormAMRMClient.java

License:Open Source License

public void launchSupervisorOnContainer(Container container) throws IOException {
    // create a container launch context
    ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    try {
        Credentials credentials = user.getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        launchContext.setTokens(securityTokens);
    } catch (IOException e) {
        LOG.warn("Getting current user info failed when trying to launch the container" + e.getMessage());
    }

    // CLC: env
    Map<String, String> env = new HashMap<String, String>();
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    launchContext.setEnvironment(env);

    // CLC: local resources includes storm, conf
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    String storm_zip_path = (String) storm_conf.get("storm.zip.path");
    Path zip = new Path(storm_zip_path);
    FileSystem fs = FileSystem.get(hadoopConf);
    String vis = (String) storm_conf.get("storm.zip.visibility");
    if (vis.equals("PUBLIC"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PUBLIC));
    else if (vis.equals("PRIVATE"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PRIVATE));
    else if (vis.equals("APPLICATION"))
        localResources.put("storm", Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE,
                LocalResourceVisibility.APPLICATION));

    String appHome = Util.getApplicationHomeForId(appAttemptId.toString());
    Path confDst = Util.createConfigurationFileInFs(fs, appHome, this.storm_conf, this.hadoopConf);
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));

    launchContext.setLocalResources(localResources);

    // CLC: command
    List<String> supervisorArgs = Util.buildSupervisorCommands(this.storm_conf);
    launchContext.setCommands(supervisorArgs);

    try {
        LOG.info("Use NMClient to launch supervisors in container. ");
        nmClient.startContainer(container, launchContext);

        String userShortName = user.getShortUserName();
        if (userShortName != null)
            LOG.info("Supervisor log: http://" + container.getNodeHttpAddress() + "/node/containerlogs/"
                    + container.getId().toString() + "/" + userShortName + "/supervisor.log");
    } catch (Exception e) {
        LOG.error("Caught an exception while trying to start a container", e);
        System.exit(-1);
    }
}

From source file:common.NameNode.java

License:Apache License

/** {@inheritDoc} */
public void create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag,
        boolean createParent, short replication, long blockSize) throws IOException {
    String clientMachine = getClientMachine();
    if (stateChangeLog.isDebugEnabled()) {
        stateChangeLog
                .debug("*DIR* NameNode.create: file " + src + " for " + clientName + " at " + clientMachine);
    }
    if (!checkPathLength(src)) {
        throw new IOException("create: Pathname too long.  Limit " + MAX_PATH_LENGTH + " characters, "
                + MAX_PATH_DEPTH + " levels.");
    }
    namesystem.startFile(src,
            new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(), null, masked),
            clientName, clientMachine, flag.get(), createParent, replication, blockSize);
    myMetrics.numFilesCreated.inc();
    myMetrics.numCreateFileOps.inc();
}