Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.getCurrentUser().

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.

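To illustrate "including any doAs": inside a doAs block, getCurrentUser() returns the effective (proxied) user rather than the login user. Below is a minimal, hypothetical sketch; the proxy user name "alice" is illustrative and not taken from the examples that follow.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class GetCurrentUserDoAsSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
        // Wrap the real login user in a proxy UGI for "alice" (illustrative name).
        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("alice", loginUser);
        proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
                // Inside doAs, getCurrentUser() reflects the proxy user,
                // not the login user that started the JVM.
                System.out.println(UserGroupInformation.getCurrentUser().getUserName());
                return null;
            }
        });
    }
}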
Usage

From source file:alluxio.yarn.Client.java

License:Apache License

private void setupAppMasterEnv(Map<String, String> appMasterEnv) throws IOException {
    String classpath = ApplicationConstants.Environment.CLASSPATH.name();
    for (String path : mYarnConf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        Apps.addToEnvironment(appMasterEnv, classpath, path.trim(), ApplicationConstants.CLASS_PATH_SEPARATOR);
    }
    Apps.addToEnvironment(appMasterEnv, classpath, PathUtils.concatPath(Environment.PWD.$(), "*"),
            ApplicationConstants.CLASS_PATH_SEPARATOR);

    appMasterEnv.put("ALLUXIO_HOME", ApplicationConstants.Environment.PWD.$());

    if (UserGroupInformation.isSecurityEnabled()) {
        appMasterEnv.put("ALLUXIO_USER", UserGroupInformation.getCurrentUser().getShortUserName());
    }
}
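
Note the isSecurityEnabled() guard above: on a Kerberized cluster the current user carries a full principal, and getShortUserName() applies the cluster's auth_to_local rules to produce the short account name, which is usually what downstream tooling expects in an environment variable. A minimal sketch of the difference (the principal in the comments is illustrative):

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class ShortUserNameSketch {
    public static void main(String[] args) throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // On a secure cluster getUserName() may return a full principal such as
        // "alluxio@EXAMPLE.COM", while getShortUserName() returns the mapped
        // short name, e.g. "alluxio".
        System.out.println("full name:  " + ugi.getUserName());
        System.out.println("short name: " + ugi.getShortUserName());
    }
}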

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

private void cancelJhsToken(final Token<? extends TokenIdentifier> t, String userToProxy)
        throws HadoopSecurityManagerException {
    // YARN appears to clean up this token itself after the app finishes,
    // though only after a long while.
    org.apache.hadoop.yarn.api.records.Token token = org.apache.hadoop.yarn.api.records.Token
            .newInstance(t.getIdentifier(), t.getKind().toString(), t.getPassword(), t.getService().toString());
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress jhsAddress = SecurityUtil.getTokenServiceAddr(t);
    MRClientProtocol jhsProxy = null;
    try {
        jhsProxy = UserGroupInformation.getCurrentUser().doAs(new PrivilegedAction<MRClientProtocol>() {
            @Override
            public MRClientProtocol run() {
                return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class, jhsAddress, conf);
            }
        });
        CancelDelegationTokenRequest request = Records.newRecord(CancelDelegationTokenRequest.class);
        request.setDelegationToken(token);
        jhsProxy.cancelDelegationToken(request);
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel token. " + e.getMessage() + e.getCause(), e);
    } finally {
        RPC.stopProxy(jhsProxy);
    }

}
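
Two details worth noting here: doAs is given a PrivilegedAction, which cannot throw checked exceptions, so any checked failure would have to be wrapped inside run(); UserGroupInformation also offers a doAs(PrivilegedExceptionAction) overload for actions that throw. Building the proxy inside doAs ensures the RPC connection to the JobHistoryServer is authenticated as the current (possibly proxied) user.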

From source file:backup.datanode.DataNodeBackupProcessor.java

License:Apache License

public DataNodeBackupProcessor(Configuration conf, DataNode datanode) throws Exception {
    super(conf);
    _retryDelay = conf.getLong(DFS_BACKUP_DATANODE_BACKUP_RETRY_DELAY_KEY,
            DFS_BACKUP_DATANODE_RETRY_DELAY_DEFAULT);
    _backupStore = _closer.register(BackupStore.create(BackupUtil.convert(conf)));
    _datanode = datanode;
    _nameNodeClient = new NameNodeClient(conf, UserGroupInformation.getCurrentUser());
}

From source file:backup.namenode.NameNodeBackupServicePlugin.java

License:Apache License

@Override
public void start(Object service) {
    UserGroupInformation ugi;
    try {
        ugi = UserGroupInformation.getCurrentUser();
        LOG.info("Starting NameNodeBackupServicePlugin with ugi {}", ugi);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    Configuration conf = getConf();
    NameNode namenode = (NameNode) service;
    BlockManager blockManager = namenode.getNamesystem().getBlockManager();
    // This object is created here so that its lifecycle follows the namenode's
    try {
        restoreProcessor = SingletonManager.getManager(NameNodeRestoreProcessor.class).getInstance(namenode,
                () -> new NameNodeRestoreProcessor(getConf(), namenode, ugi));
        LOG.info("NameNode Backup plugin setup using UGI {}", ugi);

        NameNodeBackupRPCImpl backupRPCImpl = new NameNodeBackupRPCImpl(blockManager);

        InetSocketAddress listenerAddress = namenode.getServiceRpcAddress();
        int ipcPort = listenerAddress.getPort();
        String bindAddress = listenerAddress.getAddress().getHostAddress();
        int port = conf.getInt(DFS_BACKUP_NAMENODE_RPC_PORT_KEY, DFS_BACKUP_NAMENODE_RPC_PORT_DEFAULT);
        if (port == 0) {
            port = ipcPort + 1;
        }
        server = new RPC.Builder(conf).setBindAddress(bindAddress).setPort(port).setInstance(backupRPCImpl)
                .setProtocol(NameNodeBackupRPC.class).build();
        ServiceAuthorizationManager serviceAuthorizationManager = server.getServiceAuthorizationManager();
        serviceAuthorizationManager.refresh(conf, new BackupPolicyProvider());
        server.start();

        LOG.info("NameNode Backup RPC listening on {}", port);

        int httpPort = getConf().getInt(DFS_BACKUP_NAMENODE_HTTP_PORT_KEY,
                DFS_BACKUP_NAMENODE_HTTP_PORT_DEFAULT);
        if (httpPort != 0) {
            ClassLoader classLoader = getClassLoader();
            if (classLoader != null) {
                ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
                try {
                    BackupWebService<Stats> stats = getBackupWebService(ugi, blockManager, restoreProcessor);

                    // Have to set up the classloader in the thread context so
                    // that the static files in the HTTP server are set up correctly.
                    Thread.currentThread().setContextClassLoader(classLoader);
                    Class<?> backupStatusServerClass = classLoader.loadClass(BACKUP_WEB_BACKUP_WEB_SERVER);

                    Object server = DuckTypeUtil.newInstance(backupStatusServerClass,
                            new Class[] { Integer.TYPE, BackupWebService.class },
                            new Object[] { httpPort, stats });
                    httpServer = DuckTypeUtil.wrap(HttpServer.class, server);
                    httpServer.start();
                    LOG.info("NameNode Backup HTTP listening on {}", httpPort);
                } finally {
                    Thread.currentThread().setContextClassLoader(contextClassLoader);
                }
            } else {
                LOG.info("NameNode Backup HTTP classes not found.");
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:cascading.flow.tez.util.TezUtil.java

License:Open Source License

public static UserGroupInformation getCurrentUser() {
    try {
        return UserGroupInformation.getCurrentUser();
    } catch (IOException exception) {
        throw new CascadingException("unable to get current user", exception);
    }
}
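
The wrapper exists so that call sites inside Cascading need not handle the checked IOException declared by getCurrentUser(); it is rethrown as the framework's unchecked CascadingException with the original exception preserved as the cause.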

From source file:cn.edu.buaa.act.petuumOnYarn.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    // Note: the Credentials, Token, UserGroupInformation, and DataOutputBuffer
    // classes are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from
    // clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers;
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

}
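
Note the token handling at the start of run(): the AM->RM token is stripped from the serialized credentials so that launched containers cannot use it to talk to the ResourceManager as the ApplicationMaster, while the remaining tokens (for example, HDFS delegation tokens) stay in allTokens for distribution to containers. The submitter's UGI is then rebuilt from the USER environment variable and given the original credentials.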

From source file:co.cask.cdap.app.runtime.spark.SparkCredentialsUpdaterTest.java

License:Apache License

@Test
public void testUpdater() throws Exception {
    Location credentialsDir = Locations.toLocation(TEMPORARY_FOLDER.newFolder());

    // Create an updater that doesn't do any auto-update within the test time and doesn't clean up
    SparkCredentialsUpdater updater = new SparkCredentialsUpdater(createCredentialsSupplier(), credentialsDir,
            "credentials", TimeUnit.DAYS.toMillis(1), TimeUnit.DAYS.toMillis(1), Integer.MAX_VALUE) {
        @Override
        long getNextUpdateDelay(Credentials credentials) throws IOException {
            return TimeUnit.DAYS.toMillis(1);
        }
    };

    // Before the updater starts, the directory is empty
    Assert.assertTrue(credentialsDir.list().isEmpty());

    UserGroupInformation.getCurrentUser().addToken(
            new Token<>(Bytes.toBytes("id"), Bytes.toBytes("pass"), new Text("kind"), new Text("service")));

    updater.startAndWait();
    try {
        List<Location> expectedFiles = new ArrayList<>();
        expectedFiles.add(credentialsDir.append("credentials-1"));

        for (int i = 1; i <= 10; i++) {
            Assert.assertEquals(expectedFiles, listAndSort(credentialsDir));

            // Read the credentials from the last file
            Credentials newCredentials = new Credentials();
            try (DataInputStream is = new DataInputStream(
                    expectedFiles.get(expectedFiles.size() - 1).getInputStream())) {
                newCredentials.readTokenStorageStream(is);
            }

            // Should contain all tokens of the current user
            Credentials userCredentials = UserGroupInformation.getCurrentUser().getCredentials();
            for (Token<? extends TokenIdentifier> token : userCredentials.getAllTokens()) {
                Assert.assertEquals(token, newCredentials.getToken(token.getService()));
            }

            UserGroupInformation.getCurrentUser().addToken(new Token<>(Bytes.toBytes("id" + i),
                    Bytes.toBytes("pass" + i), new Text("kind" + i), new Text("service" + i)));
            updater.run();
            expectedFiles.add(credentialsDir.append("credentials-" + (i + 1)));
        }
    } finally {
        updater.stopAndWait();
    }
}
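
The test works because addToken() mutates the Credentials held by the process-wide current-user UGI: each call adds a token in place, so the updater's next run() observes the enlarged credential set and writes a new credentials-N file.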

From source file:co.cask.cdap.app.runtime.spark.SparkCredentialsUpdaterTest.java

License:Apache License

private Supplier<Credentials> createCredentialsSupplier() {
    return new Supplier<Credentials>() {
        @Override
        public Credentials get() {
            try {
                return UserGroupInformation.getCurrentUser().getCredentials();
            } catch (Exception e) {
                throw Throwables.propagate(e);
            }
        }
    };
}

From source file:co.cask.cdap.common.guice.FileContextProvider.java

License:Apache License

private UserGroupInformation createUGI() {
    String hdfsUser = cConf.get(Constants.CFG_HDFS_USER);
    try {
        if (hdfsUser == null || UserGroupInformation.isSecurityEnabled()) {
            if (hdfsUser != null) {
                LOG.debug("Ignoring configuration {}={}, running on secure Hadoop", Constants.CFG_HDFS_USER,
                        hdfsUser);
            }
            LOG.debug("Getting filesystem for current user");
            return UserGroupInformation.getCurrentUser();
        } else {
            LOG.debug("Getting filesystem for user {}", hdfsUser);
            return UserGroupInformation.createRemoteUser(hdfsUser);
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
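
The branching reflects how Hadoop authentication works: on a secure (Kerberos) cluster the identity comes from the login credentials, so a configured hdfsUser is ignored and getCurrentUser() is used; on an insecure cluster, simple authentication trusts the client-supplied name, so createRemoteUser(hdfsUser) can fabricate a UGI for the configured user without any credentials.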

From source file:co.cask.cdap.common.security.YarnTokenUtils.java

License:Apache License

/**
 * Gets a Yarn delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance as the one given in parameter.
 */
public static Credentials obtainToken(YarnConfiguration configuration, Credentials credentials) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return credentials;
    }

    try {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(configuration);
        yarnClient.start();

        try {
            Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
            org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient
                    .getRMDelegationToken(renewer);

            // TODO: The following logic should be replaced with call to ClientRMProxy.getRMDelegationTokenService after
            // CDAP-4825 is resolved
            List<String> services = new ArrayList<>();
            if (HAUtil.isHAEnabled(configuration)) {
                // If HA is enabled, we need to enumerate all RM hosts
                // and add the corresponding service name to the token service
                // Copy the yarn conf since we need to modify it to get the RM addresses
                YarnConfiguration yarnConf = new YarnConfiguration(configuration);
                for (String rmId : HAUtil.getRMHAIds(configuration)) {
                    yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                    InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                    services.add(SecurityUtil.buildTokenService(address).toString());
                }
            } else {
                services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
            }

            Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken,
                    (InetSocketAddress) null);
            token.setService(new Text(Joiner.on(',').join(services)));
            credentials.addToken(new Text(token.getService()), token);

            // OK to log, it won't log the credential, only information about the token.
            LOG.info("Added RM delegation token: {}", token);

        } finally {
            yarnClient.stop();
        }

        return credentials;
    } catch (Exception e) {
        LOG.error("Failed to get secure token for Yarn.", e);
        throw Throwables.propagate(e);
    }
}
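
Two points about this helper: the renewer passed to getRMDelegationToken() is the short name of the current user, which is who will later be allowed to renew the token; and under ResourceManager HA the token's service field is rewritten to a comma-joined list of every RM address, so the token matches whichever ResourceManager is currently active.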