Example usage for org.apache.hadoop.security UserGroupInformation createRemoteUser

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.createRemoteUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createRemoteUser(String user) 

Document

Create a user from a login name.
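
Before the project examples below, here is a minimal, self-contained sketch of the usual pattern: build a UGI for a login name, then run the work that should be attributed to that user inside doAs. The user name and HDFS URI are illustrative placeholders, not values from any of the projects listed.

import java.net.URI;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateRemoteUserSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative values only; substitute a real login name and namenode URI.
        final String userName = "alice";
        final String hdfsUri = "hdfs://namenode.example.com:8020";

        // Create a UGI from the login name. No credentials are attached;
        // tokens can be added later with addCredentials(...) if needed.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userName);

        // Run the file-system access as that user.
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                FileSystem fs = FileSystem.get(URI.create(hdfsUri), new Configuration());
                System.out.println("Home directory as " + userName + ": " + fs.getHomeDirectory());
                return null;
            }
        });
    }
}

The HDFS-plugin and Ambari examples further down follow this same doAs structure.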

Usage

From source file:io.hops.tensorflow.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 * @throws InterruptedException
 */
public void run() throws YarnException, IOException, InterruptedException {
    LOG.info("Starting ApplicationMaster. " + "Workers: " + numWorkers + ", Parameter servers: " + numPses);

    clusterSpecServer = new ClusterSpecGeneratorServer(appAttemptID.getApplicationId().toString(),
            numTotalContainers, numWorkers);
    LOG.info("Starting ClusterSpecGeneratorServer");
    int port = 2222;
    while (true) {
        try {
            clusterSpecServer.start(port);
            break;
        } catch (IOException e) {
            port++;
        }
    }
    environment.put("YARNTF_AM_ADDRESS", InetAddress.getLocalHost().getHostName() + ":" + port);
    environment.put("YARNTF_APPLICATION_ID", appAttemptID.getApplicationId().toString());

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    rmWrapper = new RMWrapper(this);
    rmWrapper.getClient().init(conf);
    rmWrapper.getClient().start();

    nmWrapper = new NMWrapper(this);
    nmWrapper.getClient().init(conf);
    nmWrapper.getClient().start();

    timelineHandler = new TimelineHandler(appAttemptID.toString(), domainId, appSubmitterUgi);
    timelineHandler.startClient(conf);
    if (timelineHandler.isClientNotNull()) {
        timelineHandler.publishApplicationAttemptEvent(YarntfEvent.YARNTF_APP_ATTEMPT_START);
    }

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    appMasterTrackingUrl = InetAddress.getLocalHost().getHostName() + ":" + TensorBoardServer.spawn(this);
    RegisterApplicationMasterResponse response = rmWrapper.getClient()
            .registerApplicationMaster(appMasterHostname, appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    int maxGPUS = response.getMaximumResourceCapability().getGPUs();
    LOG.info("Max gpus capabililty of resources in this cluster " + maxGPUS);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    if (containerGPUs > maxGPUS) {
        LOG.info("Container gpus specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerGPUs + ", max=" + maxGPUS);
        containerGPUs = maxGPUS;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    // Stop eventual containers from previous attempts
    for (Container prevContainer : previousAMRunningContainers) {
        LOG.info("Releasing YARN container " + prevContainer.getId());
        rmWrapper.getClient().releaseAssignedContainer(prevContainer.getId());
    }

    // Send request for containers to RM
    for (int i = 0; i < numWorkers; i++) {
        ContainerRequest workerRequest = setupContainerAskForRM(true);
        rmWrapper.getClient().addContainerRequest(workerRequest);
    }
    numRequestedContainers.addAndGet(numWorkers);
    for (int i = 0; i < numPses; i++) {
        ContainerRequest psRequest = setupContainerAskForRM(false);
        rmWrapper.getClient().addContainerRequest(psRequest);
    }
    numRequestedContainers.addAndGet(numPses);
}
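
The snippet above only creates appSubmitterUgi and attaches the submitter's credentials; the identity is presumably exercised later (for example by the TimelineHandler) via doAs. A hedged sketch of that consumption step, where the Runnable stands in for whatever call needs the submitter's tokens:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

// Sketch only, not code from the project above: running an action under the
// submitter's identity and tokens once the UGI has been prepared as shown.
final class SubmitterContext {
    static void runAsSubmitter(UserGroupInformation appSubmitterUgi, Runnable action)
            throws Exception {
        appSubmitterUgi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() {
                action.run(); // e.g. publish a timeline entity or touch HDFS
                return null;
            }
        });
    }
}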

From source file:me.haosdent.noya.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws org.apache.hadoop.yarn.exceptions.YarnException
 * @throws java.io.IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info("Received " + previousAMRunningContainers.size()
            + " previous AM's running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainersToRequest);
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }
}

From source file:ml.shifu.guagua.yarn.GuaguaAppMaster.java

License:Apache License

/**
 * Application entry point
 * 
 * @param args
 *            command-line args (set by GuaguaYarnClient, if any)
 */
public static void main(final String[] args) {
    LOG.info("Starting GuaguaAppMaster. ");
    String containerIdString = System.getenv().get(Environment.CONTAINER_ID.name());
    if (containerIdString == null) {
        // container id should always be set in the env by the framework
        throw new IllegalArgumentException("ContainerId not found in env vars.");
    }
    ContainerId containerId = ConverterUtils.toContainerId(containerIdString);
    ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();
    Configuration conf = new YarnConfiguration();
    String jobUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    conf.set(MRJobConfig.USER_NAME, jobUserName);
    try {
        UserGroupInformation.setConfiguration(conf);
        // Security framework already loaded the tokens into current UGI, just use them
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        LOG.info("Executing with tokens:");
        for (Token<?> token : credentials.getAllTokens()) {
            LOG.info(token.toString());
        }

        UserGroupInformation appMasterUgi = UserGroupInformation.createRemoteUser(jobUserName);
        appMasterUgi.addCredentials(credentials);

        // Now remove the AM->RM token so tasks don't have it
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }

        final GuaguaAppMaster appMaster = new GuaguaAppMaster(containerId, appAttemptId, conf);
        appMasterUgi.doAs(new PrivilegedAction<Void>() {
            @Override
            public Void run() {
                boolean result = false;
                try {
                    result = appMaster.run();
                } catch (Throwable t) {
                    LOG.error("GuaguaAppMaster caught a top-level exception in main.", t);
                    System.exit(1);
                }

                if (result) {
                    LOG.info("Guagua Application Master completed successfully. exiting");
                    System.exit(0);
                } else {
                    LOG.info("Guagua Application Master failed. exiting");
                    System.exit(2);
                }
                return null;
            }
        });

    } catch (Throwable t) {
        LOG.error("GuaguaAppMaster caught a top-level exception in main.", t);
        System.exit(1);
    }
}

From source file:ml.shifu.guagua.yarn.GuaguaYarnTask.java

License:Apache License

public static void main(String[] args) {
    LOG.info("args:{}", Arrays.toString(args));
    if (args.length != 7) {
        throw new IllegalStateException(String.format(
                "GuaguaYarnTask could not construct a TaskAttemptID for the Guagua job from args: %s",
                Arrays.toString(args)));
    }

    String containerIdString = System.getenv().get(Environment.CONTAINER_ID.name());
    if (containerIdString == null) {
        // container id should always be set in the env by the framework
        throw new IllegalArgumentException("ContainerId not found in env vars.");
    }
    ContainerId containerId = ConverterUtils.toContainerId(containerIdString);
    ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();

    try {
        Configuration conf = new YarnConfiguration();
        String jobUserName = System.getenv(ApplicationConstants.Environment.USER.name());
        conf.set(MRJobConfig.USER_NAME, jobUserName);
        UserGroupInformation.setConfiguration(conf);
        // Security framework already loaded the tokens into current UGI, just use them
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        LOG.info("Executing with tokens:");
        for (Token<?> token : credentials.getAllTokens()) {
            LOG.info(token.toString());
        }

        UserGroupInformation appTaskUGI = UserGroupInformation.createRemoteUser(jobUserName);
        appTaskUGI.addCredentials(credentials);
        @SuppressWarnings("rawtypes")
        final GuaguaYarnTask<?, ?> guaguaYarnTask = new GuaguaYarnTask(appAttemptId, containerId,
                Integer.parseInt(args[args.length - 3]), args[args.length - 2], args[args.length - 1], conf);
        appTaskUGI.doAs(new PrivilegedAction<Void>() {
            @Override
            public Void run() {
                guaguaYarnTask.run();
                return null;
            }
        });
    } catch (Throwable t) {
        LOG.error("GuaguaYarnTask threw a top-level exception, failing task", t);
        System.exit(2);
    }
    System.exit(0);
}

From source file:net.sf.jfilesync.plugins.net.items.THdfs_plugin.java

License:Apache License

/**
 * There was a bug here: after disconnecting and reconnecting, the session kept the previous
 * login user even though a different user name was supplied the second time (conData had changed).
 * Restarting the application avoided the problem.
 * FIXED! http://stackoverflow.com/questions/15941108/hdfs-access-from-remote-host-through-java-api-user-authentication
 */
@Override
public void connect(TConnectionData connectData) throws PluginConnectException {
    this.conData = connectData;
    final String hostname = conData.getHost();
    final int port = conData.getPort();
    final String uri = "hdfs://" + hostname + ":" + port;
    final String username = conData.getUser();
    //       System.setProperty("HADOOP_USER_NAME", username);
    //       conf = new Configuration();

    LOGGER.info("trying to connect to :" + hostname);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(username);
    try {
        ugi.doAs(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                conf = new Configuration();
                conf.set("hadoop.job.ugi", username);
                fs = FileSystem.get(URI.create(uri), conf);
                return null;
            }

        });
    } catch (Exception e) {
        e.printStackTrace();
        throw new PluginConnectException(TErrorHandling.ERROR_CONNECTION_FAILURE, e.getMessage());
    }
    LOGGER.info("hdfs connect done");
}

From source file:org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier.java

License:Apache License

@Override
public UserGroupInformation getUser() {
    if (null != impl && impl.isSetPrincipal()) {
        return UserGroupInformation.createRemoteUser(impl.getPrincipal());
    }
    return null;
}

From source file:org.apache.accumulo.core.clientImpl.AuthenticationTokenIdentifier.java

License:Apache License

@Override
public UserGroupInformation getUser() {
    if (impl != null && impl.isSetPrincipal()) {
        return UserGroupInformation.createRemoteUser(impl.getPrincipal());
    }
    return null;
}

From source file:org.apache.accumulo.core.security.AuthenticationTokenIdentifierTest.java

License:Apache License

@Test
public void testUgi() {
    String principal = "user";
    AuthenticationTokenIdentifier token = new AuthenticationTokenIdentifier(principal);
    UserGroupInformation actual = token.getUser(), expected = UserGroupInformation.createRemoteUser(principal);
    assertEquals(expected.getAuthenticationMethod(), actual.getAuthenticationMethod());
    assertEquals(expected.getUserName(), actual.getUserName());
}
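
For context on the comparison in this test: the single-argument createRemoteUser overload tags the returned UGI with the SIMPLE authentication method (another overload accepts an explicit AuthMethod). A standalone illustration, using only the public UserGroupInformation API:

import org.apache.hadoop.security.UserGroupInformation;

public class RemoteUserAuthMethod {
    public static void main(String[] args) {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user");
        // Expected output: SIMPLE, then the login name "user".
        System.out.println(ugi.getAuthenticationMethod());
        System.out.println(ugi.getUserName());
    }
}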

From source file:org.apache.ambari.view.filebrowser.HdfsApi.java

License:Apache License

private UserGroupInformation getProxyUser() throws IOException {
    UserGroupInformation proxyuser;
    if (params.containsKey("proxyuser")) {
        proxyuser = UserGroupInformation.createRemoteUser(params.get("proxyuser"));
    } else {
        proxyuser = UserGroupInformation.getCurrentUser();
    }

    proxyuser.setAuthenticationMethod(getAuthenticationMethod());
    return proxyuser;
}

From source file:org.apache.ambari.view.utils.hdfs.HdfsApi.java

License:Apache License

private UserGroupInformation getProxyUser() throws IOException {
    UserGroupInformation proxyuser;
    if (authParams.containsKey("proxyuser")) {
        proxyuser = UserGroupInformation.createRemoteUser(authParams.get("proxyuser"));
    } else {
        proxyuser = UserGroupInformation.getCurrentUser();
    }

    proxyuser.setAuthenticationMethod(getAuthenticationMethod());
    return proxyuser;
}
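
Both HdfsApi variants resolve the UGI the same way and then run file-system calls under it. A short sketch of that follow-on step (the user name and path are illustrative, not taken from the Ambari code):

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserListing {
    public static void main(String[] args) throws Exception {
        // Illustrative only: act as "hive" when a proxy user name has been supplied.
        UserGroupInformation proxyUser = UserGroupInformation.createRemoteUser("hive");

        // List a directory as that user; the default Configuration picks up core-site.xml.
        FileStatus[] listing = proxyUser.doAs(new PrivilegedExceptionAction<FileStatus[]>() {
            @Override
            public FileStatus[] run() throws Exception {
                FileSystem fs = FileSystem.get(new Configuration());
                return fs.listStatus(new Path("/user/hive"));
            }
        });
        for (FileStatus status : listing) {
            System.out.println(status.getPath());
        }
    }
}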