Example usage for org.apache.hadoop.security UserGroupInformation createRemoteUser

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.createRemoteUser from open-source projects.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createRemoteUser(String user) 

Document

Create a user from a login name. It is intended to be used for remote users in RPC, since it won't have any credentials.
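Before the project examples, here is a minimal, self-contained sketch of the common pattern: create a remote UGI from a bare login name (it carries no credentials of its own) and run Hadoop calls under it with doAs. The class name and the user name "alice" are illustrative placeholders, not taken from any of the examples below.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateRemoteUserExample {
    public static void main(String[] args) throws Exception {
        // createRemoteUser attaches no Kerberos credentials; the UGI simply
        // carries the user name (plus any tokens added later) to the cluster.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");

        // Every Hadoop call inside doAs executes as the remote user.
        boolean exists = ugi.doAs((PrivilegedExceptionAction<Boolean>) () -> {
            FileSystem fs = FileSystem.get(new Configuration());
            return fs.exists(new Path("/tmp"));
        });
        System.out.println("/tmp exists: " + exists);
    }
}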

Usage

From source file: org.springframework.yarn.am.AppmasterCmTemplate.java

License: Apache License

@Override
protected UserGroupInformation getUser() {
    InetSocketAddress rpcAddress = getRpcAddress(getConfiguration());

    // TODO: at some point remove static cache
    Token token = NMTokenCache.getNMToken(container.getNodeId().toString());

    // this is what node manager requires for auth
    UserGroupInformation user = UserGroupInformation
            .createRemoteUser(container.getId().getApplicationAttemptId().toString());
    org.apache.hadoop.security.token.Token<NMTokenIdentifier> nmToken = ConverterUtils.convertFromYarn(token,
            rpcAddress);
    user.addToken(nmToken);

    return user;
}

From source file: org.starschema.hadoop.yarn.applications.distributedshell.ApplicationMaster.java

License: Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException, InterruptedException {
    LOG.info("Starting ApplicationMaster");

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    startTimelineClient(conf);
    if (timelineClient != null) {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START,
                domainId, appSubmitterUgi);
    }

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    for (Container container : previousAMRunningContainers) {
        launchedContainers.add(container.getId());
    }
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
}

From source file: org.trustedanalytics.auth.gateway.hbase.config.HbaseConfiguration.java

License: Apache License

private Connection getInsecuredHBaseClient(Configuration hbaseConf)
        throws InterruptedException, URISyntaxException, LoginException, IOException {
    SystemEnvironment systemEnvironment = new SystemEnvironment();
    Configuration conf = HBaseConfiguration.create(hbaseConf);
    User user = UserProvider.instantiate(hbaseConf).create(
            UserGroupInformation.createRemoteUser(systemEnvironment.getVariable(SystemEnvironment.KRB_USER)));
    return ConnectionFactory.createConnection(conf, user);
}

From source file: org.trustedanalytics.servicebroker.hbase.config.HbaseConfiguration.java

License: Apache License

private Admin getUnsecuredHBaseClient()
        throws InterruptedException, URISyntaxException, LoginException, IOException {

    Configuration conf = HBaseConfiguration.create(hbaseConf);
    User user = UserProvider.instantiate(hbaseConf)
            .create(UserGroupInformation.createRemoteUser(configuration.getUser()));
    return ConnectionFactory.createConnection(conf, user).getAdmin();
}

From source file: origin.hadoop.yarn.distributedshell.ApplicationMaster.java

License: Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
    try {
        publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END);
    } catch (Exception e) {
        LOG.error("App Attempt start event coud not be pulished for " + appAttemptID.toString(), e);
    }
}

From source file: oz.hadoop.yarn.test.cluster.InJvmContainerExecutor.java

License: Apache License

/**
 * Builds a UserGroupInformation for launching the given container, copying
 * the tokens from the container's token storage file into the new UGI.
 *
 * @param container the container about to be launched
 * @param containerWorkDir the container's working directory, which holds the token file
 * @return a token-authenticated UGI for the container launch
 */
private UserGroupInformation buildUgiForContainerLaunching(Container container, final Path containerWorkDir) {
    UserGroupInformation ugi;
    try {
        ugi = UserGroupInformation.createRemoteUser(UserGroupInformation.getLoginUser().getUserName());
        ugi.setAuthenticationMethod(AuthMethod.TOKEN);
        String filePath = new Path(containerWorkDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE).toString();
        Credentials credentials = Credentials.readTokenStorageFile(new File(filePath), this.getConf());
        Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
        for (Token<? extends TokenIdentifier> token : tokens) {
            ugi.addToken(token);
        }
    } catch (Exception e) {
        throw new IllegalArgumentException(
                "Failed to build UserGroupInformation to launch container " + container, e);
    }
    return ugi;
}
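As a hedged usage sketch (not code from the quoted project): the returned UGI would typically wrap the actual launch in a doAs block, so that any Hadoop RPC the container makes authenticates with the propagated tokens. doLaunch below is a hypothetical placeholder for the executor's real launch logic.

// Requires java.security.PrivilegedAction; doLaunch is a hypothetical helper.
UserGroupInformation ugi = buildUgiForContainerLaunching(container, containerWorkDir);
int exitCode = ugi.doAs((PrivilegedAction<Integer>) () -> doLaunch(container, containerWorkDir));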

From source file: se.kth.climate.fast.netcdfparquet.HDFSImporter.java

License: Open Source License

public void prepare(final String hdfs, final String hdfsPath, final boolean force)
        throws IOException, URISyntaxException, InterruptedException {
    System.setProperty("hadoop.home.dir", "/");
    ugi = UserGroupInformation.createRemoteUser("Test__meb10000");

    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            LOG.debug("Adding HDFS Config");
            Configuration conf = new Configuration();
            //conf.set("hadoop.job.ugi", "Admin");

            LOG.debug("Connecting to HDFS...");
            fs = FileSystem.get(new URI(hdfs), conf);

            LOG.debug("Getting file status...");
            FileStatus[] status = fs.listStatus(new Path("/Projects/Test"));
            for (int i = 0; i < status.length; i++) {
                LOG.info("In Path: {}", status[i].getPath());
            }

            Path outputFile = new Path(hdfs + hdfsPath);
            Path metaFile = outputFile.suffix("meta");
            sinkFactory = new ParquetSink.Factory(outputFile, uc.noDict);
            OutputStream os = fs.create(metaFile, new Progressable() {
                @Override
                public void progress() {
                    LOG.debug("Sinking Metadata...");
                }
            });
            metaFactory = new AvroSink.StreamFactory(os);
            return null;
        }
    });
}

From source file: se.sics.nstream.hops.hdfs.HDFSComp.java

License: Open Source License

public HDFSComp(Init init) {
    LOG.info("{}init", logPrefix);

    hdfsEndpoint = init.endpoint;
    hdfsResource = init.resource;
    ugi = UserGroupInformation.createRemoteUser(hdfsEndpoint.user);
    writePos = init.streamPos;

    subscribe(handleStart, control);
    subscribe(handleReadRequest, resourcePort);
    subscribe(handleWriteRequest, resourcePort);
}

From source file: se.sics.nstream.hops.hdfs.HDFSHelperTest.java

License: Open Source License

@Test
public void simpleAppend() throws InterruptedException {
    HDFSEndpoint endpoint = HDFSEndpoint.getBasic("glassfish", "bbc1.sics.se", 26801);
    HDFSResource resource = new HDFSResource("/experiment/download/", "test");
    Random rand = new Random(123);
    byte[] data;

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("glassfish");
    HDFSHelper.delete(ugi, endpoint, resource);
    HDFSHelper.simpleCreate(ugi, endpoint, resource);

    KProfiler kp = new KProfiler(KProfiler.Type.LOG);
    for (int i = 0; i < 100; i++) {
        data = new byte[1024 * 1024];
        rand.nextBytes(data);
        kp.start("hdfs", "append");
        HDFSHelper.append(ugi, endpoint, resource, data);
        kp.end();
    }
    HDFSHelper.delete(ugi, endpoint, resource);
}

From source file: stroom.pipeline.server.writer.HDFSFileAppender.java

License: Apache License

public static UserGroupInformation buildRemoteUser(final Optional<String> runAsUser) {
    final String user = runAsUser.orElseGet(() -> {
        try {
            return UserGroupInformation.getCurrentUser().getUserName();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });

    // userGroupInformation =
    // UserGroupInformation.createProxyUser(runAsUser,
    // UserGroupInformation.getLoginUser());
    return UserGroupInformation.createRemoteUser(user);

}
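The commented-out lines above point at the impersonation alternative: when the service logs in with its own credentials (for example from a keytab) and should act on behalf of an end user, createProxyUser chains the remote name onto the login user. A minimal sketch of that variant, assuming proxy-user impersonation (hadoop.proxyuser.<login>.hosts/groups in core-site.xml) is configured for the login principal; the class and method names are illustrative.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
    public static UserGroupInformation proxyFor(String endUser) throws IOException {
        // The login user must be authorized to impersonate endUser in
        // core-site.xml; otherwise downstream services reject the calls.
        return UserGroupInformation.createProxyUser(endUser, UserGroupInformation.getLoginUser());
    }
}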