Example usage for org.apache.hadoop.security UserGroupInformation getShortUserName

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.getShortUserName().

Prototype

public String getShortUserName() 

Document

Get the user's login name, i.e. the user's name up to the first '/' or '@'.
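
As a quick orientation, here is a minimal sketch (the class name ShortUserNameDemo is illustrative) contrasting getShortUserName() with getUserName(). For a Kerberos principal such as alice/host@EXAMPLE.COM, getUserName() returns the full principal while getShortUserName() returns just alice.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class ShortUserNameDemo {
    public static void main(String[] args) throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // Full name, e.g. "alice/host@EXAMPLE.COM" on a Kerberized cluster,
        // or the OS login name in simple-auth setups.
        System.out.println("full name:  " + ugi.getUserName());
        // Short name: everything up to the first '/' or '@', e.g. "alice".
        System.out.println("short name: " + ugi.getShortUserName());
    }
}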

Usage

From source file: com.cloudera.impala.util.FsPermissionChecker.java

License: Apache License
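
This example snapshots the calling user's short name and group memberships once, when the permission checker is constructed.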

private FsPermissionChecker() throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    groups_.addAll(Arrays.asList(ugi.getGroupNames()));
    user_ = ugi.getShortUserName();
}

From source file: com.inforefiner.hdata.ApplicationMaster.java

License: Apache License
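
Here a container-start event is published to the YARN timeline service; the short user name tags the entity as a primary filter, and the put runs inside ugi.doAs().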

private static void publishContainerStartEvent(final TimelineClient timelineClient, Container container,
        String domainId, UserGroupInformation ugi) {
    final TimelineEntity entity = new TimelineEntity();
    entity.setEntityId(container.getId().toString());
    entity.setEntityType(DSEntity.DS_CONTAINER.toString());
    entity.setDomainId(domainId);
    entity.addPrimaryFilter("user", ugi.getShortUserName());
    TimelineEvent event = new TimelineEvent();
    event.setTimestamp(System.currentTimeMillis());
    event.setEventType(DSEvent.DS_CONTAINER_START.toString());
    event.addEventInfo("Node", container.getNodeId().toString());
    event.addEventInfo("Resources", container.getResource().toString());
    entity.addEvent(event);

    try {
        ugi.doAs(new PrivilegedExceptionAction<TimelinePutResponse>() {
            @Override
            public TimelinePutResponse run() throws Exception {
                return timelineClient.putEntities(entity);
            }
        });
    } catch (Exception e) {
        LOG.error("Container start event could not be published for " + container.getId().toString(),
                e instanceof UndeclaredThrowableException ? e.getCause() : e);
    }
}

From source file: com.inforefiner.hdata.ApplicationMaster.java

License: Apache License
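
The matching container-end event is tagged with the same short user name before being put to the timeline service.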

private static void publishContainerEndEvent(final TimelineClient timelineClient, ContainerStatus container,
        String domainId, UserGroupInformation ugi) {
    final TimelineEntity entity = new TimelineEntity();
    entity.setEntityId(container.getContainerId().toString());
    entity.setEntityType(DSEntity.DS_CONTAINER.toString());
    entity.setDomainId(domainId);
    entity.addPrimaryFilter("user", ugi.getShortUserName());
    TimelineEvent event = new TimelineEvent();
    event.setTimestamp(System.currentTimeMillis());
    event.setEventType(DSEvent.DS_CONTAINER_END.toString());
    event.addEventInfo("State", container.getState().name());
    event.addEventInfo("Exit Status", container.getExitStatus());
    entity.addEvent(event);
    try {
        timelineClient.putEntities(entity);
    } catch (YarnException | IOException e) {
        LOG.error("Container end event could not be published for " + container.getContainerId().toString(), e);
    }
}

From source file: com.inforefiner.hdata.ApplicationMaster.java

License: Apache License
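
An application-attempt start or end event is published, again using the short user name as the "user" primary filter.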

private static void publishApplicationAttemptEvent(final TimelineClient timelineClient, String appAttemptId,
        DSEvent appEvent, String domainId, UserGroupInformation ugi) {
    final TimelineEntity entity = new TimelineEntity();
    entity.setEntityId(appAttemptId);
    entity.setEntityType(DSEntity.DS_APP_ATTEMPT.toString());
    entity.setDomainId(domainId);
    entity.addPrimaryFilter("user", ugi.getShortUserName());
    TimelineEvent event = new TimelineEvent();
    event.setEventType(appEvent.toString());
    event.setTimestamp(System.currentTimeMillis());
    entity.addEvent(event);
    try {
        timelineClient.putEntities(entity);
    } catch (YarnException | IOException e) {
        LOG.error("App Attempt " + (appEvent.equals(DSEvent.DS_APP_ATTEMPT_START) ? "start" : "end")
                + " event could not be published for " + appAttemptId.toString(), e);
    }
}

From source file: com.mellanox.r4h.TestReadWhileWriting.java

License: Apache License
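
This HDFS test appends to a half-written file as a second test user whose name is derived from the current user's short name, so the append happens under a different lease holder after the original lease expires.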

/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // create cluster
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        //change the lease limits.
        cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

        //wait for the cluster
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        final Path p = new Path(DIR, "file1");
        final int half = BLOCK_SIZE / 2;

        //a. On Machine M1, Create file. Write half block of data.
        //   Invoke DFSOutputStream.hflush() on the dfs file handle.
        //   Do not close file yet.
        {
            final FSDataOutputStream out = fs.create(p, true,
                    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3,
                    BLOCK_SIZE);
            write(out, 0, half);

            //hflush
            ((DFSOutputStream) out.getWrappedStream()).hflush();
        }

        //b. On another machine M2, open file and verify that the half-block
        //   of data can be read successfully.
        checkFile(p, half, conf);
        MiniDFSClusterBridge.getAppendTestUtilLOG().info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem) fs).dfs.getLeaseRenewer().interruptAndJoin();

        //c. On M1, append another half block of data.  Close file on M1.
        {
            //sleep to let the lease expire.
            Thread.sleep(2 * SOFT_LEASE_LIMIT);

            final UserGroupInformation current = UserGroupInformation.getCurrentUser();
            final UserGroupInformation ugi = UserGroupInformation
                    .createUserForTesting(current.getShortUserName() + "x", new String[] { "supergroup" });
            final DistributedFileSystem dfs = ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
                @Override
                public DistributedFileSystem run() throws Exception {
                    return (DistributedFileSystem) FileSystem.newInstance(conf);
                }
            });
            final FSDataOutputStream out = append(dfs, p);
            write(out, 0, half);
            out.close();
        }

        //d. On M2, open file and read 1 block of data from it. Close file.
        checkFile(p, 2 * half, conf);
    } finally {
        cluster.shutdown();
    }
}

From source file: com.wandisco.s3hdfs.rewrite.filter.S3HdfsTestUtil.java

License: Apache License
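
Here the current user's short name doubles as the S3 access key when configuring a JetS3t RestS3Service against an S3-on-HDFS proxy.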

S3Service configureS3Service(String host, int proxy) throws IOException, S3ServiceException {
    // configure the service
    Jets3tProperties props = new Jets3tProperties();
    props.setProperty("s3service.disable-dns-buckets", String.valueOf(true));
    props.setProperty("s3service.s3-endpoint", host);
    props.setProperty("s3service.s3-endpoint-http-port", String.valueOf(proxy));
    props.setProperty("s3service.https-only", String.valueOf(false));
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    AWSCredentials creds = new AWSCredentials(ugi.getShortUserName(), "SomeSecretKey", ugi.getUserName());
    return new RestS3Service(creds, null, null, props);
}

From source file: com.yahoo.storm.yarn.StormAMRMClient.java

License: Open Source License
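
After starting a supervisor container, the launcher uses the short user name to assemble the container-log URL it prints.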

public void launchSupervisorOnContainer(Container container) throws IOException {
    // create a container launch context
    ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    try {
        Credentials credentials = user.getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        launchContext.setTokens(securityTokens);
    } catch (IOException e) {
        LOG.warn("Getting current user info failed when trying to launch the container" + e.getMessage());
    }

    // CLC: env
    Map<String, String> env = new HashMap<String, String>();
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    launchContext.setEnvironment(env);

    // CLC: local resources includes storm, conf
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    String storm_zip_path = (String) storm_conf.get("storm.zip.path");
    Path zip = new Path(storm_zip_path);
    FileSystem fs = FileSystem.get(hadoopConf);
    String vis = (String) storm_conf.get("storm.zip.visibility");
    if (vis.equals("PUBLIC"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PUBLIC));
    else if (vis.equals("PRIVATE"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PRIVATE));
    else if (vis.equals("APPLICATION"))
        localResources.put("storm", Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE,
                LocalResourceVisibility.APPLICATION));

    String appHome = Util.getApplicationHomeForId(appAttemptId.toString());
    Path confDst = Util.createConfigurationFileInFs(fs, appHome, this.storm_conf, this.hadoopConf);
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));

    launchContext.setLocalResources(localResources);

    // CLC: command
    List<String> supervisorArgs = Util.buildSupervisorCommands(this.storm_conf);
    launchContext.setCommands(supervisorArgs);

    try {
        LOG.info("Use NMClient to launch supervisors in container. ");
        nmClient.startContainer(container, launchContext);

        String userShortName = user.getShortUserName();
        if (userShortName != null)
            LOG.info("Supervisor log: http://" + container.getNodeHttpAddress() + "/node/containerlogs/"
                    + container.getId().toString() + "/" + userShortName + "/supervisor.log");
    } catch (Exception e) {
        LOG.error("Caught an exception while trying to start a container", e);
        System.exit(-1);
    }
}

From source file: io.hops.util.GroupMembershipService.java

License: Apache License
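
The caller's short user name identifies the administrator both in the RM HA status check and in the audit log entry.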

private RefreshAdminAclsResponse refreshAdminAcls(boolean checkRMHAState) throws YarnException, IOException {
    String argName = "refreshAdminAcls";
    UserGroupInformation user = checkAcls(argName);

    if (checkRMHAState) {
        checkRMStatus(user.getShortUserName(), argName, "refresh Admin ACLs.");
    }
    Configuration conf = getConfiguration(new Configuration(false),
            YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
    authorizer.setAdmins(getAdminAclList(conf), UserGroupInformation.getCurrentUser());
    RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

    return recordFactory.newRecordInstance(RefreshAdminAclsResponse.class);
}

From source file: joshelser.TUGIAssumingProcessor.java

License: Apache License
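
This Thrift processor impersonates the SASL-authenticated end user through a proxy UGI, logs the resulting short user name, and runs the wrapped processor inside doAs().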

@Override
public boolean process(final TProtocol inProt, final TProtocol outProt) throws TException {
    TTransport trans = inProt.getTransport();
    if (!(trans instanceof TSaslServerTransport)) {
        throw new TException("Unexpected non-SASL transport " + trans.getClass());
    }
    TSaslServerTransport saslTrans = (TSaslServerTransport) trans;
    SaslServer saslServer = saslTrans.getSaslServer();
    String authId = saslServer.getAuthorizationID();
    String endUser = authId;

    UserGroupInformation clientUgi = null;
    try {
        clientUgi = UserGroupInformation.createProxyUser(endUser, UserGroupInformation.getLoginUser());
        final String remoteUser = clientUgi.getShortUserName();
        log.debug("Executing action as {}", remoteUser);
        return clientUgi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() {
                try {
                    return wrapped.process(inProt, outProt);
                } catch (TException te) {
                    throw new RuntimeException(te);
                }
            }
        });
    } catch (RuntimeException rte) {
        if (rte.getCause() instanceof TException) {
            log.error("Failed to invoke wrapped processor", rte.getCause());
            throw (TException) rte.getCause();
        }
        throw rte;
    } catch (InterruptedException | IOException e) {
        log.error("Failed to invoke wrapped processor", e);
        throw new RuntimeException(e);
    } finally {
        if (clientUgi != null) {
            try {
                FileSystem.closeAllForUGI(clientUgi);
            } catch (IOException exception) {
                log.error("Could not clean up file-system handles for UGI: {}", clientUgi, exception);
            }
        }
    }
}

From source file: org.apache.accumulo.proxy.Proxy.java

License: Apache License
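
When the proxy runs with SASL, it logs in from a keytab and registers the logged-in user's short name as the Kerberos server primary.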

public static ServerAddress createProxyServer(HostAndPort address, TProtocolFactory protocolFactory,
        Properties properties, ClientConfiguration clientConf) throws Exception {
    final int numThreads = Integer
            .parseInt(properties.getProperty(THRIFT_THREAD_POOL_SIZE_KEY, THRIFT_THREAD_POOL_SIZE_DEFAULT));
    final long maxFrameSize = AccumuloConfiguration
            .getMemoryInBytes(properties.getProperty(THRIFT_MAX_FRAME_SIZE_KEY, THRIFT_MAX_FRAME_SIZE_DEFAULT));
    final int simpleTimerThreadpoolSize = Integer
            .parseInt(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE.getDefaultValue());
    // How frequently to try to resize the thread pool
    final long threadpoolResizeInterval = 1000L * 5;
    // No timeout
    final long serverSocketTimeout = 0L;
    // Use the new hadoop metrics2 support
    final MetricsFactory metricsFactory = new MetricsFactory(false);
    final String serverName = "Proxy", threadName = "Accumulo Thrift Proxy";

    // create the implementation of the proxy interface
    ProxyServer impl = new ProxyServer(properties);

    // Wrap the implementation -- translate some exceptions
    AccumuloProxy.Iface wrappedImpl = RpcWrapper.service(impl,
            new AccumuloProxy.Processor<AccumuloProxy.Iface>(impl));

    // Create the processor from the implementation
    TProcessor processor = new AccumuloProxy.Processor<>(wrappedImpl);

    // Get the type of thrift server to instantiate
    final String serverTypeStr = properties.getProperty(THRIFT_SERVER_TYPE, THRIFT_SERVER_TYPE_DEFAULT);
    ThriftServerType serverType = DEFAULT_SERVER_TYPE;
    if (!THRIFT_SERVER_TYPE_DEFAULT.equals(serverTypeStr)) {
        serverType = ThriftServerType.get(serverTypeStr);
    }

    SslConnectionParams sslParams = null;
    SaslServerConnectionParams saslParams = null;
    switch (serverType) {
    case SSL:
        sslParams = SslConnectionParams.forClient(ClientContext.convertClientConfig(clientConf));
        break;
    case SASL:
        if (!clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: SASL thrift server was requested but it is disabled in client configuration");
            throw new RuntimeException("SASL is not enabled in configuration");
        }

        // Kerberos needs to be enabled to use it
        if (!UserGroupInformation.isSecurityEnabled()) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: Hadoop security is not enabled");
            throw new RuntimeException();
        }

        // Login via principal and keytab
        final String kerberosPrincipal = properties.getProperty(KERBEROS_PRINCIPAL, ""),
                kerberosKeytab = properties.getProperty(KERBEROS_KEYTAB, "");
        if (StringUtils.isBlank(kerberosPrincipal) || StringUtils.isBlank(kerberosKeytab)) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: Kerberos principal and keytab must be provided");
            throw new RuntimeException();
        }
        UserGroupInformation.loginUserFromKeytab(kerberosPrincipal, kerberosKeytab);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        log.info("Logged in as " + ugi.getUserName());

        // The kerberosPrimary set in the SASL server needs to match the principal we're logged in as.
        final String shortName = ugi.getShortUserName();
        log.info("Setting server primary to {}", shortName);
        clientConf.setProperty(ClientProperty.KERBEROS_SERVER_PRIMARY, shortName);

        KerberosToken token = new KerberosToken();
        saslParams = new SaslServerConnectionParams(clientConf, token, null);

        processor = new UGIAssumingProcessor(processor);

        break;
    default:
        // nothing to do -- no extra configuration necessary
        break;
    }

    // Hook up support for tracing for thrift calls
    TimedProcessor timedProcessor = new TimedProcessor(metricsFactory, processor, serverName, threadName);

    // Create the thrift server with our processor and properties
    ServerAddress serverAddr = TServerUtils.startTServer(serverType, timedProcessor, protocolFactory,
            serverName, threadName, numThreads, simpleTimerThreadpoolSize, threadpoolResizeInterval,
            maxFrameSize, sslParams, saslParams, serverSocketTimeout, address);

    return serverAddr;
}