Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.getCurrentUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
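
Because getCurrentUser() resolves any doAs context on the current call stack, it can report a different user than getLoginUser(). The snippet below is a minimal sketch of that behavior, assuming a Hadoop client classpath; the class name and the proxy user "alice" are hypothetical.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserDemo {
    public static void main(String[] args) throws Exception {
        UserGroupInformation login = UserGroupInformation.getLoginUser();
        // "alice" is an illustrative proxy user, not part of the API
        UserGroupInformation proxy = UserGroupInformation.createProxyUser("alice", login);
        proxy.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                // Inside doAs, getCurrentUser() returns the proxy user,
                // while getLoginUser() still returns the original login user.
                System.out.println("current: " + UserGroupInformation.getCurrentUser().getUserName());
                System.out.println("login:   " + UserGroupInformation.getLoginUser().getUserName());
                return null;
            }
        });
    }
}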

Usage

From source file: co.cask.cdap.security.impersonation.DefaultImpersonator.java

License: Apache License

private UserGroupInformation getUGI(ImpersonationInfo impersonationInfo) throws IOException {
    // no need to get a UGI if the current UGI is the one we're requesting; simply return it
    String configuredPrincipalShortName = new KerberosName(impersonationInfo.getPrincipal()).getShortName();
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    if (currentUser.getShortUserName().equals(configuredPrincipalShortName)) {
        return currentUser;
    }
    return ugiProvider.getConfiguredUGI(impersonationInfo);
}

From source file: co.cask.hydrator.common.batch.JobUtils.java

License: Apache License

/**
 * Creates a new instance of {@link Job}. Note that the job created is not meant for actual MR
 * submission. It's just for setting up configurations.
 */
public static Job createInstance() throws IOException {
    Job job = Job.getInstance();
    Configuration conf = job.getConfiguration();
    conf.clear();

    if (UserGroupInformation.isSecurityEnabled()) {
        // When running in a secure cluster, this program runner executes inside a YARN container,
        // hence it is not able to authenticate with the job history server.
        conf.unset("mapreduce.jobhistory.address");
        conf.setBoolean(Job.JOB_AM_ACCESS_DISABLED, false);

        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        job.getCredentials().addAll(credentials);
    }

    return job;
}

From source file: com.alibaba.jstorm.hdfs.common.security.AutoHDFS.java

License: Apache License

public void addTokensToUGI(Subject subject) {
    if (subject != null) {
        Set<Credentials> privateCredentials = subject.getPrivateCredentials(Credentials.class);
        if (privateCredentials != null) {
            for (Credentials cred : privateCredentials) {
                Collection<Token<? extends TokenIdentifier>> allTokens = cred.getAllTokens();
                if (allTokens != null) {
                    for (Token<? extends TokenIdentifier> token : allTokens) {
                        try {
                            UserGroupInformation.getCurrentUser().addToken(token);
                            LOG.info("Added delegation token to UGI.");
                        } catch (IOException e) {
                            LOG.error("Exception while trying to add token to UGI", e);
                        }
                    }
                }
            }
        }
    }
}

From source file: com.alibaba.jstorm.hdfs.common.security.AutoHDFS.java

License: Apache License

@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map conf) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            final Configuration configuration = new Configuration();

            login(configuration);

            final String topologySubmitterUser = (String) conf.get(Config.TOPOLOGY_SUBMITTER_PRINCIPAL);

            final URI nameNodeURI = conf.containsKey(TOPOLOGY_HDFS_URI)
                    ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString())
                    : FileSystem.getDefaultUri(configuration);

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    ugi);

            Credentials creds = proxyUser.doAs(new PrivilegedAction<Credentials>() {
                @Override
                public Credentials run() {
                    try {
                        FileSystem fileSystem = FileSystem.get(nameNodeURI, configuration);
                        Credentials credential = proxyUser.getCredentials();

                        fileSystem.addDelegationTokens(hdfsPrincipal, credential);
                        LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser);
                        return credential;
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            });

            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);

            creds.write(out);
            out.flush();
            out.close();

            return bao.toByteArray();
        } else {
            throw new RuntimeException("Security is not enabled for HDFS");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}
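
The byte array returned above is the serialized form of a Hadoop Credentials object. As a minimal sketch of the counterpart read path (the helper name is hypothetical), note that ObjectInputStream works here because it implements DataInput, mirroring the ObjectOutputStream used for writing:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

import org.apache.hadoop.security.Credentials;

// Hypothetical helper illustrating how the serialized bytes could be read back.
public static Credentials deserializeCredentials(byte[] bytes) throws IOException {
    Credentials credentials = new Credentials();
    credentials.readFields(new ObjectInputStream(new ByteArrayInputStream(bytes)));
    return credentials;
}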

From source file: com.bigjob.ApplicationMaster.java

License: Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public boolean run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainers; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

    while (!done && (numCompletedContainers.get() != numTotalContainers)) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
            // ignore the interrupt and keep polling for completed containers
        }
    }
    finish();

    return success;
}
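
The allTokens buffer prepared at the top of run() is typically attached to each container's launch context so launched processes can authenticate to HDFS. A minimal sketch of that hand-off, assuming localResources, env, commands, and container are assembled elsewhere in this ApplicationMaster; the buffer is duplicated so each launch gets an independent read position:

ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
        localResources, env, commands, null, allTokens.duplicate(), null);
nmClientAsync.startContainerAsync(container, ctx);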

From source file: com.blackberry.bdp.kaboom.Authenticator.java

License: Apache License

private boolean authenticate(String proxyUserName) {
    UserGroupInformation proxyTicket;

    // logic for kerberos login
    boolean useSecurity = UserGroupInformation.isSecurityEnabled();

    LOG.info("Hadoop Security enabled: " + useSecurity);

    if (useSecurity) {
        // sanity checking
        if (kerbConfPrincipal.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a principal to use for Kerberos auth.");
            return false;
        }
        if (kerbKeytab.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a keytab to use for Kerberos auth.");
            return false;
        }

        String principal;
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            principal = SecurityUtil.getServerPrincipal(kerbConfPrincipal, "");
        } catch (IOException e) {
            LOG.error("Host lookup error resolving kerberos principal (" + kerbConfPrincipal
                    + "). Exception follows.", e);
            return false;
        }

        Preconditions.checkNotNull(principal, "Principal must not be null");
        KerberosUser prevUser = staticLogin.get();
        KerberosUser newUser = new KerberosUser(principal, kerbKeytab);

        // be cruel and unusual when user tries to login as multiple principals
        // this isn't really valid with a reconfigure but this should be rare
        // enough to warrant a restart of the agent JVM
        // TODO: find a way to interrogate the entire current config state,
        // since we don't have to be unnecessarily protective if they switch all
        // HDFS sinks to use a different principal all at once.
        Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
                "Cannot use multiple kerberos principals in the same agent. "
                        + "Must restart agent to use new principal or keytab. Previous = %s, New = %s",
                prevUser, newUser);

        // attempt to use cached credential if the user is the same
        // this is polite and should avoid flooding the KDC with auth requests
        UserGroupInformation curUser = null;
        if (prevUser != null && prevUser.equals(newUser)) {
            try {
                LOG.info("Attempting login as {} with cached credentials", prevUser.getPrincipal());
                curUser = UserGroupInformation.getLoginUser();
            } catch (IOException e) {
                LOG.warn("User unexpectedly had no active login. Continuing with " + "authentication", e);
            }
        }

        if (curUser == null || !curUser.getUserName().equals(principal)) {
            try {
                // static login
                curUser = kerberosLogin(this, principal, kerbKeytab);
                LOG.info("Current user obtained from Kerberos login {}", curUser.getUserName());
            } catch (IOException e) {
                LOG.error("Authentication or file read error while attempting to "
                        + "login as kerberos principal (" + principal + ") using " + "keytab (" + kerbKeytab
                        + "). Exception follows.", e);
                return false;
            }
        } else {
            LOG.debug("{}: Using existing principal login: {}", this, curUser);
        }

        try {
            if (!UserGroupInformation.getLoginUser().isFromKeytab()) {
                LOG.warn("Using a keytab for authentication is {}",
                        UserGroupInformation.getLoginUser().isFromKeytab());
                LOG.warn("curUser.isFromKeytab(): {}", curUser.isFromKeytab());
                LOG.warn("UserGroupInformation.getCurrentUser().isLoginKeytabBased(): {}",
                        UserGroupInformation.getCurrentUser().isLoginKeytabBased());
                LOG.warn("UserGroupInformation.isLoginKeytabBased(): {}",
                        UserGroupInformation.isLoginKeytabBased());
                LOG.warn("curUser.getAuthenticationMethod(): {}", curUser.getAuthenticationMethod());
                //System.exit(1);
            }
        } catch (IOException e) {
            LOG.error("Failed to get login user.", e);
            System.exit(1);
        }

        // we supposedly got through this unscathed... so store the static user
        staticLogin.set(newUser);
    }

    // hadoop impersonation works with or without kerberos security
    proxyTicket = null;
    if (!proxyUserName.isEmpty()) {
        try {
            proxyTicket = UserGroupInformation.createProxyUser(proxyUserName,
                    UserGroupInformation.getLoginUser());
        } catch (IOException e) {
            LOG.error("Unable to login as proxy user. Exception follows.", e);
            return false;
        }
    }

    UserGroupInformation ugi = null;
    if (proxyTicket != null) {
        ugi = proxyTicket;
    } else if (useSecurity) {
        try {
            ugi = UserGroupInformation.getLoginUser();
        } catch (IOException e) {
            LOG.error("Unexpected error: Unable to get authenticated user after "
                    + "apparent successful login! Exception follows.", e);
            return false;
        }
    }

    if (ugi != null) {
        // dump login information
        AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
        LOG.info("Auth method: {}", authMethod);
        LOG.info(" User name: {}", ugi.getUserName());
        LOG.info(" Using keytab: {}", ugi.isFromKeytab());
        if (authMethod == AuthenticationMethod.PROXY) {
            UserGroupInformation superUser;
            try {
                superUser = UserGroupInformation.getLoginUser();
                LOG.info(" Superuser auth: {}", superUser.getAuthenticationMethod());
                LOG.info(" Superuser name: {}", superUser.getUserName());
                LOG.info(" Superuser using keytab: {}", superUser.isFromKeytab());
            } catch (IOException e) {
                LOG.error("Unexpected error: unknown superuser impersonating proxy.", e);
                return false;
            }
        }

        LOG.info("Logged in as user {}", ugi.getUserName());

        UGIState state = new UGIState();
        state.ugi = proxyTicket;
        state.lastAuthenticated = System.currentTimeMillis();
        proxyUserMap.put(proxyUserName, state);

        return true;
    }

    return true;
}

From source file: com.cloudera.beeswax.Server.java

License: Apache License

/**
 * Authenticate using kerberos if configured
 */
private static void doKerberosAuth() throws IllegalArgumentException {
    if (keytabFile == null || keytabFile.isEmpty()) {
        throw new IllegalArgumentException("No keytab specified");
    }
    if (principalConf == null || principalConf.isEmpty()) {
        throw new IllegalArgumentException("No principal specified");
    }

    // Login from the keytab
    try {
        kerberosName = SecurityUtil.getServerPrincipal(principalConf, "0.0.0.0");
        UserGroupInformation.loginUserFromKeytab(kerberosName, keytabFile);
        LOG.info("Logged in using Kerberos ticket for '" + kerberosName + "' from " + keytabFile);
        bwUgi = UserGroupInformation.getCurrentUser();
        // Start a thread to periodically refresh kerberos ticket
        Thread t = new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        Thread.sleep(refreshInterval);
                    } catch (InterruptedException e) {
                        return;
                    }
                    try {
                        UserGroupInformation.getLoginUser().reloginFromKeytab();
                        LOG.info("Refreshed Kerberos ticket for '" + kerberosName + "' from " + keytabFile);
                    } catch (IOException eIO) {
                        LOG.error("Error refreshing Kerberos ticket", eIO);
                    }
                }
            }
        }, "KerberosRefresher");
        t.start();
    } catch (IOException e) {
        throw new IllegalArgumentException("Couldn't setup Kerberos authentication", e);
    }
}

From source file: com.cloudera.hadoop.hdfs.nfs.nfs4.NFS4Handler.java

License: Apache License

/**
 * Process a CompoundRequest and return a CompoundResponse.
 */
public CompoundResponse process(final RPCRequest rpcRequest, final CompoundRequest compoundRequest,
        final InetAddress clientAddress, final String sessionID) {
    Credentials creds = (Credentials) compoundRequest.getCredentials();
    // FIXME below is a hack regarding CredentialsUnix
    if (!(creds instanceof AuthenticatedCredentials)) {
        CompoundResponse response = new CompoundResponse();
        response.setStatus(NFS4ERR_WRONGSEC);
        return response;
    }
    try {
        UserGroupInformation sudoUgi;
        String username = creds.getUsername(mConfiguration);
        if (UserGroupInformation.isSecurityEnabled()) {
            sudoUgi = UserGroupInformation.createProxyUser(username, UserGroupInformation.getCurrentUser());
        } else {
            sudoUgi = UserGroupInformation.createRemoteUser(username);
        }
        final NFS4Handler server = this;
        final Session session = new Session(rpcRequest.getXid(), compoundRequest, mConfiguration, clientAddress,
                sessionID);
        return sudoUgi.doAs(new PrivilegedExceptionAction<CompoundResponse>() {

            public CompoundResponse run() throws Exception {
                String username = UserGroupInformation.getCurrentUser().getShortUserName();
                int lastStatus = NFS4_OK;
                List<OperationResponse> responses = Lists.newArrayList();
                for (OperationRequest request : compoundRequest.getOperations()) {
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug(sessionID + " " + request.getClass().getSimpleName() + " for " + username);
                    }
                    OperationRequestHandler<OperationRequest, OperationResponse> handler = OperationFactory
                            .getHandler(request.getID());
                    OperationResponse response = handler.handle(server, session, request);
                    responses.add(response);
                    lastStatus = response.getStatus();
                    if (lastStatus != NFS4_OK) {
                        LOGGER.warn(sessionID + " Quitting due to " + lastStatus + " on "
                                + request.getClass().getSimpleName() + " for " + username);
                        break;
                    }
                    server.incrementMetric("NFS_" + request.getClass().getSimpleName(), 1);
                    server.incrementMetric("NFS_OPERATIONS", 1);
                }
                CompoundResponse response = new CompoundResponse();
                response.setStatus(lastStatus);
                response.setOperations(responses);
                server.incrementMetric("NFS_COMMANDS", 1);
                return response;
            }
        });
    } catch (Exception ex) {
        if (ex instanceof UndeclaredThrowableException && ex.getCause() != null) {
            Throwable throwable = ex.getCause();
            if (throwable instanceof Exception) {
                ex = (Exception) throwable;
            } else if (throwable instanceof Error) {
                // something really bad happened
                LOGGER.error(sessionID + " Unhandled Error", throwable);
                throw (Error) throwable;
            } else {
                LOGGER.error(sessionID + " Unhandled Throwable", throwable);
                throw new RuntimeException(throwable);
            }
        }
        LOGGER.warn(sessionID + " Unhandled Exception", ex);
        CompoundResponse response = new CompoundResponse();
        if (ex instanceof NFS4Exception) {
            response.setStatus(((NFS4Exception) ex).getError());
        } else if (ex instanceof UnsupportedOperationException) {
            response.setStatus(NFS4ERR_NOTSUPP);
        } else {
            LOGGER.warn(sessionID + " Setting SERVERFAULT for " + clientAddress + " for "
                    + compoundRequest.getOperations());
            response.setStatus(NFS4ERR_SERVERFAULT);
        }
        return response;
    }
}

From source file: com.cloudera.hoop.client.fs.HoopFileSystem.java

License: Open Source License

/**
 * Called after a new FileSystem instance is constructed.
 *
 * @param name a uri whose authority section names the host, port, etc. for this FileSystem
 * @param conf the configuration
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    doAs = ugi.getUserName();
    super.initialize(name, conf);
    try {
        uri = new URI(name.getScheme() + "://" + name.getHost() + ":" + name.getPort());
    } catch (URISyntaxException ex) {
        throw new IOException(ex);
    }
}

From source file: com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License: Open Source License

@Test(dataProvider = "Operations")
@TestDir
@TestServlet
@TestHadoop
public void testOperationDoAs(final Operation op) throws Exception {
    createHoopServer();
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(getHadoopUsers()[0],
            UserGroupInformation.getCurrentUser());
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            operation(op);
            return null;
        }
    });
}