Example usage for org.apache.hadoop.security UserGroupInformation getUserName

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation#getUserName() from open source projects.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public String getUserName() 

Document

Get the user's full principal name.

Usage
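
Before the project examples, here is a minimal standalone sketch of what getUserName() returns. It is illustrative only and not taken from any of the sources below: the class name is made up, and the printed principal depends entirely on how the JVM authenticated.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class GetUserNameExample {
    public static void main(String[] args) throws IOException {
        // getCurrentUser() is the user of the current access context (e.g. inside
        // a doAs() block); getLoginUser() is the user that logged in to Hadoop.
        UserGroupInformation current = UserGroupInformation.getCurrentUser();
        UserGroupInformation login = UserGroupInformation.getLoginUser();

        // On a kerberized cluster getUserName() yields the full principal name
        // (e.g. "alice@EXAMPLE.COM", illustrative); otherwise the short OS name.
        System.out.println("current user: " + current.getUserName());
        System.out.println("login user: " + login.getUserName());

        // For proxy users, getRealUser() exposes the authenticating user.
        if (current.getRealUser() != null) {
            System.out.println("real user: " + current.getRealUser().getUserName());
        }
    }
}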

From source file: com.cloudera.impala.security.DelegationTokenSecretManager.java

License: Apache License

public synchronized String getDelegationToken(String renewer) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    Text owner = new Text(ugi.getUserName());
    Text realUser = null;
    if (ugi.getRealUser() != null) {
        realUser = new Text(ugi.getRealUser().getUserName());
    }
    DelegationTokenIdentifier ident = new DelegationTokenIdentifier(owner, new Text(renewer), realUser);
    Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>(ident, this);
    LOGGER.info("Generated delegation token. Identifer=" + ident);
    return t.encodeToUrlString();
}
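
For context, a sketch of the consuming side (not part of the original source): the URL-safe string produced by encodeToUrlString() above can be decoded back into a Token with Hadoop's Token.decodeFromUrlString(). The variable encodedToken is a hypothetical placeholder for the string returned by getDelegationToken().

// Sketch only: rebuild the token from the URL-safe string returned by
// getDelegationToken(). 'encodedToken' is a hypothetical placeholder.
Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(encodedToken); // throws IOException if malformed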

From source file: com.datatorrent.stram.LaunchContainerRunnable.java

License: Apache License

public static ByteBuffer getTokens(StramDelegationTokenManager delegationTokenManager,
        InetSocketAddress heartbeatAddress) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
        StramDelegationTokenIdentifier identifier = new StramDelegationTokenIdentifier(
                new Text(ugi.getUserName()), new Text(""), new Text(""));
        String service = heartbeatAddress.getAddress().getHostAddress() + ":" + heartbeatAddress.getPort();
        Token<StramDelegationTokenIdentifier> stramToken = new Token<StramDelegationTokenIdentifier>(identifier,
                delegationTokenManager);
        stramToken.setService(new Text(service));
        return getTokens(ugi, stramToken);
    }
    return null;
}

From source file: com.datatorrent.stram.security.StramWSFilter.java

License: Apache License

@Override
public void init(FilterConfig conf) throws ServletException {
    proxyHost = conf.getInitParameter(PROXY_HOST);
    tokenManager = new StramDelegationTokenManager(DELEGATION_KEY_UPDATE_INTERVAL,
            DELEGATION_TOKEN_MAX_LIFETIME, DELEGATION_TOKEN_RENEW_INTERVAL,
            DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL);
    sequenceNumber = new AtomicInteger(0);
    try {
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
        if (ugi != null) {
            loginUser = ugi.getUserName();
        }
        tokenManager.startThreads();
    } catch (IOException e) {
        throw new ServletException(e);
    }
}

From source file: com.datatorrent.stram.StreamingAppMasterService.java

License: Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 */
@SuppressWarnings("SleepWhileInLoop")
private void execute() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");
    final Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    LOG.info("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("token: {}", token);
    }
    final Configuration conf = getConfig();
    long tokenLifeTime = (long) (dag.getValue(LogicalPlan.TOKEN_REFRESH_ANTICIPATORY_FACTOR) * Math
            .min(dag.getValue(LogicalPlan.HDFS_TOKEN_LIFE_TIME), dag.getValue(LogicalPlan.RM_TOKEN_LIFE_TIME)));
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    LOG.debug(" expiry token time {}", tokenLifeTime);
    String hdfsKeyTabFile = dag.getValue(LogicalPlan.KEY_TAB_FILE);

    // Register self with ResourceManager
    RegisterApplicationMasterResponse response = amRmClient.registerApplicationMaster(appMasterHostname, 0,
            appMasterTrackingUrl);

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    int maxVcores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max mem {}m and vcores {} capabililty of resources in this cluster ", maxMem, maxVcores);

    // for locality relaxation fall back
    Map<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> requestedResources = Maps
            .newHashMap();

    // Setup heartbeat emitter
    // TODO poll RM every now and then with an empty request to let RM know that we are alive
    // The heartbeat interval after which an AM is timed out by the RM is defined by a config setting:
    // RM_AM_EXPIRY_INTERVAL_MS with default defined by DEFAULT_RM_AM_EXPIRY_INTERVAL_MS
    // The allocate calls to the RM count as heartbeat so, for now, this additional heartbeat emitter
    // is not required.

    int loopCounter = -1;
    List<ContainerId> releasedContainers = new ArrayList<ContainerId>();
    int numTotalContainers = 0;
    // keep track of already requested containers to not request them again while waiting for allocation
    int numRequestedContainers = 0;
    int numReleasedContainers = 0;
    int nextRequestPriority = 0;
    ResourceRequestHandler resourceRequestor = new ResourceRequestHandler();

    YarnClient clientRMService = YarnClient.createYarnClient();

    try {
        // YARN-435
        // we need getClusterNodes to populate the initial node list,
        // subsequent updates come through the heartbeat response
        clientRMService.init(conf);
        clientRMService.start();

        ApplicationReport ar = StramClientUtils.getStartedAppInstanceByName(clientRMService,
                dag.getAttributes().get(DAG.APPLICATION_NAME),
                UserGroupInformation.getLoginUser().getUserName(), dag.getAttributes().get(DAG.APPLICATION_ID));
        if (ar != null) {
            appDone = true;
            dnmgr.shutdownDiagnosticsMessage = String.format(
                    "Application master failed because application %s with the same application name \"%s\" and the same user \"%s\" is already running.",
                    ar.getApplicationId().toString(), ar.getName(), ar.getUser());
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finishApplication(FinalApplicationStatus.FAILED, numTotalContainers);
            return;
        }
        resourceRequestor.updateNodeReports(clientRMService.getNodeReports());
    } catch (Exception e) {
        throw new RuntimeException("Failed to retrieve cluster nodes report.", e);
    } finally {
        clientRMService.stop();
    }

    // check for previously allocated containers
    // as of 2.2, containers won't survive AM restart, but this will change in the future - YARN-1490
    checkContainerStatus();
    FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED;
    final InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);

    while (!appDone) {
        loopCounter++;

        if (UserGroupInformation.isSecurityEnabled() && System.currentTimeMillis() >= expiryTime
                && hdfsKeyTabFile != null) {
            String applicationId = appAttemptID.getApplicationId().toString();
            expiryTime = StramUserLogin.refreshTokens(tokenLifeTime, "." + File.separator + "tmp",
                    applicationId, conf, hdfsKeyTabFile, credentials, rmAddress, true);
        }

        Runnable r;
        while ((r = this.pendingTasks.poll()) != null) {
            r.run();
        }

        // log current state
        /*
         * LOG.info("Current application state: loop=" + loopCounter + ", appDone=" + appDone + ", total=" +
         * numTotalContainers + ", requested=" + numRequestedContainers + ", completed=" + numCompletedContainers +
         * ", failed=" + numFailedContainers + ", currentAllocated=" + this.allAllocatedContainers.size());
         */
        // Sleep before each loop when asking RM for containers
        // to avoid flooding the RM with spurious requests when it
        // may not have any containers available
        try {
            sleep(1000);
        } catch (InterruptedException e) {
            LOG.info("Sleep interrupted " + e.getMessage());
        }

        // Setup request to be sent to RM to allocate containers
        List<ContainerRequest> containerRequests = new ArrayList<ContainerRequest>();
        List<ContainerRequest> removedContainerRequests = new ArrayList<ContainerRequest>();

        // request containers for pending deploy requests
        if (!dnmgr.containerStartRequests.isEmpty()) {
            StreamingContainerAgent.ContainerStartRequest csr;
            while ((csr = dnmgr.containerStartRequests.poll()) != null) {
                if (csr.container.getRequiredMemoryMB() > maxMem) {
                    LOG.warn("Container memory {}m above max threshold of cluster. Using max value {}m.",
                            csr.container.getRequiredMemoryMB(), maxMem);
                    csr.container.setRequiredMemoryMB(maxMem);
                }
                if (csr.container.getRequiredVCores() > maxVcores) {
                    LOG.warn("Container vcores {} above max threshold of cluster. Using max value {}.",
                            csr.container.getRequiredVCores(), maxVcores);
                    csr.container.setRequiredVCores(maxVcores);
                }
                csr.container.setResourceRequestPriority(nextRequestPriority++);
                ContainerRequest cr = resourceRequestor.createContainerRequest(csr, true);
                MutablePair<Integer, ContainerRequest> pair = new MutablePair<Integer, ContainerRequest>(
                        loopCounter, cr);
                requestedResources.put(csr, pair);
                containerRequests.add(cr);
            }
        }

        if (!requestedResources.isEmpty()) {
            //resourceRequestor.clearNodeMapping();
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources
                    .entrySet()) {
                if ((loopCounter - entry.getValue().getKey()) > NUMBER_MISSED_HEARTBEATS) {
                    StreamingContainerAgent.ContainerStartRequest csr = entry.getKey();
                    removedContainerRequests.add(entry.getValue().getRight());
                    ContainerRequest cr = resourceRequestor.createContainerRequest(csr, false);
                    entry.getValue().setLeft(loopCounter);
                    entry.getValue().setRight(cr);
                    containerRequests.add(cr);
                }
            }
        }

        numTotalContainers += containerRequests.size();
        numRequestedContainers += containerRequests.size();
        AllocateResponse amResp = sendContainerAskToRM(containerRequests, removedContainerRequests,
                releasedContainers);
        if (amResp.getAMCommand() != null) {
            LOG.info(" statement executed:{}", amResp.getAMCommand());
            switch (amResp.getAMCommand()) {
            case AM_RESYNC:
            case AM_SHUTDOWN:
                throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");
            default:
                throw new YarnRuntimeException("Received the " + amResp.getAMCommand() + " command from RM");

            }
        }
        releasedContainers.clear();

        // Retrieve list of allocated containers from the response
        List<Container> newAllocatedContainers = amResp.getAllocatedContainers();
        // LOG.info("Got response from RM for container ask, allocatedCnt=" + newAllocatedContainers.size());
        numRequestedContainers -= newAllocatedContainers.size();
        long timestamp = System.currentTimeMillis();
        for (Container allocatedContainer : newAllocatedContainers) {

            LOG.info("Got new container." + ", containerId=" + allocatedContainer.getId() + ", containerNode="
                    + allocatedContainer.getNodeId() + ", containerNodeURI="
                    + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory"
                    + allocatedContainer.getResource().getMemory() + ", priority"
                    + allocatedContainer.getPriority());
            // + ", containerToken" + allocatedContainer.getContainerToken().getIdentifier().toString());

            boolean alreadyAllocated = true;
            StreamingContainerAgent.ContainerStartRequest csr = null;
            for (Map.Entry<StreamingContainerAgent.ContainerStartRequest, MutablePair<Integer, ContainerRequest>> entry : requestedResources
                    .entrySet()) {
                if (entry.getKey().container.getResourceRequestPriority() == allocatedContainer.getPriority()
                        .getPriority()) {
                    alreadyAllocated = false;
                    csr = entry.getKey();
                    break;
                }
            }

            if (alreadyAllocated) {
                LOG.info("Releasing {} as resource with priority {} was already assigned",
                        allocatedContainer.getId(), allocatedContainer.getPriority());
                releasedContainers.add(allocatedContainer.getId());
                numReleasedContainers++;
                numRequestedContainers++;
                continue;
            }
            if (csr != null) {
                requestedResources.remove(csr);
            }

            // allocate resource to container
            ContainerResource resource = new ContainerResource(allocatedContainer.getPriority().getPriority(),
                    allocatedContainer.getId().toString(), allocatedContainer.getNodeId().toString(),
                    allocatedContainer.getResource().getMemory(),
                    allocatedContainer.getResource().getVirtualCores(),
                    allocatedContainer.getNodeHttpAddress());
            StreamingContainerAgent sca = dnmgr.assignContainer(resource, null);

            if (sca == null) {
                // allocated container no longer needed, add release request
                LOG.warn("Container {} allocated but nothing to deploy, going to release this container.",
                        allocatedContainer.getId());
                releasedContainers.add(allocatedContainer.getId());
            } else {
                AllocatedContainer allocatedContainerHolder = new AllocatedContainer(allocatedContainer);
                this.allocatedContainers.put(allocatedContainer.getId().toString(), allocatedContainerHolder);
                ByteBuffer tokens = null;
                if (UserGroupInformation.isSecurityEnabled()) {
                    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                    Token<StramDelegationTokenIdentifier> delegationToken = allocateDelegationToken(
                            ugi.getUserName(), heartbeatListener.getAddress());
                    allocatedContainerHolder.delegationToken = delegationToken;
                    //ByteBuffer tokens = LaunchContainerRunnable.getTokens(delegationTokenManager, heartbeatListener.getAddress());
                    tokens = LaunchContainerRunnable.getTokens(ugi, delegationToken);
                }
                LaunchContainerRunnable launchContainer = new LaunchContainerRunnable(allocatedContainer,
                        nmClient, sca, tokens);
                // Thread launchThread = new Thread(runnableLaunchContainer);
                // launchThreads.add(launchThread);
                // launchThread.start();
                launchContainer.run(); // communication with NMs is now async

                // record container start event
                StramEvent ev = new StramEvent.StartContainerEvent(allocatedContainer.getId().toString(),
                        allocatedContainer.getNodeId().toString());
                ev.setTimestamp(timestamp);
                dnmgr.recordEventAsync(ev);
            }
        }

        // track node updates for future locality constraint allocations
        // TODO: it seems 2.0.4-alpha doesn't give us any updates
        resourceRequestor.updateNodeReports(amResp.getUpdatedNodes());

        // Check the completed containers
        List<ContainerStatus> completedContainers = amResp.getCompletedContainersStatuses();
        // LOG.debug("Got response from RM for container ask, completedCnt=" + completedContainers.size());
        for (ContainerStatus containerStatus : completedContainers) {
            LOG.info("Completed containerId=" + containerStatus.getContainerId() + ", state="
                    + containerStatus.getState() + ", exitStatus=" + containerStatus.getExitStatus()
                    + ", diagnostics=" + containerStatus.getDiagnostics());

            // non complete containers should not be here
            assert (containerStatus.getState() == ContainerState.COMPLETE);

            AllocatedContainer allocatedContainer = allocatedContainers
                    .remove(containerStatus.getContainerId().toString());
            if (allocatedContainer != null && allocatedContainer.delegationToken != null) {
                UserGroupInformation ugi = UserGroupInformation.getLoginUser();
                delegationTokenManager.cancelToken(allocatedContainer.delegationToken, ugi.getUserName());
            }
            int exitStatus = containerStatus.getExitStatus();
            if (0 != exitStatus) {
                if (allocatedContainer != null) {
                    numFailedContainers.incrementAndGet();
                }
                //          if (exitStatus == 1) {
                //            // non-recoverable StreamingContainer failure
                //            appDone = true;
                //            finalStatus = FinalApplicationStatus.FAILED;
                //            dnmgr.shutdownDiagnosticsMessage = "Unrecoverable failure " + containerStatus.getContainerId();
                //            LOG.info("Exiting due to: {}", dnmgr.shutdownDiagnosticsMessage);
                //          }
                //          else {
                // Recoverable failure or process killed (externally or via stop request by AM)
                // also occurs when a container was released by the application but never assigned/launched
                LOG.debug("Container {} failed or killed.", containerStatus.getContainerId());
                dnmgr.scheduleContainerRestart(containerStatus.getContainerId().toString());
                //          }
            } else {
                // container completed successfully
                numCompletedContainers.incrementAndGet();
                LOG.info("Container completed successfully." + ", containerId="
                        + containerStatus.getContainerId());
            }

            String containerIdStr = containerStatus.getContainerId().toString();
            dnmgr.removeContainerAgent(containerIdStr);

            // record container stop event
            StramEvent ev = new StramEvent.StopContainerEvent(containerIdStr, containerStatus.getExitStatus());
            ev.setReason(containerStatus.getDiagnostics());
            dnmgr.recordEventAsync(ev);
        }

        if (dnmgr.forcedShutdown) {
            LOG.info("Forced shutdown due to {}", dnmgr.shutdownDiagnosticsMessage);
            finalStatus = FinalApplicationStatus.FAILED;
            appDone = true;
        } else if (allocatedContainers.isEmpty() && numRequestedContainers == 0
                && dnmgr.containerStartRequests.isEmpty()) {
            LOG.debug("Exiting as no more containers are allocated or requested");
            finalStatus = FinalApplicationStatus.SUCCEEDED;
            appDone = true;
        }

        LOG.debug("Current application state: loop=" + loopCounter + ", appDone=" + appDone + ", total="
                + numTotalContainers + ", requested=" + numRequestedContainers + ", released="
                + numReleasedContainers + ", completed=" + numCompletedContainers + ", failed="
                + numFailedContainers + ", currentAllocated=" + allocatedContainers.size());

        // monitor child containers
        dnmgr.monitorHeartbeat();
    }

    finishApplication(finalStatus, numTotalContainers);
}

From source file: com.huayu.metis.flume.sink.hdfs.HDFSEventSink.java

License: Apache License

private boolean authenticate() {

    // logic for kerberos login
    boolean useSecurity = UserGroupInformation.isSecurityEnabled();

    LOG.info("Hadoop Security enabled: " + useSecurity);

    if (useSecurity) {

        // sanity checking
        if (kerbConfPrincipal.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a principal to use for Kerberos auth.");
            return false;
        }
        if (kerbKeytab.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a keytab to use for Kerberos auth.");
            return false;
        } else {
            //If a keytab is specified, the user intends for it to take effect.
            //HDFSEventSink will halt if the keytab file is nonexistent or unreadable
            File kfile = new File(kerbKeytab);
            if (!(kfile.isFile() && kfile.canRead())) {
                throw new IllegalArgumentException(
                        "The keyTab file: " + kerbKeytab + " is nonexistent or unreadable. "
                                + "Please specify a readable keytab file for Kerberos auth.");
            }
        }

        String principal;
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            principal = SecurityUtil.getServerPrincipal(kerbConfPrincipal, "");
        } catch (IOException e) {
            LOG.error("Host lookup error resolving kerberos principal (" + kerbConfPrincipal
                    + "). Exception follows.", e);
            return false;
        }

        Preconditions.checkNotNull(principal, "Principal must not be null");
        KerberosUser prevUser = staticLogin.get();
        KerberosUser newUser = new KerberosUser(principal, kerbKeytab);

        // be cruel and unusual when user tries to login as multiple principals
        // this isn't really valid with a reconfigure but this should be rare
        // enough to warrant a restart of the agent JVM
        // TODO: find a way to interrogate the entire current config state,
        // since we don't have to be unnecessarily protective if they switch all
        // HDFS sinks to use a different principal all at once.
        Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
                "Cannot use multiple kerberos principals in the same agent. "
                        + " Must restart agent to use new principal or keytab. " + "Previous = %s, New = %s",
                prevUser, newUser);

        // attempt to use cached credential if the user is the same
        // this is polite and should avoid flooding the KDC with auth requests
        UserGroupInformation curUser = null;
        if (prevUser != null && prevUser.equals(newUser)) {
            try {
                curUser = UserGroupInformation.getLoginUser();
            } catch (IOException e) {
                LOG.warn("User unexpectedly had no active login. Continuing with " + "authentication", e);
            }
        }

        if (curUser == null || !curUser.getUserName().equals(principal)) {
            try {
                // static login
                kerberosLogin(this, principal, kerbKeytab);
            } catch (IOException e) {
                LOG.error("Authentication or file read error while attempting to "
                        + "login as kerberos principal (" + principal + ") using " + "keytab (" + kerbKeytab
                        + "). Exception follows.", e);
                return false;
            }
        } else {
            LOG.debug("{}: Using existing principal login: {}", this, curUser);
        }

        // we supposedly got through this unscathed... so store the static user
        staticLogin.set(newUser);
    }

    // hadoop impersonation works with or without kerberos security
    proxyTicket = null;
    if (!proxyUserName.isEmpty()) {
        try {
            proxyTicket = UserGroupInformation.createProxyUser(proxyUserName,
                    UserGroupInformation.getLoginUser());
        } catch (IOException e) {
            LOG.error("Unable to login as proxy user. Exception follows.", e);
            return false;
        }
    }

    UserGroupInformation ugi = null;
    if (proxyTicket != null) {
        ugi = proxyTicket;
    } else if (useSecurity) {
        try {
            ugi = UserGroupInformation.getLoginUser();
        } catch (IOException e) {
            LOG.error("Unexpected error: Unable to get authenticated user after "
                    + "apparent successful login! Exception follows.", e);
            return false;
        }
    }

    if (ugi != null) {
        // dump login information
        AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
        LOG.info("Auth method: {}", authMethod);
        LOG.info(" User name: {}", ugi.getUserName());
        LOG.info(" Using keytab: {}", ugi.isFromKeytab());
        if (authMethod == AuthenticationMethod.PROXY) {
            UserGroupInformation superUser;
            try {
                superUser = UserGroupInformation.getLoginUser();
                LOG.info(" Superuser auth: {}", superUser.getAuthenticationMethod());
                LOG.info(" Superuser name: {}", superUser.getUserName());
                LOG.info(" Superuser using keytab: {}", superUser.isFromKeytab());
            } catch (IOException e) {
                LOG.error("Unexpected error: unknown superuser impersonating proxy.", e);
                return false;
            }
        }

        LOG.info("Logged in as user {}", ugi.getUserName());

        return true;
    }

    return true;
}

From source file: com.huayu.metis.flume.sink.hdfs.HDFSEventSink.java

License: Apache License

/**
 * Static synchronized method for static Kerberos login. <br/>
 * Static synchronized due to a thundering herd problem when multiple Sinks
 * attempt to log in using the same principal at the same time with the
 * intention of impersonating different users (or even the same user).
 * If this is not controlled, MIT Kerberos v5 believes it is seeing a replay
 * attack and it returns:
 * <blockquote>Request is a replay (34) - PROCESS_TGS</blockquote>
 * In addition, since the underlying Hadoop APIs we are using for
 * impersonation are static, we define this method as static as well.
 *
 * @param principal Fully-qualified principal to use for authentication.
 * @param keytab    Location of keytab file containing credentials for principal.
 * @return Logged-in user
 * @throws java.io.IOException if login fails.
 */
private static synchronized UserGroupInformation kerberosLogin(HDFSEventSink sink, String principal,
        String keytab) throws IOException {

    // if we are the second user through the lock, the login should already be
    // available statically if the login was successful
    UserGroupInformation curUser = null;
    try {
        curUser = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        // not a big deal but this shouldn't typically happen because it will
        // generally fall back to the UNIX user
        LOG.debug("Unable to get login user before Kerberos auth attempt.", e);
    }

    // we already have logged in successfully
    if (curUser != null && curUser.getUserName().equals(principal)) {
        LOG.debug("{}: Using existing principal ({}): {}", new Object[] { sink, principal, curUser });

        // no principal found
    } else {

        LOG.info("{}: Attempting kerberos login as principal ({}) from keytab " + "file ({})",
                new Object[] { sink, principal, keytab });

        // attempt static kerberos login
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
        curUser = UserGroupInformation.getLoginUser();
    }

    return curUser;
}

From source file: com.pigai.hadoop.HttpFSFileSystem.java

License: Apache License

/**
 * Called after a new FileSystem instance is constructed.
 *
 * @param name
 *            a uri whose authority section names the host, port, etc. for
 *            this FileSystem
 * @param conf
 *            the configuration
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    doAs = ugi.getUserName();
    //TODO: the user name from the current UGI is overridden with a hardcoded value here
    doAs = "hdfs";
    super.initialize(name, conf);
    try {
        uri = new URI(name.getScheme() + "://" + name.getHost() + ":" + name.getPort());
    } catch (URISyntaxException ex) {
        throw new IOException(ex);
    }
}

From source file: com.rim.logdriver.sawmill.Authenticator.java

License: Apache License

private boolean authenticate(String proxyUserName) {
    UserGroupInformation proxyTicket;

    // logic for kerberos login
    boolean useSecurity = UserGroupInformation.isSecurityEnabled();

    LOG.info("Hadoop Security enabled: " + useSecurity);

    if (useSecurity) {
        // sanity checking
        if (kerbConfPrincipal.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a principal to use for Kerberos auth.");
            return false;
        }
        if (kerbKeytab.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a keytab to use for Kerberos auth.");
            return false;
        }

        String principal;
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            principal = SecurityUtil.getServerPrincipal(kerbConfPrincipal, "");
        } catch (IOException e) {
            LOG.error("Host lookup error resolving kerberos principal (" + kerbConfPrincipal
                    + "). Exception follows.", e);
            return false;
        }

        Preconditions.checkNotNull(principal, "Principal must not be null");
        KerberosUser prevUser = staticLogin.get();
        KerberosUser newUser = new KerberosUser(principal, kerbKeytab);

        // be cruel and unusual when user tries to login as multiple principals
        // this isn't really valid with a reconfigure but this should be rare
        // enough to warrant a restart of the agent JVM
        // TODO: find a way to interrogate the entire current config state,
        // since we don't have to be unnecessarily protective if they switch all
        // HDFS sinks to use a different principal all at once.
        Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
                "Cannot use multiple kerberos principals in the same agent. "
                        + " Must restart agent to use new principal or keytab. " + "Previous = %s, New = %s",
                prevUser, newUser);

        // attempt to use cached credential if the user is the same
        // this is polite and should avoid flooding the KDC with auth requests
        UserGroupInformation curUser = null;
        if (prevUser != null && prevUser.equals(newUser)) {
            try {
                curUser = UserGroupInformation.getLoginUser();
            } catch (IOException e) {
                LOG.warn("User unexpectedly had no active login. Continuing with " + "authentication", e);
            }
        }

        if (curUser == null || !curUser.getUserName().equals(principal)) {
            try {
                // static login
                kerberosLogin(this, principal, kerbKeytab);
            } catch (IOException e) {
                LOG.error("Authentication or file read error while attempting to "
                        + "login as kerberos principal (" + principal + ") using " + "keytab (" + kerbKeytab
                        + "). Exception follows.", e);
                return false;
            }
        } else {
            LOG.debug("{}: Using existing principal login: {}", this, curUser);
        }

        try {
            if (!UserGroupInformation.getLoginUser().isFromKeytab()) {
                LOG.error("Not using a keytab for authentication. Shutting down.");
                System.exit(1);
            }
        } catch (IOException e) {
            LOG.error("Failed to get login user.", e);
            System.exit(1);
        }

        // we supposedly got through this unscathed... so store the static user
        staticLogin.set(newUser);
    }

    // hadoop impersonation works with or without kerberos security
    proxyTicket = null;
    if (!proxyUserName.isEmpty()) {
        try {
            proxyTicket = UserGroupInformation.createProxyUser(proxyUserName,
                    UserGroupInformation.getLoginUser());
        } catch (IOException e) {
            LOG.error("Unable to login as proxy user. Exception follows.", e);
            return false;
        }
    }

    UserGroupInformation ugi = null;
    if (proxyTicket != null) {
        ugi = proxyTicket;
    } else if (useSecurity) {
        try {
            ugi = UserGroupInformation.getLoginUser();
        } catch (IOException e) {
            LOG.error("Unexpected error: Unable to get authenticated user after "
                    + "apparent successful login! Exception follows.", e);
            return false;
        }
    }

    if (ugi != null) {
        // dump login information
        AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
        LOG.info("Auth method: {}", authMethod);
        LOG.info(" User name: {}", ugi.getUserName());
        LOG.info(" Using keytab: {}", ugi.isFromKeytab());
        if (authMethod == AuthenticationMethod.PROXY) {
            UserGroupInformation superUser;
            try {
                superUser = UserGroupInformation.getLoginUser();
                LOG.info(" Superuser auth: {}", superUser.getAuthenticationMethod());
                LOG.info(" Superuser name: {}", superUser.getUserName());
                LOG.info(" Superuser using keytab: {}", superUser.isFromKeytab());
            } catch (IOException e) {
                LOG.error("Unexpected error: unknown superuser impersonating proxy.", e);
                return false;
            }
        }

        LOG.info("Logged in as user {}", ugi.getUserName());

        UGIState state = new UGIState();
        state.ugi = proxyTicket;
        state.lastAuthenticated = System.currentTimeMillis();
        proxyUserMap.put(proxyUserName, state);

        return true;
    }

    return true;
}

From source file: com.streamsets.datacollector.security.TestHadoopSecurityUtil.java

License: Apache License

@Test
public void testGetLoginUser() throws Exception {
    final Configuration conf = new Configuration();
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    UserGroupInformation ugi = HadoopSecurityUtil.getLoginUser(conf);
    Assert.assertEquals(loginUser.getUserName(), ugi.getUserName());
}

From source file: com.streamsets.datacollector.security.TestHadoopSecurityUtil.java

License: Apache License

@Test
public void testGetProxyUser() throws Exception {
    final UserGroupInformation fooUgi = UserGroupInformation.createUserForTesting("foo",
            new String[] { "all" });
    UserGroupInformation ugi = HadoopSecurityUtil.getProxyUser("proxy", fooUgi);
    Assert.assertEquals("proxy", ugi.getUserName());
}