Example usage for org.apache.hadoop.security UserGroupInformation createRemoteUser

List of usage examples for org.apache.hadoop.security UserGroupInformation createRemoteUser

Introduction

On this page you can find example usage for org.apache.hadoop.security UserGroupInformation createRemoteUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createRemoteUser(String user) 

Document

Create a user from a login name.
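
Because the returned UserGroupInformation is built purely from the login name (no Kerberos login is performed and no credentials are attached), callers almost always follow createRemoteUser with doAs(...) and, on secure clusters, addCredentials(...). A minimal sketch of that basic pattern, with an invented user name and path:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateRemoteUserSketch {
    public static void main(String[] args) throws Exception {
        // Build a UGI for the login name "alice"; nothing is authenticated here.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");

        // Perform filesystem access as that user.
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                FileSystem fs = FileSystem.get(new Configuration());
                return fs.exists(new Path("/user/alice"));
            }
        });
        System.out.println("/user/alice exists: " + exists);
    }
}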

Usage

From source file:co.cask.cdap.internal.app.runtime.ProgramRunners.java

License:Apache License

/**
 * Impersonates as the given user to perform an action.
 *
 * @param user user to impersonate
 * @param callable action to perform
 */
public static <T> T runAsUser(String user, final Callable<T> callable)
        throws IOException, InterruptedException {
    return UserGroupInformation.createRemoteUser(user).doAs(new PrivilegedExceptionAction<T>() {
        @Override
        public T run() throws Exception {
            return callable.call();
        }
    });
}
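
A possible call site for the helper above (a sketch; the callable body and user name are illustrative only):

import java.util.concurrent.Callable;

import co.cask.cdap.internal.app.runtime.ProgramRunners;
import org.apache.hadoop.security.UserGroupInformation;

public class RunAsUserSketch {
    public static void main(String[] args) throws Exception {
        // Runs the callable as "alice" and prints the effective short user name.
        String effectiveUser = ProgramRunners.runAsUser("alice", new Callable<String>() {
            @Override
            public String call() throws Exception {
                return UserGroupInformation.getCurrentUser().getShortUserName();
            }
        });
        System.out.println("Ran as: " + effectiveUser);
    }
}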

From source file:co.cask.cdap.security.impersonation.RemoteUGIProvider.java

License:Apache License

@Override
protected UserGroupInformation createUGI(ImpersonationInfo impersonationInfo) throws IOException {
    String credentialsURI = executeRequest(impersonationInfo).getResponseBodyAsString();
    LOG.debug("Received response: {}", credentialsURI);

    Location location = locationFactory.create(URI.create(credentialsURI));
    try {
        UserGroupInformation impersonatedUGI = UserGroupInformation
                .createRemoteUser(impersonationInfo.getPrincipal());
        impersonatedUGI.addCredentials(readCredentials(location));
        return impersonatedUGI;
    } finally {
        try {
            if (!location.delete()) {
                LOG.warn("Failed to delete location: {}", location);
            }
        } catch (IOException e) {
            LOG.warn("Exception raised when deleting location {}", location, e);
        }
    }
}
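
The notable part of this example is that the remote UGI starts with no credentials, so delegation tokens fetched elsewhere are attached explicitly via addCredentials. A reduced sketch of that idea, reading a token file previously written with Credentials.writeTokenStorageFile (the class name and parameters are illustrative):

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class CredentialedRemoteUserSketch {
    static UserGroupInformation load(String principal, File tokenFile) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(principal);
        // Attach the delegation tokens; without them, secure RPCs made under
        // this UGI would fail.
        ugi.addCredentials(Credentials.readTokenStorageFile(tokenFile, new Configuration()));
        return ugi;
    }
}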

From source file:com.cloudera.beeswax.BeeswaxServiceImpl.java

License:Apache License

private <T> T doWithState(RunningQueryState state, PrivilegedExceptionAction<T> action)
        throws BeeswaxException {
    try {
        UserGroupInformation ugi;
        if (UserGroupInformation.isSecurityEnabled()) {
            ugi = UserGroupInformation.createProxyUser(state.query.hadoop_user,
                    UserGroupInformation.getLoginUser());
        } else {
            ugi = UserGroupInformation.createRemoteUser(state.query.hadoop_user);
        }
        return ugi.doAs(action);
    } catch (UndeclaredThrowableException e) {
        if (e.getUndeclaredThrowable() instanceof PrivilegedActionException) {
            Throwable bwe = e.getUndeclaredThrowable().getCause();
            if (bwe instanceof BeeswaxException) {
                LOG.error("Caught BeeswaxException", (BeeswaxException) bwe);
                throw (BeeswaxException) bwe;
            }
        }
        LOG.error("Caught unexpected exception.", e);
        throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
    } catch (IOException e) {
        LOG.error("Caught IOException", e);
        throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
    } catch (InterruptedException e) {
        LOG.error("Caught InterruptedException", e);
        throw new BeeswaxException(e.getMessage(), state.handle.log_context, state.handle);
    }
}
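
The branch above (proxy user when Kerberos is enabled, plain remote user otherwise) recurs in several of the examples that follow. A stripped-down sketch of just that decision, with an invented user name:

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class UgiSelectionSketch {
    /**
     * Returns a UGI for the requesting user: a proxy UGI backed by the
     * service's login user when security is enabled, otherwise a plain
     * remote UGI with no credentials.
     */
    static UserGroupInformation ugiFor(String requestUser) throws IOException {
        if (UserGroupInformation.isSecurityEnabled()) {
            return UserGroupInformation.createProxyUser(requestUser, UserGroupInformation.getLoginUser());
        }
        return UserGroupInformation.createRemoteUser(requestUser);
    }

    public static void main(String[] args) throws IOException {
        System.out.println(ugiFor("alice"));
    }
}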

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.NFS4Handler.java

License:Apache License

/**
 * Process a CompoundRequest and return a CompoundResponse.
 */
public CompoundResponse process(final RPCRequest rpcRequest, final CompoundRequest compoundRequest,
        final InetAddress clientAddress, final String sessionID) {
    Credentials creds = (Credentials) compoundRequest.getCredentials();
    // FIXME below is a hack regarding CredentialsUnix
    if (creds == null || !(creds instanceof AuthenticatedCredentials)) {
        CompoundResponse response = new CompoundResponse();
        response.setStatus(NFS4ERR_WRONGSEC);
        return response;
    }
    try {
        UserGroupInformation sudoUgi;
        String username = creds.getUsername(mConfiguration);
        if (UserGroupInformation.isSecurityEnabled()) {
            sudoUgi = UserGroupInformation.createProxyUser(username, UserGroupInformation.getCurrentUser());
        } else {
            sudoUgi = UserGroupInformation.createRemoteUser(username);
        }
        final NFS4Handler server = this;
        final Session session = new Session(rpcRequest.getXid(), compoundRequest, mConfiguration, clientAddress,
                sessionID);
        return sudoUgi.doAs(new PrivilegedExceptionAction<CompoundResponse>() {

            public CompoundResponse run() throws Exception {
                String username = UserGroupInformation.getCurrentUser().getShortUserName();
                int lastStatus = NFS4_OK;
                List<OperationResponse> responses = Lists.newArrayList();
                for (OperationRequest request : compoundRequest.getOperations()) {
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug(sessionID + " " + request.getClass().getSimpleName() + " for " + username);
                    }
                    OperationRequestHandler<OperationRequest, OperationResponse> handler = OperationFactory
                            .getHandler(request.getID());
                    OperationResponse response = handler.handle(server, session, request);
                    responses.add(response);
                    lastStatus = response.getStatus();
                    if (lastStatus != NFS4_OK) {
                        LOGGER.warn(sessionID + " Quitting due to " + lastStatus + " on "
                                + request.getClass().getSimpleName() + " for " + username);
                        break;
                    }
                    server.incrementMetric("NFS_" + request.getClass().getSimpleName(), 1);
                    server.incrementMetric("NFS_OPERATIONS", 1);
                }
                CompoundResponse response = new CompoundResponse();
                response.setStatus(lastStatus);
                response.setOperations(responses);
                server.incrementMetric("NFS_COMMANDS", 1);
                return response;
            }
        });
    } catch (Exception ex) {
        if (ex instanceof UndeclaredThrowableException && ex.getCause() != null) {
            Throwable throwable = ex.getCause();
            if (throwable instanceof Exception) {
                ex = (Exception) throwable;
            } else if (throwable instanceof Error) {
                // something really bad happened
                LOGGER.error(sessionID + " Unhandled Error", throwable);
                throw (Error) throwable;
            } else {
                LOGGER.error(sessionID + " Unhandled Throwable", throwable);
                throw new RuntimeException(throwable);
            }
        }
        LOGGER.warn(sessionID + " Unhandled Exception", ex);
        CompoundResponse response = new CompoundResponse();
        if (ex instanceof NFS4Exception) {
            response.setStatus(((NFS4Exception) ex).getError());
        } else if (ex instanceof UnsupportedOperationException) {
            response.setStatus(NFS4ERR_NOTSUPP);
        } else {
            LOGGER.warn(sessionID + " Setting SERVERFAULT for " + clientAddress + " for "
                    + compoundRequest.getOperations());
            response.setStatus(NFS4ERR_SERVERFAULT);
        }
        return response;
    }
}

From source file:com.cloudera.hue.SudoFsShell.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length < 1) {
        usage();
        System.exit(1);
    }

    String username = args[0];
    final String[] shellArgs = new String[args.length - 1];
    System.arraycopy(args, 1, shellArgs, 0, args.length - 1);

    UserGroupInformation sudoUgi;
    if (UserGroupInformation.isSecurityEnabled()) {
        sudoUgi = UserGroupInformation.createProxyUser(username, UserGroupInformation.getCurrentUser());
    } else {
        sudoUgi = UserGroupInformation.createRemoteUser(username);
    }

    sudoUgi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FsShell.main(shellArgs);
            return null;
        }
    });
}
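
Since everything after the first argument is forwarded to FsShell, this class can be launched with the standard "hadoop CLASSNAME" runner once it is on the Hadoop classpath, for example (user and path are illustrative): hadoop com.cloudera.hue.SudoFsShell alice -ls /user/alice. On a secure cluster the createProxyUser branch also requires the process's login user to be permitted as a proxy user via the hadoop.proxyuser.* settings.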

From source file:com.cloudera.impala.util.RequestPoolService.java

License:Apache License

/**
 * Indicates if a user has access to the pool.
 *
 * @param pool the pool to check if the user has access to. NOTE: it should always be
 * called with a pool returned by the {@link #assignToPool(String, String)} method.
 * @param user the user to check if it has access to the pool.
 * @return True if the user has access to the pool.
 */
@VisibleForTesting
boolean hasAccess(String pool, String user) {
    Preconditions.checkState(running_.get());
    Preconditions.checkArgument(!Strings.isNullOrEmpty(pool));
    Preconditions.checkArgument(!Strings.isNullOrEmpty(user));
    // Convert the user name to a short name (e.g. 'user1@domain' to 'user1') because
    // the UserGroupInformation will check group membership which should always be done
    // on the short name of the principal.
    String shortName = new User(user).getShortName();
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(shortName);
    return allocationConf_.get().hasAccess(pool, QueueACL.SUBMIT_APPLICATIONS, ugi);
}
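
Group membership for a UGI built this way comes from Hadoop's configured group mapping on the local host, not from anything carried by the UGI itself, which is why the principal is first reduced to its short name. A small sketch of the lookup (the user name is illustrative):

import java.util.Arrays;

import org.apache.hadoop.security.UserGroupInformation;

public class GroupLookupSketch {
    public static void main(String[] args) {
        // Groups are resolved through the configured GroupMappingServiceProvider
        // (by default, the local OS accounts).
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
        System.out.println(Arrays.toString(ugi.getGroupNames()));
    }
}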

From source file:com.datatorrent.stram.webapp.StramWebServices.java

License:Apache License

Boolean hasAccess(HttpServletRequest request) {
    String remoteUser = request.getRemoteUser();
    if (remoteUser != null) {
        UserGroupInformation callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
        if (callerUGI != null) {
            return false;
        }
    }
    return true;
}

From source file:com.ebay.jetstream.event.processor.hdfs.HdfsClient.java

License:MIT License

protected void initHdfs() {
    hdpConf = new Configuration();
    final String hdfsUrl = config.getHdfsUrl();
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(config.getUser());

    try {
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                hdpConf.set("hadoop.job.ugi", config.getUser());
                hdpConf.set("fs.defaultFS", hdfsUrl);
                if (hdfsUrl.startsWith("hdfs")) {
                    for (Object keyObj : config.getHadoopProperties().keySet()) {
                        String key = (String) keyObj;
                        hdpConf.set(key, config.getHadoopProperties().getProperty(key));
                    }
                    fs = new DistributedFileSystem();
                    fs.initialize(URI.create(hdfsUrl), hdpConf);
                } else {
                    fs = FileSystem.get(hdpConf);
                }
                LOGGER.log(Level.INFO, "Connected to HDFS with the following properties: hdfsUrl " + hdfsUrl);
                return null;
            }

        });
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, "Error initializing HdfsClient. Error:" + e);
    }
}
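
Wrapping the FileSystem initialization in doAs matters here because a FileSystem (and the DFSClient behind it) binds to the UGI that is current when it is created and keeps using it for later RPCs; creating it outside the doAs block would tie it to the process's login user instead of config.getUser().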

From source file:com.github.hdl.tensorflow.yarn.app.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException, InterruptedException {
    LOG.info("Starting ApplicationMaster");

    // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class
    // are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.AbstractCallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    appMasterHostname = System.getenv(Environment.NM_HOST.name());
    TFApplicationRpcServer rpcServer = new TFApplicationRpcServer(appMasterHostname, new RpcForClient());
    appMasterRpcPort = rpcServer.getRpcPort();
    rpcServer.startRpcServiceThread();

    // Register self with ResourceManager
    // This will start heartbeating to the RM

    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    long maxMem = response.getMaximumResourceCapability().getMemorySize();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capability of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    for (Container container : previousAMRunningContainers) {
        launchedContainers.add(container.getId());
    }
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

}
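
Once appSubmitterUgi carries the application's tokens, later HDFS or YARN calls can be issued under it with doAs; the tokens added through addCredentials are what authenticate those calls on a secure cluster, since createRemoteUser alone attaches none. A sketch of such a follow-up call (class name and path are made up):

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class SubmitterUgiSketch {
    static FileStatus statAsSubmitter(final UserGroupInformation appSubmitterUgi, final Configuration conf)
            throws Exception {
        return appSubmitterUgi.doAs(new PrivilegedExceptionAction<FileStatus>() {
            @Override
            public FileStatus run() throws Exception {
                // Authenticates with the delegation tokens attached to appSubmitterUgi.
                FileSystem fs = FileSystem.get(conf);
                return fs.getFileStatus(new Path("/user/" + appSubmitterUgi.getShortUserName()));
            }
        });
    }
}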

From source file:com.github.sakserv.minicluster.impl.KdcLocalCluster.java

License:Apache License

@Override
public void start() throws Exception {

    LOG.info("KDC: Starting MiniKdc");
    configure();
    miniKdc = new MiniKdc(conf, new File(baseDir));
    miniKdc.start();

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("guest");
    UserGroupInformation.setLoginUser(ugi);
    String username = UserGroupInformation.getLoginUser().getShortUserName();

    List<String> temp = new ArrayList<>(principals);
    temp.add(username);
    this.principals = Collections.unmodifiableList(temp);

    principals.forEach(p -> {
        try {
            File keytab = new File(baseDir, p + ".keytab");
            LOG.info("KDC: Creating keytab for {} in {}", p, keytab);
            miniKdc.createPrincipal(keytab, p, getKrbPrincipal(p), getKrbPrincipalWithRealm(p));
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
    });
    refreshDefaultRealm();
    prepareSecureConfiguration(username);
}
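
Note that pairing createRemoteUser with UserGroupInformation.setLoginUser, as done here, overrides the process-wide login user; that is reasonable inside a test fixture such as this mini KDC cluster, but it is not something production code should normally do.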