Example usage for org.apache.hadoop.security Credentials addToken

List of usage examples for org.apache.hadoop.security Credentials addToken

Introduction

This page collects example usages of org.apache.hadoop.security Credentials addToken, taken from open-source projects.

Prototype

public void addToken(Text alias, Token<? extends TokenIdentifier> t) 

Document

Add a token in the storage (in memory).
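
Before the project examples, here is a minimal, self-contained sketch of the call. The empty Token and the service name are placeholders for illustration; real tokens come from a service such as a NameNode or the YARN ResourceManager.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class AddTokenSketch {
    public static void main(String[] args) {
        Credentials credentials = new Credentials();

        // Placeholder token; a real one would come from e.g.
        // FileSystem#getDelegationToken or YarnClient#getRMDelegationToken.
        Token<TokenIdentifier> token = new Token<>();
        token.setService(new Text("my-service:8020")); // hypothetical service address

        // Store the token under an alias; most callers reuse the token's
        // own service name as the alias, as the examples below do.
        credentials.addToken(token.getService(), token);

        // The token can be looked up again under the same alias.
        System.out.println("Stored tokens: " + credentials.numberOfTokens());
        System.out.println("Lookup: " + credentials.getToken(new Text("my-service:8020")));
    }
}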

Usage

From source file:UnmanagedAMLauncher.java

License:Apache License

public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException {
    Credentials credentials = new Credentials();
    Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId());
    // Service will be empty but that's okay, we are just passing down only
    // AMRMToken down to the real AM which eventually sets the correct
    // service-address.
    credentials.addToken(token.getService(), token);
    File tokenFile = File.createTempFile("unmanagedAMRMToken", "", new File(System.getProperty("user.dir")));
    try {
        FileUtil.chmod(tokenFile.getAbsolutePath(), "600");
    } catch (InterruptedException ex) {
        throw new RuntimeException(ex);
    }
    tokenFile.deleteOnExit();
    DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile, true));
    credentials.writeTokenStorageToStream(os);
    os.close();

    Map<String, String> env = System.getenv();
    ArrayList<String> envAMList = new ArrayList<String>();
    boolean setClasspath = false;
    for (Map.Entry<String, String> entry : env.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        if (key.equals("CLASSPATH")) {
            setClasspath = true;
            if (classpath != null) {
                value = value + File.pathSeparator + classpath;
            }
        }
        envAMList.add(key + "=" + value);
    }

    if (!setClasspath && classpath != null) {
        envAMList.add("CLASSPATH=" + classpath);
    }
    ContainerId containerId = ContainerId.newContainerId(attemptId, 0);

    String hostname = InetAddress.getLocalHost().getHostName();
    envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
    envAMList.add(Environment.NM_HOST.name() + "=" + hostname);
    envAMList.add(Environment.NM_HTTP_PORT.name() + "=0");
    envAMList.add(Environment.NM_PORT.name() + "=0");
    envAMList.add(Environment.LOCAL_DIRS.name() + "=/tmp");
    envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis());

    envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath());

    String[] envAM = new String[envAMList.size()];
    Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM));

    final BufferedReader errReader = new BufferedReader(new InputStreamReader(amProc.getErrorStream()));
    final BufferedReader inReader = new BufferedReader(new InputStreamReader(amProc.getInputStream()));

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.err.println(line);
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    Thread outThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = inReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.out.println(line);
                    line = inReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the out stream", ioe);
            }
        }
    };
    try {
        errThread.start();
        outThread.start();
    } catch (IllegalStateException ise) {
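        // ignored: Thread#start throws IllegalStateException only for an
        // already-started thread, which cannot happen for these fresh threads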
    }

    // wait for the process to finish and check the exit code
    try {
        int exitCode = amProc.waitFor();
        LOG.info("AM process exited with value: " + exitCode);
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        amCompleted = true;
    }

    try {
        // make sure that the error thread exits
        // on Windows these threads sometimes get stuck and hang the execution
        // timeout and join later after destroying the process.
        errThread.join();
        outThread.join();
        errReader.close();
        inReader.close();
    } catch (InterruptedException ie) {
        LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie);
    } catch (IOException ioe) {
        LOG.warn("Error while closing the error/out stream", ioe);
    }
    amProc.destroy();
}
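
On the receiving side, the launched AM can locate the file through the same environment variable and load the token before contacting the ResourceManager. A minimal sketch, assuming Hadoop 2.x APIs:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ApplicationConstants;

public class TokenFileReaderSketch {
    public static void loadAmrmToken() throws IOException {
        // The launcher exported CONTAINER_TOKEN_FILE_ENV_NAME above.
        String tokenFilePath = System.getenv(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME);
        Credentials credentials = Credentials.readTokenStorageFile(new File(tokenFilePath), new Configuration());
        // Merging into the current UGI makes the token visible to RPC clients.
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    }
}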

From source file:alluxio.yarn.Client.java

License:Apache License

private void setupContainerLaunchContext() throws IOException, YarnException {
    Map<String, String> applicationMasterArgs = ImmutableMap.<String, String>of("-num_workers",
            Integer.toString(mNumWorkers), "-master_address", mMasterAddress, "-resource_path", mResourcePath);

    final String amCommand = YarnUtils.buildCommand(YarnContainerType.APPLICATION_MASTER,
            applicationMasterArgs);

    System.out.println("ApplicationMaster command: " + amCommand);
    mAmContainer.setCommands(Collections.singletonList(amCommand));

    // Setup local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("alluxio.tar.gz",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.tar.gz"));
    localResources.put("alluxio-yarn-setup.sh",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio-yarn-setup.sh"));
    localResources.put("alluxio.jar",
            YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.jar"));
    mAmContainer.setLocalResources(localResources);

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    mAmContainer.setEnvironment(appMasterEnv);

    // Set up security tokens for launching our ApplicationMaster container.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = mYarnConf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(mYarnConf);
        // getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        // getting yarn resource manager token
        org.apache.hadoop.conf.Configuration config = mYarnClient.getConfig();
        Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(
                mYarnClient.getRMDelegationToken(new org.apache.hadoop.io.Text(tokenRenewer)),
                ClientRMProxy.getRMDelegationTokenService(config));
        LOG.info("Added RM delegation token: " + token);
        credentials.addToken(token.getService(), token);

        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        mAmContainer.setTokens(buffer);
    }
}
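
The ByteBuffer handed to setTokens(...) is just the serialized Credentials. Inside the launched container it can be decoded again with the matching read call; a hedged sketch:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.security.Credentials;

public class ContainerTokensSketch {
    public static Credentials readTokens(ByteBuffer tokens) throws IOException {
        DataInputByteBuffer in = new DataInputByteBuffer();
        in.reset(tokens.duplicate()); // duplicate() keeps the caller's position untouched
        Credentials credentials = new Credentials();
        credentials.readTokenStorageStream(in);
        return credentials;
    }
}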

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger)
        throws HadoopSecurityManagerException {

    final String userToProxy = props.getString(USER_TO_PROXY);

    logger.info("Getting hadoop tokens for " + userToProxy);

    final Credentials cred = new Credentials();

    if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) {
        try {
            logger.info("Pre-fetching Hive MetaStore token from hive");

            HiveConf hiveConf = new HiveConf();
            logger.info("HiveConf.ConfVars.METASTOREURIS.varname "
                    + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));
            logger.info("HIVE_METASTORE_SASL_ENABLED " + hiveConf.get(HIVE_METASTORE_SASL_ENABLED));
            logger.info("HIVE_METASTORE_KERBEROS_PRINCIPAL " + hiveConf.get(HIVE_METASTORE_KERBEROS_PRINCIPAL));
            logger.info("HIVE_METASTORE_LOCAL " + hiveConf.get(HIVE_METASTORE_LOCAL));

            HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
            String hcatTokenStr = hiveClient.getDelegationToken(userToProxy,
                    UserGroupInformation.getLoginUser().getShortUserName());
            Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
            hcatToken.decodeFromUrlString(hcatTokenStr);
            logger.info("Created hive metastore token: " + hcatTokenStr);
            logger.info("Token kind: " + hcatToken.getKind());
            logger.info("Token id: " + hcatToken.getIdentifier());
            logger.info("Token service: " + hcatToken.getService());
            cred.addToken(hcatToken.getService(), hcatToken);
        } catch (Exception e) {
            e.printStackTrace();
            logger.error("Failed to get hive metastore token." + e.getMessage() + e.getCause());
        } catch (Throwable t) {
            t.printStackTrace();
            logger.error("Failed to get hive metastore token." + t.getMessage() + t.getCause());
        }
    }

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {
                logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": "
                        + props.getBoolean(OBTAIN_NAMENODE_TOKEN));
                if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) {
                    FileSystem fs = FileSystem.get(conf);
                    // check if we get the correct FS, and most importantly, the
                    // conf
                    logger.info("Getting DFS token from " + fs.getUri());
                    Token<?> fsToken = fs.getDelegationToken(userToProxy);
                    if (fsToken == null) {
                        logger.error("Failed to fetch DFS token for ");
                        throw new HadoopSecurityManagerException(
                                "Failed to fetch DFS token for " + userToProxy);
                    }
                    logger.info("Created DFS token: " + fsToken.toString());
                    logger.info("Token kind: " + fsToken.getKind());
                    logger.info("Token id: " + fsToken.getIdentifier());
                    logger.info("Token service: " + fsToken.getService());
                    cred.addToken(fsToken.getService(), fsToken);
                }

                if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) {
                    JobClient jobClient = new JobClient(new JobConf());
                    logger.info("Pre-fetching JT token from JobTracker");

                    Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                    if (mrdt == null) {
                        logger.error("Failed to fetch JT token");
                        throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                    }
                    logger.info("Created JT token: " + mrdt.toString());
                    logger.info("Token kind: " + mrdt.getKind());
                    logger.info("Token id: " + mrdt.getIdentifier());
                    logger.info("Token service: " + mrdt.getService());
                    cred.addToken(mrdt.getService(), mrdt);
                }
            }
        });

        FileOutputStream fos = null;
        DataOutputStream dos = null;
        try {
            fos = new FileOutputStream(tokenFile);
            dos = new DataOutputStream(fos);
            cred.writeTokenStorageToStream(dos);
        } finally {
            if (dos != null) {
                dos.close();
            }
            if (fos != null) {
                fos.close();
            }
        }

        // stash them to cancel after use.
        logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());

    } catch (Exception e) {
        e.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());
    } catch (Throwable t) {
        t.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + t.getMessage() + t.getCause());
    }
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger)
        throws HadoopSecurityManagerException {

    final String userToProxy = props.getString(USER_TO_PROXY);

    logger.info("Getting hadoop tokens based on props for " + userToProxy);

    final Credentials cred = new Credentials();

    if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) {
        try {

            // first we fetch and save the default hcat token.
            logger.info("Pre-fetching default Hive MetaStore token from hive");

            HiveConf hiveConf = new HiveConf();
            Token<DelegationTokenIdentifier> hcatToken = fetchHcatToken(userToProxy, hiveConf, null, logger);

            cred.addToken(hcatToken.getService(), hcatToken);

            // check and see if user specified the extra hcat locations we need to
            // look at and fetch token.
            final List<String> extraHcatLocations = props.getStringList(EXTRA_HCAT_LOCATION);
            if (Collections.EMPTY_LIST != extraHcatLocations) {
                logger.info("Need to pre-fetch extra metaStore tokens from hive.");

                // start to process the user inputs.
                for (String thriftUrl : extraHcatLocations) {
                    logger.info("Pre-fetching metaStore token from : " + thriftUrl);

                    hiveConf = new HiveConf();
                    hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrl);
                    hcatToken = fetchHcatToken(userToProxy, hiveConf, thriftUrl, logger);
                    cred.addToken(hcatToken.getService(), hcatToken);
                }

            }

        } catch (Throwable t) {
            String message = "Failed to get hive metastore token." + t.getMessage() + t.getCause();
            logger.error(message, t);
            throw new HadoopSecurityManagerException(message);
        }
    }

    if (props.getBoolean(OBTAIN_JOBHISTORYSERVER_TOKEN, false)) {
        YarnRPC rpc = YarnRPC.create(conf);
        final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);

        logger.debug("Connecting to HistoryServer at: " + serviceAddr);
        HSClientProtocol hsProxy = (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
                NetUtils.createSocketAddr(serviceAddr), conf);
        logger.info("Pre-fetching JH token from job history server");

        Token<?> jhsdt = null;
        try {
            jhsdt = getDelegationTokenFromHS(hsProxy);
        } catch (Exception e) {
            logger.error("Failed to fetch JH token", e);
            throw new HadoopSecurityManagerException("Failed to fetch JH token for " + userToProxy);
        }

        if (jhsdt == null) {
            logger.error("getDelegationTokenFromHS() returned null");
            throw new HadoopSecurityManagerException("Unable to fetch JH token for " + userToProxy);
        }

        logger.info("Created JH token: " + jhsdt.toString());
        logger.info("Token kind: " + jhsdt.getKind());
        logger.info("Token id: " + jhsdt.getIdentifier());
        logger.info("Token service: " + jhsdt.getService());

        cred.addToken(jhsdt.getService(), jhsdt);
    }

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {
                logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": "
                        + props.getBoolean(OBTAIN_NAMENODE_TOKEN));
                if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) {
                    FileSystem fs = FileSystem.get(conf);
                    // check if we get the correct FS, and most importantly, the
                    // conf
                    logger.info("Getting DFS token from " + fs.getUri());
                    Token<?> fsToken = fs
                            .getDelegationToken(getMRTokenRenewerInternal(new JobConf()).toString());
                    if (fsToken == null) {
                        logger.error("Failed to fetch DFS token for ");
                        throw new HadoopSecurityManagerException(
                                "Failed to fetch DFS token for " + userToProxy);
                    }
                    logger.info("Created DFS token: " + fsToken.toString());
                    logger.info("Token kind: " + fsToken.getKind());
                    logger.info("Token id: " + fsToken.getIdentifier());
                    logger.info("Token service: " + fsToken.getService());

                    cred.addToken(fsToken.getService(), fsToken);

                    // getting additional name nodes tokens
                    String otherNamenodes = props.get(OTHER_NAMENODES_TO_GET_TOKEN);
                    if ((otherNamenodes != null) && (otherNamenodes.length() > 0)) {
                        logger.info(OTHER_NAMENODES_TO_GET_TOKEN + ": '" + otherNamenodes + "'");
                        String[] nameNodeArr = otherNamenodes.split(",");
                        Path[] ps = new Path[nameNodeArr.length];
                        for (int i = 0; i < ps.length; i++) {
                            ps[i] = new Path(nameNodeArr[i].trim());
                        }
                        TokenCache.obtainTokensForNamenodes(cred, ps, conf);
                        logger.info("Successfully fetched tokens for: " + otherNamenodes);
                    } else {
                        logger.info(OTHER_NAMENODES_TO_GET_TOKEN + " was not configured");
                    }
                }

                if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) {
                    JobConf jobConf = new JobConf();
                    JobClient jobClient = new JobClient(jobConf);
                    logger.info("Pre-fetching JT token from JobTracker");

                    Token<DelegationTokenIdentifier> mrdt = jobClient
                            .getDelegationToken(getMRTokenRenewerInternal(jobConf));
                    if (mrdt == null) {
                        logger.error("Failed to fetch JT token");
                        throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                    }
                    logger.info("Created JT token: " + mrdt.toString());
                    logger.info("Token kind: " + mrdt.getKind());
                    logger.info("Token id: " + mrdt.getIdentifier());
                    logger.info("Token service: " + mrdt.getService());
                    cred.addToken(mrdt.getService(), mrdt);
                }

            }
        });

        FileOutputStream fos = null;
        DataOutputStream dos = null;
        try {
            fos = new FileOutputStream(tokenFile);
            dos = new DataOutputStream(fos);
            cred.writeTokenStorageToStream(dos);
        } finally {
            if (dos != null) {
                try {
                    dos.close();
                } catch (Throwable t) {
                    // best effort
                    logger.error("encountered exception while closing DataOutputStream of the tokenFile", t);
                }
            }
            if (fos != null) {
                fos.close();
            }
        }
        // stash them to cancel after use.

        logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());

    } catch (Exception e) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause(), e);
    } catch (Throwable t) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + t.getMessage() + t.getCause(), t);
    }

}
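
Both Azkaban variants note that the tokens are stashed "to cancel after use". A minimal sketch of that cleanup step, assuming the Hadoop 2.x Token#cancel API:

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenCancelSketch {
    public static void cancelAll(File tokenFile, Configuration conf) throws Exception {
        Credentials cred = Credentials.readTokenStorageFile(tokenFile, conf);
        for (Token<? extends TokenIdentifier> token : cred.getAllTokens()) {
            token.cancel(conf); // asks the issuing service to revoke the token
        }
    }
}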

From source file:co.cask.cdap.common.security.YarnTokenUtils.java

License:Apache License

/**
 * Gets a Yarn delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance as the one given in parameter.
 */
public static Credentials obtainToken(YarnConfiguration configuration, Credentials credentials) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return credentials;
    }

    try {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(configuration);
        yarnClient.start();

        try {
            Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
            org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient
                    .getRMDelegationToken(renewer);

            // TODO: The following logic should be replaced with call to ClientRMProxy.getRMDelegationTokenService after
            // CDAP-4825 is resolved
            List<String> services = new ArrayList<>();
            if (HAUtil.isHAEnabled(configuration)) {
                // If HA is enabled, we need to enumerate all RM hosts
                // and add the corresponding service name to the token service
                // Copy the yarn conf since we need to modify it to get the RM addresses
                YarnConfiguration yarnConf = new YarnConfiguration(configuration);
                for (String rmId : HAUtil.getRMHAIds(configuration)) {
                    yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
                    InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                            YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
                    services.add(SecurityUtil.buildTokenService(address).toString());
                }
            } else {
                services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
            }

            Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken,
                    (InetSocketAddress) null);
            token.setService(new Text(Joiner.on(',').join(services)));
            credentials.addToken(new Text(token.getService()), token);

            // OK to log, it won't log the credential, only information about the token.
            LOG.info("Added RM delegation token: {}", token);

        } finally {
            yarnClient.stop();
        }

        return credentials;
    } catch (Exception e) {
        LOG.error("Failed to get secure token for Yarn.", e);
        throw Throwables.propagate(e);
    }
}

From source file:co.cask.cdap.data.security.HBaseTokenUtils.java

License:Apache License

/**
 * Gets a HBase delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance as the one given in parameter.
 */
public static Credentials obtainToken(Configuration hConf, Credentials credentials) {
    if (!User.isHBaseSecurityEnabled(hConf)) {
        return credentials;
    }

    try {
        Class c = Class.forName("org.apache.hadoop.hbase.security.token.TokenUtil");
        Method method = c.getMethod("obtainToken", Configuration.class);

        Token<? extends TokenIdentifier> token = castToken(method.invoke(null, hConf));
        credentials.addToken(token.getService(), token);

        return credentials;

    } catch (Exception e) {
        LOG.error("Failed to get secure token for HBase.", e);
        throw Throwables.propagate(e);
    }
}

From source file:co.cask.cdap.explore.security.HiveTokenUtils.java

License:Apache License

public static Credentials obtainToken(Credentials credentials) {
    ClassLoader hiveClassloader = ExploreServiceUtils.getExploreClassLoader();
    ClassLoader contextClassloader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(hiveClassloader);

    try {
        LOG.info("Obtaining delegation token for Hive");
        Class hiveConfClass = hiveClassloader.loadClass("org.apache.hadoop.hive.conf.HiveConf");
        Object hiveConf = hiveConfClass.newInstance();

        Class hiveClass = hiveClassloader.loadClass("org.apache.hadoop.hive.ql.metadata.Hive");
        @SuppressWarnings("unchecked")
        Method hiveGet = hiveClass.getMethod("get", hiveConfClass);
        Object hiveObject = hiveGet.invoke(null, hiveConf);

        String user = UserGroupInformation.getCurrentUser().getShortUserName();
        @SuppressWarnings("unchecked")
        Method getDelegationToken = hiveClass.getMethod("getDelegationToken", String.class, String.class);
        String tokenStr = (String) getDelegationToken.invoke(hiveObject, user, user);

        Token<DelegationTokenIdentifier> delegationToken = new Token<>();
        delegationToken.decodeFromUrlString(tokenStr);
        delegationToken.setService(new Text(HiveAuthFactory.HS2_CLIENT_TOKEN));
        LOG.info("Adding delegation token {} from MetaStore for service {} for user {}", delegationToken,
                delegationToken.getService(), user);
        credentials.addToken(delegationToken.getService(), delegationToken);
        return credentials;
    } catch (Exception e) {
        LOG.error("Exception when fetching delegation token from Hive MetaStore", e);
        throw Throwables.propagate(e);
    } finally {
        Thread.currentThread().setContextClassLoader(contextClassloader);
    }
}
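
The Hive client hands back the delegation token as a URL-safe string, which decodeFromUrlString(...) above turns back into a Token. A small standalone sketch of that round trip (the service name is a placeholder):

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenStringSketch {
    public static void main(String[] args) throws Exception {
        Token<TokenIdentifier> original = new Token<>();
        original.setService(new Text("hive-metastore")); // placeholder service name

        // encodeToUrlString() is the inverse of decodeFromUrlString().
        String wire = original.encodeToUrlString();

        Token<TokenIdentifier> copy = new Token<>();
        copy.decodeFromUrlString(wire);
        System.out.println("Service after round trip: " + copy.getService());
    }
}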

From source file:co.cask.cdap.explore.security.JobHistoryServerTokenUtils.java

License:Apache License

/**
 * Gets a JHS delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance as the one given in parameter.
 */
public static Credentials obtainToken(Configuration configuration, Credentials credentials) {
    if (!UserGroupInformation.isSecurityEnabled()) {
        return credentials;
    }

    String historyServerAddress = configuration.get("mapreduce.jobhistory.address");
    HostAndPort hostAndPort = HostAndPort.fromString(historyServerAddress);
    try {
        LOG.info("Obtaining delegation token for JHS");

        ResourceMgrDelegate resourceMgrDelegate = new ResourceMgrDelegate(new YarnConfiguration(configuration));
        MRClientCache clientCache = new MRClientCache(configuration, resourceMgrDelegate);
        MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
        GetDelegationTokenRequest request = new GetDelegationTokenRequestPBImpl();
        request.setRenewer(YarnUtils.getYarnTokenRenewer(configuration));

        InetSocketAddress address = new InetSocketAddress(hostAndPort.getHostText(), hostAndPort.getPort());
        Token<TokenIdentifier> token = ConverterUtils
                .convertFromYarn(hsProxy.getDelegationToken(request).getDelegationToken(), address);

        credentials.addToken(new Text(token.getService()), token);
        return credentials;
    } catch (Exception e) {
        LOG.error("Failed to get secure token for JHS at {}.", hostAndPort, e);
        throw Throwables.propagate(e);
    }
}

From source file:co.cask.cdap.security.hive.HiveTokenUtils.java

License:Apache License

public static Credentials obtainToken(Credentials credentials) {
    ClassLoader hiveClassloader = ExploreUtils.getExploreClassloader();
    ClassLoader contextClassloader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(hiveClassloader);

    try {
        LOG.info("Obtaining delegation token for Hive");
        Class hiveConfClass = hiveClassloader.loadClass("org.apache.hadoop.hive.conf.HiveConf");
        Object hiveConf = hiveConfClass.newInstance();

        Class hiveClass = hiveClassloader.loadClass("org.apache.hadoop.hive.ql.metadata.Hive");
        @SuppressWarnings("unchecked")
        Method hiveGet = hiveClass.getMethod("get", hiveConfClass);
        Object hiveObject = hiveGet.invoke(null, hiveConf);

        String user = UserGroupInformation.getCurrentUser().getShortUserName();
        @SuppressWarnings("unchecked")
        Method getDelegationToken = hiveClass.getMethod("getDelegationToken", String.class, String.class);
        String tokenStr = (String) getDelegationToken.invoke(hiveObject, user, user);

        Token<DelegationTokenIdentifier> delegationToken = new Token<>();
        delegationToken.decodeFromUrlString(tokenStr);
        delegationToken.setService(new Text(HiveAuthFactory.HS2_CLIENT_TOKEN));
        LOG.info("Adding delegation token {} from MetaStore for service {} for user {}", delegationToken,
                delegationToken.getService(), user);
        credentials.addToken(delegationToken.getService(), delegationToken);
        return credentials;
    } catch (Exception e) {
        LOG.error("Exception when fetching delegation token from Hive MetaStore", e);
        throw Throwables.propagate(e);
    } finally {
        Thread.currentThread().setContextClassLoader(contextClassloader);
    }
}

From source file:com.cloudera.hue.CredentialsMerger.java

License:Apache License

/**
 * Merge several credentials files into one. Give the desired output file
 * first, followed by all of the input files.
 *
 * <p>File formats are tried in this order: TokenStorageFile, urlEncodedString.
 * </p>
 *
 * @param args &lt;out&gt; &lt;in1&gt; ...
 * @throws IOException  in the event of an error reading or writing files.
 */
public static void main(String[] args) throws IOException {
    if (args.length < 2) {
        printUsage();
        System.exit(1);
    }

    Path outputFile = new Path("file://" + new File(args[0]).getAbsolutePath());
    Configuration conf = new Configuration();
    Credentials credentials = new Credentials();

    for (int i = 1; i < args.length; i++) {
        try {
            Credentials singleFileCredentials = Credentials
                    .readTokenStorageFile(new Path("file://" + new File(args[i]).getAbsolutePath()), conf);
            credentials.addAll(singleFileCredentials);
        } catch (IOException e) {
            BufferedReader reader = new BufferedReader(new FileReader(args[i]));
            try {
                // Retry to read the token with an encodedUrl format
                Token<?> token = new Token();
                String encodedtoken = reader.readLine();
                token.decodeFromUrlString(encodedtoken);
                credentials.addToken(new Text(args[i]), token);
            } finally {
                reader.close();
            }
        }
    }

    credentials.writeTokenStorageFile(outputFile, conf);
}