Example usage for org.apache.hadoop.security.token Token getIdentifier

List of usage examples for org.apache.hadoop.security.token Token getIdentifier

Introduction

On this page you can find usage examples for org.apache.hadoop.security.token Token getIdentifier.

Prototype

public byte[] getIdentifier() 

Document

Get the token identifier's byte representation.
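
The returned bytes are the Writable-serialized form of the token's TokenIdentifier. As a minimal sketch (not taken from the examples below), assuming an HDFS delegation token, the bytes can be decoded back into a DelegationTokenIdentifier so fields such as the owner or issue date can be inspected; decodeHdfsIdentifier is a hypothetical helper name:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.token.Token;

// Decodes the byte[] returned by Token#getIdentifier() back into an HDFS
// DelegationTokenIdentifier so that its fields (owner, issue date, ...) can be read.
static DelegationTokenIdentifier decodeHdfsIdentifier(Token<?> token) throws IOException {
    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(token.getIdentifier()))) {
        identifier.readFields(in);
    }
    return identifier;
}

The SparkCredentialsUpdater example further down uses the same pattern to read the issue date of an HDFS delegation token when computing the next renewal delay.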

Usage

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final String userToProxy, final Logger logger)
        throws HadoopSecurityManagerException {

    logger.info("Getting hadoop tokens for " + userToProxy);

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {

                FileSystem fs = FileSystem.get(conf);
                // check if we get the correct FS, and most importantly, the conf
                logger.info("Getting DFS token from " + fs.getCanonicalServiceName() + fs.getUri());
                Token<?> fsToken = fs.getDelegationToken(userToProxy);
                if (fsToken == null) {
                    logger.error("Failed to fetch DFS token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch DFS token for " + userToProxy);
                }
                logger.info("Created DFS token: " + fsToken.toString());
                logger.info("Token kind: " + fsToken.getKind());
                logger.info("Token id: " + fsToken.getIdentifier());
                logger.info("Token service: " + fsToken.getService());

                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching JT token: Got new JobClient: " + jc);

                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                if (mrdt == null) {
                    logger.error("Failed to fetch JT token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                }
                logger.info("Created JT token: " + mrdt.toString());
                logger.info("Token kind: " + mrdt.getKind());
                logger.info("Token id: " + mrdt.getIdentifier());
                logger.info("Token service: " + mrdt.getService());

                jc.getCredentials().addToken(mrdt.getService(), mrdt);
                jc.getCredentials().addToken(fsToken.getService(), fsToken);

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(tokenFile);
                    dos = new DataOutputStream(fos);
                    jc.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        dos.close();
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                // stash them to cancel after use.
                logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());
            }
        });
    } catch (Exception e) {
        e.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());

    }
}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
        throws HadoopSecurityManagerException {
    // nntoken
    Credentials cred = null;
    try {
        cred = Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), new Configuration());
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
            logger.info("Got token: " + t.toString());
            logger.info("Token kind: " + t.getKind());
            logger.info("Token id: " + new String(t.getIdentifier()));
            logger.info("Token service: " + t.getService());
            if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling hive token " + new String(t.getIdentifier()));
                cancelHiveToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("MAPREDUCE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling mr job tracker token " + new String(t.getIdentifier()));
                cancelMRJobTrackerToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
                logger.info("Cancelling namenode token " + new String(t.getIdentifier()));
                cancelNameNodeToken(t, userToProxy);
            } else {
                logger.info("unknown token type " + t.getKind());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger)
        throws HadoopSecurityManagerException {

    final String userToProxy = props.getString(USER_TO_PROXY);

    logger.info("Getting hadoop tokens for " + userToProxy);

    final Credentials cred = new Credentials();

    if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) {
        try {
            logger.info("Pre-fetching Hive MetaStore token from hive");

            HiveConf hiveConf = new HiveConf();
            logger.info("HiveConf.ConfVars.METASTOREURIS.varname "
                    + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));
            logger.info("HIVE_METASTORE_SASL_ENABLED " + hiveConf.get(HIVE_METASTORE_SASL_ENABLED));
            logger.info("HIVE_METASTORE_KERBEROS_PRINCIPAL " + hiveConf.get(HIVE_METASTORE_KERBEROS_PRINCIPAL));
            logger.info("HIVE_METASTORE_LOCAL " + hiveConf.get(HIVE_METASTORE_LOCAL));

            HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
            String hcatTokenStr = hiveClient.getDelegationToken(userToProxy,
                    UserGroupInformation.getLoginUser().getShortUserName());
            Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
            hcatToken.decodeFromUrlString(hcatTokenStr);
            logger.info("Created hive metastore token: " + hcatTokenStr);
            logger.info("Token kind: " + hcatToken.getKind());
            logger.info("Token id: " + hcatToken.getIdentifier());
            logger.info("Token service: " + hcatToken.getService());
            cred.addToken(hcatToken.getService(), hcatToken);
        } catch (Exception e) {
            e.printStackTrace();
            logger.error("Failed to get hive metastore token." + e.getMessage() + e.getCause());
        } catch (Throwable t) {
            t.printStackTrace();
            logger.error("Failed to get hive metastore token." + t.getMessage() + t.getCause());
        }
    }

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {
                logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": "
                        + props.getBoolean(OBTAIN_NAMENODE_TOKEN));
                if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) {
                    FileSystem fs = FileSystem.get(conf);
                    // check if we get the correct FS, and most importantly, the
                    // conf
                    logger.info("Getting DFS token from " + fs.getUri());
                    Token<?> fsToken = fs.getDelegationToken(userToProxy);
                    if (fsToken == null) {
                        logger.error("Failed to fetch DFS token for ");
                        throw new HadoopSecurityManagerException(
                                "Failed to fetch DFS token for " + userToProxy);
                    }
                    logger.info("Created DFS token: " + fsToken.toString());
                    logger.info("Token kind: " + fsToken.getKind());
                    logger.info("Token id: " + fsToken.getIdentifier());
                    logger.info("Token service: " + fsToken.getService());
                    cred.addToken(fsToken.getService(), fsToken);
                }

                if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) {
                    JobClient jobClient = new JobClient(new JobConf());
                    logger.info("Pre-fetching JT token from JobTracker");

                    Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                    if (mrdt == null) {
                        logger.error("Failed to fetch JT token");
                        throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                    }
                    logger.info("Created JT token: " + mrdt.toString());
                    logger.info("Token kind: " + mrdt.getKind());
                    logger.info("Token id: " + mrdt.getIdentifier());
                    logger.info("Token service: " + mrdt.getService());
                    cred.addToken(mrdt.getService(), mrdt);
                }
            }
        });

        FileOutputStream fos = null;
        DataOutputStream dos = null;
        try {
            fos = new FileOutputStream(tokenFile);
            dos = new DataOutputStream(fos);
            cred.writeTokenStorageToStream(dos);
        } finally {
            if (dos != null) {
                dos.close();
            }
            if (fos != null) {
                fos.close();
            }
        }

        // stash them to cancel after use.
        logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());

    } catch (Exception e) {
        e.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());
    } catch (Throwable t) {
        t.printStackTrace();
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + t.getMessage() + t.getCause());
    }
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final String userToProxy, final Logger logger)
        throws HadoopSecurityManagerException {

    logger.info("Getting hadoop tokens for " + userToProxy);

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {

                FileSystem fs = FileSystem.get(conf);
                // check if we get the correct FS, and most importantly, the conf
                logger.info("Getting DFS token from " + fs.getCanonicalServiceName() + fs.getUri());
                Token<?> fsToken = fs.getDelegationToken(userToProxy);
                if (fsToken == null) {
                    logger.error("Failed to fetch DFS token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch DFS token for " + userToProxy);
                }
                logger.info("Created DFS token: " + fsToken.toString());
                logger.info("Token kind: " + fsToken.getKind());
                logger.info("Token id: " + fsToken.getIdentifier());
                logger.info("Token service: " + fsToken.getService());

                JobConf jc = new JobConf(conf);
                JobClient jobClient = new JobClient(jc);
                logger.info("Pre-fetching JT token: Got new JobClient: " + jc);

                Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token"));
                if (mrdt == null) {
                    logger.error("Failed to fetch JT token for ");
                    throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                }
                logger.info("Created JT token: " + mrdt.toString());
                logger.info("Token kind: " + mrdt.getKind());
                logger.info("Token id: " + mrdt.getIdentifier());
                logger.info("Token service: " + mrdt.getService());

                jc.getCredentials().addToken(mrdt.getService(), mrdt);
                jc.getCredentials().addToken(fsToken.getService(), fsToken);

                FileOutputStream fos = null;
                DataOutputStream dos = null;
                try {
                    fos = new FileOutputStream(tokenFile);
                    dos = new DataOutputStream(fos);
                    jc.getCredentials().writeTokenStorageToStream(dos);
                } finally {
                    if (dos != null) {
                        try {
                            dos.close();
                        } catch (Throwable t) {
                            // best effort
                            logger.error(
                                    "encountered exception while closing DataOutputStream of the tokenFile", t);
                        }
                    }
                    if (fos != null) {
                        fos.close();
                    }
                }
                // stash them to cancel after use.
                logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());
            }
        });
    } catch (Exception e) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause());

    }
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

private void cancelJhsToken(final Token<? extends TokenIdentifier> t, String userToProxy)
        throws HadoopSecurityManagerException {
    // it appears yarn would clean up this token after app finish, after a long
    // while though.
    org.apache.hadoop.yarn.api.records.Token token = org.apache.hadoop.yarn.api.records.Token
            .newInstance(t.getIdentifier(), t.getKind().toString(), t.getPassword(), t.getService().toString());
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress jhsAddress = SecurityUtil.getTokenServiceAddr(t);
    MRClientProtocol jhsProxy = null;
    try {
        jhsProxy = UserGroupInformation.getCurrentUser().doAs(new PrivilegedAction<MRClientProtocol>() {
            @Override
            public MRClientProtocol run() {
                return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class, jhsAddress, conf);
            }
        });
        CancelDelegationTokenRequest request = Records.newRecord(CancelDelegationTokenRequest.class);
        request.setDelegationToken(token);
        jhsProxy.cancelDelegationToken(request);
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel token. " + e.getMessage() + e.getCause(), e);
    } finally {
        RPC.stopProxy(jhsProxy);
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
        throws HadoopSecurityManagerException {
    // nntoken
    Credentials cred = null;
    try {
        cred = Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), new Configuration());
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {

            logger.info("Got token: " + t.toString());
            logger.info("Token kind: " + t.getKind());
            logger.info("Token id: " + new String(t.getIdentifier()));
            logger.info("Token service: " + t.getService());

            if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling hive token " + new String(t.getIdentifier()));
                cancelHiveToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("RM_DELEGATION_TOKEN"))) {
                logger.info("Cancelling mr job tracker token " + new String(t.getIdentifier()));
                // cancelMRJobTrackerToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
                logger.info("Cancelling namenode token " + new String(t.getIdentifier()));
                // cancelNameNodeToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("MR_DELEGATION_TOKEN"))) {
                logger.info("Cancelling jobhistoryserver mr token " + new String(t.getIdentifier()));
                // cancelJhsToken(t, userToProxy);
            } else {
                logger.info("unknown token type " + t.getKind());
            }
        }
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel tokens " + e.getMessage() + e.getCause(), e);
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

/**
 * function to fetch hcat token as per the specified hive configuration and
 * then store the token in to the credential store specified .
 *
 * @param userToProxy String value indicating the name of the user the token
 *          will be fetched for.
 * @param hiveConf the configuration based off which the hive client will be
 *          initialized.
 * @param logger the logger instance which writes the logging content to the
 *          job logs.
 *
 * @throws IOException
 * @throws TException
 * @throws MetaException
 *
 * */
private Token<DelegationTokenIdentifier> fetchHcatToken(String userToProxy, HiveConf hiveConf,
        String tokenSignatureOverwrite, final Logger logger) throws IOException, MetaException, TException {

    logger.info(HiveConf.ConfVars.METASTOREURIS.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname));

    logger.info(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));

    logger.info(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname + ": "
            + hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));

    HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
    String hcatTokenStr = hiveClient.getDelegationToken(userToProxy,
            UserGroupInformation.getLoginUser().getShortUserName());
    Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
    hcatToken.decodeFromUrlString(hcatTokenStr);

    // overwrite the value of the service property of the token if the signature
    // override is specified.
    if (tokenSignatureOverwrite != null && tokenSignatureOverwrite.trim().length() > 0) {
        hcatToken.setService(new Text(tokenSignatureOverwrite.trim().toLowerCase()));

        logger.info(HIVE_TOKEN_SIGNATURE_KEY + ":"
                + (tokenSignatureOverwrite == null ? "" : tokenSignatureOverwrite));
    }

    logger.info("Created hive metastore token: " + hcatTokenStr);
    logger.info("Token kind: " + hcatToken.getKind());
    logger.info("Token id: " + hcatToken.getIdentifier());
    logger.info("Token service: " + hcatToken.getService());
    return hcatToken;
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger)
        throws HadoopSecurityManagerException {

    final String userToProxy = props.getString(USER_TO_PROXY);

    logger.info("Getting hadoop tokens based on props for " + userToProxy);

    final Credentials cred = new Credentials();

    if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) {
        try {

            // first we fetch and save the default hcat token.
            logger.info("Pre-fetching default Hive MetaStore token from hive");

            HiveConf hiveConf = new HiveConf();
            Token<DelegationTokenIdentifier> hcatToken = fetchHcatToken(userToProxy, hiveConf, null, logger);

            cred.addToken(hcatToken.getService(), hcatToken);

            // check and see if user specified the extra hcat locations we need to
            // look at and fetch token.
            final List<String> extraHcatLocations = props.getStringList(EXTRA_HCAT_LOCATION);
            if (Collections.EMPTY_LIST != extraHcatLocations) {
                logger.info("Need to pre-fetch extra metaStore tokens from hive.");

                // start to process the user inputs.
                for (String thriftUrl : extraHcatLocations) {
                    logger.info("Pre-fetching metaStore token from : " + thriftUrl);

                    hiveConf = new HiveConf();
                    hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrl);
                    hcatToken = fetchHcatToken(userToProxy, hiveConf, thriftUrl, logger);
                    cred.addToken(hcatToken.getService(), hcatToken);
                }

            }

        } catch (Throwable t) {
            String message = "Failed to get hive metastore token." + t.getMessage() + t.getCause();
            logger.error(message, t);
            throw new HadoopSecurityManagerException(message);
        }
    }

    if (props.getBoolean(OBTAIN_JOBHISTORYSERVER_TOKEN, false)) {
        YarnRPC rpc = YarnRPC.create(conf);
        final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);

        logger.debug("Connecting to HistoryServer at: " + serviceAddr);
        HSClientProtocol hsProxy = (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
                NetUtils.createSocketAddr(serviceAddr), conf);
        logger.info("Pre-fetching JH token from job history server");

        Token<?> jhsdt = null;
        try {
            jhsdt = getDelegationTokenFromHS(hsProxy);
        } catch (Exception e) {
            logger.error("Failed to fetch JH token", e);
            throw new HadoopSecurityManagerException("Failed to fetch JH token for " + userToProxy);
        }

        if (jhsdt == null) {
            logger.error("getDelegationTokenFromHS() returned null");
            throw new HadoopSecurityManagerException("Unable to fetch JH token for " + userToProxy);
        }

        logger.info("Created JH token: " + jhsdt.toString());
        logger.info("Token kind: " + jhsdt.getKind());
        logger.info("Token id: " + jhsdt.getIdentifier());
        logger.info("Token service: " + jhsdt.getService());

        cred.addToken(jhsdt.getService(), jhsdt);
    }

    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                getToken(userToProxy);
                return null;
            }

            private void getToken(String userToProxy)
                    throws InterruptedException, IOException, HadoopSecurityManagerException {
                logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": "
                        + props.getBoolean(OBTAIN_NAMENODE_TOKEN));
                if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) {
                    FileSystem fs = FileSystem.get(conf);
                    // check if we get the correct FS, and most importantly, the
                    // conf
                    logger.info("Getting DFS token from " + fs.getUri());
                    Token<?> fsToken = fs
                            .getDelegationToken(getMRTokenRenewerInternal(new JobConf()).toString());
                    if (fsToken == null) {
                        logger.error("Failed to fetch DFS token for ");
                        throw new HadoopSecurityManagerException(
                                "Failed to fetch DFS token for " + userToProxy);
                    }
                    logger.info("Created DFS token: " + fsToken.toString());
                    logger.info("Token kind: " + fsToken.getKind());
                    logger.info("Token id: " + fsToken.getIdentifier());
                    logger.info("Token service: " + fsToken.getService());

                    cred.addToken(fsToken.getService(), fsToken);

                    // getting additional name nodes tokens
                    String otherNamenodes = props.get(OTHER_NAMENODES_TO_GET_TOKEN);
                    if ((otherNamenodes != null) && (otherNamenodes.length() > 0)) {
                        logger.info(OTHER_NAMENODES_TO_GET_TOKEN + ": '" + otherNamenodes + "'");
                        String[] nameNodeArr = otherNamenodes.split(",");
                        Path[] ps = new Path[nameNodeArr.length];
                        for (int i = 0; i < ps.length; i++) {
                            ps[i] = new Path(nameNodeArr[i].trim());
                        }
                        TokenCache.obtainTokensForNamenodes(cred, ps, conf);
                        logger.info("Successfully fetched tokens for: " + otherNamenodes);
                    } else {
                        logger.info(OTHER_NAMENODES_TO_GET_TOKEN + " was not configured");
                    }
                }

                if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) {
                    JobConf jobConf = new JobConf();
                    JobClient jobClient = new JobClient(jobConf);
                    logger.info("Pre-fetching JT token from JobTracker");

                    Token<DelegationTokenIdentifier> mrdt = jobClient
                            .getDelegationToken(getMRTokenRenewerInternal(jobConf));
                    if (mrdt == null) {
                        logger.error("Failed to fetch JT token");
                        throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy);
                    }
                    logger.info("Created JT token: " + mrdt.toString());
                    logger.info("Token kind: " + mrdt.getKind());
                    logger.info("Token id: " + mrdt.getIdentifier());
                    logger.info("Token service: " + mrdt.getService());
                    cred.addToken(mrdt.getService(), mrdt);
                }

            }
        });

        FileOutputStream fos = null;
        DataOutputStream dos = null;
        try {
            fos = new FileOutputStream(tokenFile);
            dos = new DataOutputStream(fos);
            cred.writeTokenStorageToStream(dos);
        } finally {
            if (dos != null) {
                try {
                    dos.close();
                } catch (Throwable t) {
                    // best effort
                    logger.error("encountered exception while closing DataOutputStream of the tokenFile", t);
                }
            }
            if (fos != null) {
                fos.close();
            }
        }
        // stash them to cancel after use.

        logger.info("Tokens loaded in " + tokenFile.getAbsolutePath());

    } catch (Exception e) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + e.getMessage() + e.getCause(), e);
    } catch (Throwable t) {
        throw new HadoopSecurityManagerException(
                "Failed to get hadoop tokens! " + t.getMessage() + t.getCause(), t);
    }

}

From source file:co.cask.cdap.app.runtime.spark.SparkCredentialsUpdater.java

License:Apache License

@VisibleForTesting
long getNextUpdateDelay(Credentials credentials) throws IOException {
    long now = System.currentTimeMillis();

    // This is almost the same logic as in SparkHadoopUtil.getTimeFromNowToRenewal
    for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
        if (DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(token.getKind())) {
            DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
            try (DataInputStream input = new DataInputStream(new ByteArrayInputStream(token.getIdentifier()))) {
                identifier.readFields(input);

                // speed up by 2 seconds to account for any time race between driver and executor
                return Math.max(0L, (long) (identifier.getIssueDate() + 0.8 * updateIntervalMs) - now - 2000);
            }
        }
    }
    return 0L;
}

From source file:co.cask.cdap.security.impersonation.UGIProviderTest.java

License:Apache License

@Test
public void testRemoteUGIProvider() throws Exception {
    // Starts a mock server to handle remote UGI requests
    final NettyHttpService httpService = NettyHttpService.builder("remoteUGITest")
            .addHttpHandlers(Collections.singleton(new UGIProviderTestHandler())).build();

    httpService.startAndWait();
    try {
        InMemoryDiscoveryService discoveryService = new InMemoryDiscoveryService();
        discoveryService
                .register(new Discoverable(Constants.Service.APP_FABRIC_HTTP, httpService.getBindAddress()));

        // Create Alice UGI
        RemoteUGIProvider ugiProvider = new RemoteUGIProvider(cConf, discoveryService, locationFactory);
        ImpersonationInfo aliceInfo = new ImpersonationInfo(getPrincipal("alice"),
                keytabFile.toURI().toString());
        UserGroupInformation aliceUGI = ugiProvider.getConfiguredUGI(aliceInfo);

        // Shouldn't be a kerberos UGI
        Assert.assertFalse(aliceUGI.hasKerberosCredentials());
        // Validate the credentials
        Token<? extends TokenIdentifier> token = aliceUGI.getCredentials().getToken(new Text("principal"));
        Assert.assertArrayEquals(aliceInfo.getPrincipal().getBytes(StandardCharsets.UTF_8),
                token.getIdentifier());
        Assert.assertArrayEquals(aliceInfo.getPrincipal().getBytes(StandardCharsets.UTF_8),
                token.getPassword());
        Assert.assertEquals(new Text("principal"), token.getKind());
        Assert.assertEquals(new Text("service"), token.getService());

        token = aliceUGI.getCredentials().getToken(new Text("keytab"));
        Assert.assertArrayEquals(aliceInfo.getKeytabURI().getBytes(StandardCharsets.UTF_8),
                token.getIdentifier());
        Assert.assertArrayEquals(aliceInfo.getKeytabURI().getBytes(StandardCharsets.UTF_8),
                token.getPassword());
        Assert.assertEquals(new Text("keytab"), token.getKind());
        Assert.assertEquals(new Text("service"), token.getService());

        // Fetch it again, it should return the same UGI due to caching
        Assert.assertSame(aliceUGI, ugiProvider.getConfiguredUGI(aliceInfo));

        // Invalidate the cache and fetch it again. A different UGI should be returned
        ugiProvider.invalidCache();
        Assert.assertNotSame(aliceUGI, ugiProvider.getConfiguredUGI(aliceInfo));

    } finally {
        httpService.stopAndWait();
    }
}