Example usage for org.apache.hadoop.security Credentials readTokenStorageFile

List of usage examples for org.apache.hadoop.security Credentials readTokenStorageFile

Introduction

On this page you can find example usages of org.apache.hadoop.security.Credentials.readTokenStorageFile.

Prototype

public static Credentials readTokenStorageFile(File filename, Configuration conf) throws IOException

An overload that takes an org.apache.hadoop.fs.Path instead of a java.io.File is also provided; several of the examples below use that variant.

Document

Convenience method for reading a token storage file and loading its Tokens.
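
Before the collected examples, here is a minimal sketch of the typical call pattern: read a token storage file into a Credentials object, inspect the tokens it contains, and merge them into the current user. The file path used here is a placeholder, not a value taken from any of the sources below.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ReadTokenStorageFileExample {

    public static void main(String[] args) throws IOException {
        // Placeholder path; in practice this often comes from the
        // HADOOP_TOKEN_FILE_LOCATION environment variable.
        File tokenFile = new File(args.length > 0 ? args[0] : "/tmp/tokens.bin");
        Configuration conf = new Configuration();

        // Read the token storage file and load its tokens and secret keys.
        Credentials credentials = Credentials.readTokenStorageFile(tokenFile, conf);

        // Inspect what was loaded.
        for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
            System.out.println("Token kind=" + token.getKind() + ", service=" + token.getService());
        }

        // Typically the credentials are then added to the current user or to a job,
        // as the usage examples below demonstrate.
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    }
}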

Usage

From source file:ImportTsv.java

License:Apache License

/**
 * Sets up the actual job.
 *
 * @param conf  The current configuration.
 * @param args  The command line parameters.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
        throws IOException, ClassNotFoundException {
    Job job = null;
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Admin admin = connection.getAdmin()) {
            // Support non-XML supported characters
            // by re-encoding the passed separator as a Base64 string.
            String actualSeparator = conf.get(SEPARATOR_CONF_KEY);
            if (actualSeparator != null) {
                conf.set(SEPARATOR_CONF_KEY, Base64.encodeBytes(actualSeparator.getBytes()));
            }

            // See if a non-default Mapper was set
            String mapperClassName = conf.get(MAPPER_CONF_KEY);
            Class mapperClass = mapperClassName != null ? Class.forName(mapperClassName) : DEFAULT_MAPPER;

            TableName tableName = TableName.valueOf(args[0]);
            Path inputDir = new Path(args[1]);

            // set filter
            conf.set(EASTCOM_FILTER_PARAMS, args[3]);
            conf.set(EASTCOM_FILTER_DEFINE, args[4]);

            String jobName = conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName.getNameAsString());
            job = Job.getInstance(conf, jobName);
            job.setJarByClass(mapperClass);
            FileInputFormat.setInputPaths(job, inputDir);
            job.setInputFormatClass(TextInputFormat.class);
            job.setMapperClass(mapperClass);
            String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
            String columns[] = conf.getStrings(COLUMNS_CONF_KEY);
            if (StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) {
                String fileLoc = conf.get(CREDENTIALS_LOCATION);
                Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf);
                job.getCredentials().addAll(cred);
            }

            if (hfileOutPath != null) {
                if (!admin.tableExists(tableName)) {
                    String errorMsg = format("Table '%s' does not exist.", tableName);
                    if ("yes".equalsIgnoreCase(conf.get(CREATE_TABLE_CONF_KEY, "yes"))) {
                        LOG.warn(errorMsg);
                        // TODO: this is backwards. Instead of depending on the existence of a table,
                        // create a sane splits file for HFileOutputFormat based on data sampling.
                        createTable(admin, tableName, columns);
                    } else {
                        LOG.error(errorMsg);
                        throw new TableNotFoundException(errorMsg);
                    }
                }
                try (HTable table = (HTable) connection.getTable(tableName)) {
                    boolean noStrict = conf.getBoolean(NO_STRICT_COL_FAMILY, false);
                    // if no.strict is false then check column family
                    if (!noStrict) {
                        ArrayList<String> unmatchedFamilies = new ArrayList<String>();
                        Set<String> cfSet = getColumnFamilies(columns);
                        HTableDescriptor tDesc = table.getTableDescriptor();
                        for (String cf : cfSet) {
                            if (tDesc.getFamily(Bytes.toBytes(cf)) == null) {
                                unmatchedFamilies.add(cf);
                            }
                        }
                        if (unmatchedFamilies.size() > 0) {
                            ArrayList<String> familyNames = new ArrayList<String>();
                            for (HColumnDescriptor family : table.getTableDescriptor().getFamilies()) {
                                familyNames.add(family.getNameAsString());
                            }
                            String msg = "Column Families " + unmatchedFamilies + " specified in "
                                    + COLUMNS_CONF_KEY + " does not match with any of the table " + tableName
                                    + " column families " + familyNames + ".\n"
                                    + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY
                                    + "=true.\n";
                            usage(msg);
                            System.exit(-1);
                        }
                    }
                    job.setReducerClass(PutSortReducer.class);
                    Path outputDir = new Path(hfileOutPath);
                    FileOutputFormat.setOutputPath(job, outputDir);
                    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
                    if (mapperClass.equals(TsvImporterTextMapper.class)) {
                        job.setMapOutputValueClass(Text.class);
                        job.setReducerClass(TextSortReducer.class);
                    } else {
                        job.setMapOutputValueClass(Put.class);
                        job.setCombinerClass(PutCombiner.class);
                    }
                    HFileOutputFormat2.configureIncrementalLoad(job, table, table);
                }
            } else {
                if (!admin.tableExists(tableName)) {
                    String errorMsg = format("Table '%s' does not exist.", tableName);
                    LOG.error(errorMsg);
                    throw new TableNotFoundException(errorMsg);
                }
                if (mapperClass.equals(TsvImporterTextMapper.class)) {
                    usage(TsvImporterTextMapper.class.toString()
                            + " should not be used for non bulkloading case. use "
                            + TsvImporterMapper.class.toString()
                            + " or custom mapper whose value type is Put.");
                    System.exit(-1);
                }
                // No reducers. Just write straight to table. Call initTableReducerJob
                // to set up the TableOutputFormat.
                TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, job);
                job.setNumReduceTasks(0);
            }

            TableMapReduceUtil.addDependencyJars(job);
            TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
                    com.google.common.base.Function.class /* Guava used by TsvParser */);
        }
    }
    return job;
}

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
        throws HadoopSecurityManagerException {
    // nntoken
    Credentials cred = null;
    try {
        cred = Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), new Configuration());
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
            logger.info("Got token: " + t.toString());
            logger.info("Token kind: " + t.getKind());
            logger.info("Token id: " + new String(t.getIdentifier()));
            logger.info("Token service: " + t.getService());
            if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling hive token " + new String(t.getIdentifier()));
                cancelHiveToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("MAPREDUCE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling mr job tracker token " + new String(t.getIdentifier()));
                cancelMRJobTrackerToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
                logger.info("Cancelling namenode token " + new String(t.getIdentifier()));
                cancelNameNodeToken(t, userToProxy);
            } else {
                logger.info("unknown token type " + t.getKind());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
        throws HadoopSecurityManagerException {
    // nntoken
    Credentials cred = null;
    try {
        cred = Credentials.readTokenStorageFile(new Path(tokenFile.toURI()), new Configuration());
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {

            logger.info("Got token: " + t.toString());
            logger.info("Token kind: " + t.getKind());
            logger.info("Token id: " + new String(t.getIdentifier()));
            logger.info("Token service: " + t.getService());

            if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
                logger.info("Cancelling hive token " + new String(t.getIdentifier()));
                cancelHiveToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("RM_DELEGATION_TOKEN"))) {
                logger.info("Cancelling mr job tracker token " + new String(t.getIdentifier()));
                // cancelMRJobTrackerToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
                logger.info("Cancelling namenode token " + new String(t.getIdentifier()));
                // cancelNameNodeToken(t, userToProxy);
            } else if (t.getKind().equals(new Text("MR_DELEGATION_TOKEN"))) {
                logger.info("Cancelling jobhistoryserver mr token " + new String(t.getIdentifier()));
                // cancelJhsToken(t, userToProxy);
            } else {
                logger.info("unknown token type " + t.getKind());
            }
        }
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel tokens " + e.getMessage() + e.getCause(), e);
    }

}

From source file:com.cloudera.hue.CredentialsMerger.java

License:Apache License

/**
 * Merge several credentials files into one. Give the desired output file
 * first, followed by all of the input files.
 *
 * <p>File formats are tried in this order: TokenStorageFile, urlEncodedString.
 * </p>
 *
 * @param args &lt;out&gt; &lt;in1&gt; ...
 * @throws IOException  in the event of an error reading or writing files.
 */
public static void main(String[] args) throws IOException {
    if (args.length < 2) {
        printUsage();
        System.exit(1);
    }

    Path outputFile = new Path("file://" + new File(args[0]).getAbsolutePath());
    Configuration conf = new Configuration();
    Credentials credentials = new Credentials();

    for (int i = 1; i < args.length; i++) {
        try {
            Credentials singleFileCredentials = Credentials
                    .readTokenStorageFile(new Path("file://" + new File(args[i]).getAbsolutePath()), conf);
            credentials.addAll(singleFileCredentials);
        } catch (IOException e) {
            BufferedReader reader = new BufferedReader(new FileReader(args[i]));
            try {
                // Retry to read the token with an encodedUrl format
                Token<?> token = new Token();
                String encodedtoken = reader.readLine();
                token.decodeFromUrlString(encodedtoken);
                credentials.addToken(new Text(args[i]), token);
            } finally {
                reader.close();
            }
        }
    }

    credentials.writeTokenStorageFile(outputFile, conf);
}

From source file:gobblin.yarn.YarnHelixUtils.java

License:Apache License

/**
 * Read a collection {@link Token}s from a given file.
 *
 * @param tokenFilePath the token file path
 * @param configuration a {@link Configuration} object carrying Hadoop configuration properties
 * @return a collection of {@link Token}s
 * @throws IOException
 */
public static Collection<Token<? extends TokenIdentifier>> readTokensFromFile(Path tokenFilePath,
        Configuration configuration) throws IOException {
    return Credentials.readTokenStorageFile(tokenFilePath, configuration).getAllTokens();
}

From source file:org.apache.falcon.catalog.HiveCatalogService.java

License:Apache License

private static Credentials getCredentials(Configuration conf) throws IOException {
    final String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
    if (tokenFile == null) {
        return null;
    }

    try {
        LOG.info("Adding credentials/delegation tokens from token file={} to conf", tokenFile);
        Credentials credentials = Credentials.readTokenStorageFile(new File(tokenFile), conf);
        LOG.info("credentials numberOfTokens={}, numberOfSecretKeys={}", credentials.numberOfTokens(),
                credentials.numberOfSecretKeys());
        return credentials;
    } catch (IOException e) {
        LOG.warn("error while fetching credentials from {}", tokenFile);
    }

    return null;
}

From source file:org.apache.falcon.hive.util.HiveMetastoreUtils.java

License:Apache License

private static HiveConf createHiveConf(Configuration conf, String metastoreUrl, String metastorePrincipal,
        String hive2Principal) throws IOException {
    JobConf jobConf = new JobConf(conf);
    String delegationToken = HiveDRUtils.getFilePathFromEnv("HADOOP_TOKEN_FILE_LOCATION");
    if (delegationToken != null) {
        Credentials credentials = Credentials.readTokenStorageFile(new File(delegationToken), conf);
        jobConf.setCredentials(credentials);
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    }

    HiveConf hcatConf = new HiveConf(jobConf, HiveConf.class);

    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUrl);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");

    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_REPL_TASK_FACTORY.varname, EximReplicationTaskFactory.class.getName());
    if (StringUtils.isNotEmpty(metastorePrincipal)) {
        hcatConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname, metastorePrincipal);
        hcatConf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");
        hcatConf.set(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true");
        hcatConf.set("hadoop.rpc.protection", "authentication");
    }
    if (StringUtils.isNotEmpty(hive2Principal)) {
        hcatConf.set(HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname, hive2Principal);
        hcatConf.set(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.varname, "kerberos");
    }

    return hcatConf;
}

From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncher.java

License:Apache License

private void setupSecurityTokens(ContainerLaunchContext containerLaunchContext) throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();

    // Pass on the credentials from the hadoop token file if present.
    // The value in the token file takes precedence.
    if (System.getenv(HADOOP_TOKEN_FILE_LOCATION) != null) {
        Credentials tokenFileCredentials = Credentials
                .readTokenStorageFile(new File(System.getenv(HADOOP_TOKEN_FILE_LOCATION)), new Configuration());
        credentials.addAll(tokenFileCredentials);
    }

    String tokenRenewer = this.yarnConfiguration.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Failed to get master Kerberos principal for the RM to use as renewer");
    }

    // For now, only getting tokens for the default file-system.
    Token<?> tokens[] = this.fs.addDelegationTokens(tokenRenewer, credentials);
    if (tokens != null) {
        for (Token<?> token : tokens) {
            LOGGER.info("Got delegation token for " + this.fs.getUri() + "; " + token);
        }
    }

    Closer closer = Closer.create();
    try {
        DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
        credentials.writeTokenStorageToStream(dataOutputBuffer);
        ByteBuffer fsTokens = ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
        containerLaunchContext.setTokens(fsTokens);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file:org.apache.pig.backend.hadoop.executionengine.tez.util.SecurityHelper.java

License:Apache License

@SuppressWarnings("unchecked")
private static void readTokensFromFiles(Configuration conf, Credentials credentials) throws IOException {
    // add tokens and secrets coming from a token storage file
    String binaryTokenFilename = conf.get(MRConfiguration.JOB_CREDENTIALS_BINARY);
    if (binaryTokenFilename != null) {
        Credentials binary = Credentials.readTokenStorageFile(new Path("file:///" + binaryTokenFilename), conf);
        credentials.addAll(binary);
    }
    // add secret keys coming from a json file
    String tokensFileName = conf.get(MRConfiguration.JOB_CREDENTIALS_JSON);
    if (tokensFileName != null) {
        LOG.info("loading user's secret keys from " + tokensFileName);
        String localFileName = new Path(tokensFileName).toUri().getPath();

        boolean json_error = false;
        try {
            // read JSON
            ObjectMapper mapper = new ObjectMapper();
            Map<String, String> nm = mapper.readValue(new File(localFileName), Map.class);

            for (Map.Entry<String, String> ent : nm.entrySet()) {
                credentials.addSecretKey(new Text(ent.getKey()), ent.getValue().getBytes(Charsets.UTF_8));
            }
        } catch (JsonMappingException e) {
            json_error = true;
        } catch (JsonParseException e) {
            json_error = true;
        }
        if (json_error)
            LOG.warn("couldn't parse Token Cache JSON file with user secret keys");
    }
}

From source file:org.apache.slider.client.TokensOperation.java

License:Apache License

public int actionTokens(ActionTokensArgs args, FileSystem fs, Configuration conf, YarnClientImpl yarnClient)
        throws IOException, YarnException {
    Credentials credentials;
    String footnote = "";
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    boolean isSecure = UserGroupInformation.isSecurityEnabled();
    if (args.keytab != null) {
        File keytab = args.keytab;
        if (!keytab.isFile()) {
            throw new NotFoundException(E_NO_KEYTAB + keytab.getAbsolutePath());
        }
        String principal = args.principal;
        log.info("Logging in as {} from keytab {}", principal, keytab);
        user = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab.getCanonicalPath());
    }
    Credentials userCredentials = user.getCredentials();
    File output = args.output;
    if (output != null) {
        if (!isSecure) {
            throw new BadClusterStateException(E_INSECURE);
        }
        credentials = new Credentials(userCredentials);
        // filesystem
        addRMRenewableFSDelegationTokens(conf, fs, credentials);
        addRMDelegationToken(yarnClient, credentials);
        if (maybeAddTimelineToken(conf, credentials) != null) {
            log.debug("Added timeline token");
        }
        saveTokens(output, credentials);
        String filename = output.getCanonicalPath();
        footnote = String.format(
                "%d tokens saved to %s\n" + "To use these in the environment:\n" + "export %s=%s",
                credentials.numberOfTokens(), filename, UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION,
                filename);
    } else if (args.source != null) {
        File source = args.source;
        log.info("Reading credentials from file {}", source);
        if (!source.isFile()) {
            throw new NotFoundException(E_MISSING_SOURCE_FILE + source.getAbsolutePath());
        }
        credentials = Credentials.readTokenStorageFile(args.source, conf);
    } else {
        StringBuffer origin = new StringBuffer();
        File file = locateEnvCredentials(System.getenv(), conf, origin);
        if (file != null) {
            log.info("Credential Source {}", origin);
        } else {
            log.info("Credential source: logged in user");
        }
        credentials = userCredentials;
    }
    // list the tokens
    log.info("\n{}", dumpTokens(credentials, "\n"));
    if (!footnote.isEmpty()) {
        log.info(footnote);
    }
    return 0;
}