Example usage for org.apache.hadoop.mapred JobConf getCredentials

List of usage examples for org.apache.hadoop.mapred JobConf getCredentials

Introduction

On this page you can find example usages of org.apache.hadoop.mapred JobConf getCredentials.

Prototype

public Credentials getCredentials() 

Document

Get credentials for the job.
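
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern: obtain the job's Credentials from the JobConf, add a token under a chosen alias, and look it up again later (for example in a task). The class name GetCredentialsExample and the alias "my.token.alias" are illustrative placeholders and are not taken from any of the projects quoted below.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class GetCredentialsExample {

    public static void main(String[] args) {
        JobConf jobConf = new JobConf();

        // getCredentials() returns the Credentials object attached to this job;
        // tokens added to it travel with the job when it is submitted.
        Credentials credentials = jobConf.getCredentials();

        // Add a token under an alias of our choosing. "my.token.alias" is a
        // placeholder; real code would obtain a delegation token from a service
        // (NameNode, JobTracker, HCat, HBase, ...) as the examples below do.
        Token<TokenIdentifier> token = new Token<TokenIdentifier>();
        credentials.addToken(new Text("my.token.alias"), token);

        // Later, e.g. in a task's configure() method, the same alias is used
        // to retrieve the token from the job's credentials.
        Token<? extends TokenIdentifier> retrieved =
                jobConf.getCredentials().getToken(new Text("my.token.alias"));
        System.out.println("Token present: " + (retrieved != null));
    }
}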

Usage

From source file:org.apache.flink.batch.connectors.hive.HiveTableInputFormat.java

License:Apache License

public HiveTableInputFormat(JobConf jobConf, boolean isPartitioned, String[] partitionColNames,
        List<HiveTablePartition> partitions, RowTypeInfo rowTypeInfo) {
    super(jobConf.getCredentials());
    this.rowTypeInfo = checkNotNull(rowTypeInfo, "rowTypeInfo can not be null.");
    this.jobConf = new JobConf(jobConf);
    this.isPartitioned = isPartitioned;
    this.partitionColNames = partitionColNames;
    this.partitions = checkNotNull(partitions, "partitions can not be null.");
}

From source file:org.apache.flink.batch.connectors.hive.HiveTableOutputFormat.java

License:Apache License

public HiveTableOutputFormat(JobConf jobConf, String dbName, String tableName, List<String> partitionCols,
        RowTypeInfo rowTypeInfo, HiveTablePartition hiveTablePartition, Properties tblProperties,
        boolean overwrite) {
    super(jobConf.getCredentials());

    Preconditions.checkArgument(!StringUtils.isNullOrWhitespaceOnly(dbName), "DB name is empty");
    Preconditions.checkArgument(!StringUtils.isNullOrWhitespaceOnly(tableName), "Table name is empty");
    Preconditions.checkNotNull(rowTypeInfo, "RowTypeInfo cannot be null");
    Preconditions.checkNotNull(hiveTablePartition, "HiveTablePartition cannot be null");
    Preconditions.checkNotNull(tblProperties, "Table properties cannot be null");

    HadoopUtils.mergeHadoopConf(jobConf);
    this.jobConf = jobConf;
    this.dbName = dbName;
    this.tableName = tableName;
    this.partitionCols = partitionCols;
    this.rowTypeInfo = rowTypeInfo;
    this.hiveTablePartition = hiveTablePartition;
    this.tblProperties = tblProperties;
    this.overwrite = overwrite;
    isPartitioned = partitionCols != null && !partitionCols.isEmpty();
    isDynamicPartition = isPartitioned && partitionCols.size() > hiveTablePartition.getPartitionSpec().size();
}

From source file:org.apache.hcatalog.hbase.HBaseBulkOutputFormat.java

License:Apache License

private void addJTDelegationToken(JobConf job) throws IOException {
    // Get the JobTracker delegation token if security is enabled;
    // we need it to launch the ImportSequenceFile job
    if (User.isSecurityEnabled()) {
        JobClient jobClient = new JobClient(new JobConf(job));
        try {
            job.getCredentials().addToken(new Text("my mr token"), jobClient.getDelegationToken(null));
        } catch (InterruptedException e) {
            throw new IOException("Error while getting JT delegation token", e);
        }
    }
}

From source file:org.apache.hcatalog.hbase.HBaseHCatStorageHandler.java

License:Apache License

@Override
public void configureInputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
    // Populate jobProperties with input table name, table columns, RM snapshot,
    // hbase-default.xml and hbase-site.xml
    Map<String, String> tableJobProperties = tableDesc.getJobProperties();
    String jobString = tableJobProperties.get(HCatConstants.HCAT_KEY_JOB_INFO);
    try {
        InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(jobString);
        HCatTableInfo tableInfo = inputJobInfo.getTableInfo();
        String qualifiedTableName = HBaseHCatStorageHandler.getFullyQualifiedHBaseTableName(tableInfo);
        jobProperties.put(TableInputFormat.INPUT_TABLE, qualifiedTableName);

        Configuration jobConf = getJobConf();
        addResources(jobConf, jobProperties);
        JobConf copyOfConf = new JobConf(jobConf);
        HBaseConfiguration.addHbaseResources(copyOfConf);
        //Getting hbase delegation token in getInputSplits does not work with PIG. So need to
        //do it here
        if (jobConf instanceof JobConf) { //Should be the case
            HBaseUtil.addHBaseDelegationToken(copyOfConf);
            ((JobConf) jobConf).getCredentials().addAll(copyOfConf.getCredentials());
        }

        String outputSchema = jobConf.get(HCatConstants.HCAT_KEY_OUTPUT_SCHEMA);
        jobProperties.put(TableInputFormat.SCAN_COLUMNS, getScanColumns(tableInfo, outputSchema));

        String serSnapshot = (String) inputJobInfo.getProperties()
                .get(HBaseConstants.PROPERTY_TABLE_SNAPSHOT_KEY);
        if (serSnapshot == null) {
            HCatTableSnapshot snapshot = HBaseRevisionManagerUtil.createSnapshot(
                    RevisionManagerConfiguration.create(copyOfConf), qualifiedTableName, tableInfo);
            jobProperties.put(HBaseConstants.PROPERTY_TABLE_SNAPSHOT_KEY, HCatUtil.serialize(snapshot));
        }

        //This adds it directly to the jobConf. Setting in jobProperties does not get propagated
        //to JobConf as of now as the jobProperties is maintained per partition
        //TODO: Remove when HCAT-308 is fixed
        addOutputDependencyJars(jobConf);
        jobProperties.put("tmpjars", jobConf.get("tmpjars"));

    } catch (IOException e) {
        throw new IllegalStateException("Error while configuring job properties", e);
    }
}

From source file:org.apache.hcatalog.hcatmix.load.HadoopLoadGenerator.java

License:Apache License

/**
 * Prepare input directory/jobConf and launch the hadoop job, for load testing
 *
 * @param confFileName The properties file for the task, should be available in the classpath
 * @param conf
 * @return
 * @throws IOException
 * @throws MetaException
 * @throws TException
 */
public SortedMap<Long, ReduceResult> runLoadTest(String confFileName, Configuration conf)
        throws Exception, MetaException, TException {
    JobConf jobConf;
    if (conf != null) {
        jobConf = new JobConf(conf);
    } else {
        jobConf = new JobConf(new Configuration());
    }
    InputStream confFileIS;
    try {
        confFileIS = HCatMixUtils.getInputStream(confFileName);
    } catch (Exception e) {
        LOG.error("Couldn't load configuration file " + confFileName);
        throw e;
    }
    Properties props = new Properties();
    try {
        props.load(confFileIS);
    } catch (IOException e) {
        LOG.error("Couldn't load properties file: " + confFileName, e);
        throw e;
    }

    LOG.info("Loading configuration file: " + confFileName);
    addToJobConf(jobConf, props, Conf.MAP_RUN_TIME_MINUTES);
    addToJobConf(jobConf, props, Conf.STAT_COLLECTION_INTERVAL_MINUTE);
    addToJobConf(jobConf, props, Conf.THREAD_INCREMENT_COUNT);
    addToJobConf(jobConf, props, Conf.THREAD_INCREMENT_INTERVAL_MINUTES);
    addToJobConf(jobConf, props, Conf.THREAD_COMPLETION_BUFFER_MINUTES);

    int numMappers = Integer
            .parseInt(props.getProperty(Conf.NUM_MAPPERS.propName, "" + Conf.NUM_MAPPERS.defaultValue));
    Path inputDir = new Path(props.getProperty(Conf.INPUT_DIR.propName, Conf.INPUT_DIR.defaultValueStr));
    Path outputDir = new Path(props.getProperty(Conf.OUTPUT_DIR.propName, Conf.OUTPUT_DIR.defaultValueStr));

    jobConf.setJobName(JOB_NAME);
    jobConf.setNumMapTasks(numMappers);
    jobConf.setMapperClass(HCatMapper.class);
    jobConf.setJarByClass(HCatMapper.class);
    jobConf.setReducerClass(HCatReducer.class);
    jobConf.setMapOutputKeyClass(LongWritable.class);
    jobConf.setMapOutputValueClass(IntervalResult.class);
    jobConf.setOutputKeyClass(LongWritable.class);
    jobConf.setOutputValueClass(ReduceResult.class);
    jobConf.setOutputFormat(SequenceFileOutputFormat.class);
    jobConf.set(Conf.TASK_CLASS_NAMES.getJobConfKey(),
            props.getProperty(Conf.TASK_CLASS_NAMES.propName, Conf.TASK_CLASS_NAMES.defaultValueStr));

    fs = FileSystem.get(jobConf);
    Path jarRoot = new Path("/tmp/hcatmix_jar_" + new Random().nextInt());
    HadoopUtils.uploadClasspathAndAddToJobConf(jobConf, jarRoot);
    fs.deleteOnExit(jarRoot);

    FileInputFormat.setInputPaths(jobConf, createInputFiles(inputDir, numMappers));
    if (fs.exists(outputDir)) {
        fs.delete(outputDir, true);
    }
    FileOutputFormat.setOutputPath(jobConf, outputDir);

    // Set up delegation token required for hiveMetaStoreClient in map task
    HiveConf hiveConf = new HiveConf(HadoopLoadGenerator.class);
    HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
    String tokenStr = hiveClient.getDelegationToken(UserGroupInformation.getCurrentUser().getUserName(),
            "mapred");
    Token<? extends AbstractDelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(tokenStr);
    token.setService(new Text(METASTORE_TOKEN_SIGNATURE));
    jobConf.getCredentials().addToken(new Text(METASTORE_TOKEN_KEY), token);

    // Submit the job, once the job is complete see output
    LOG.info("Submitted hadoop job");
    RunningJob j = JobClient.runJob(jobConf);
    LOG.info("JobID is: " + j.getJobName());
    if (!j.isSuccessful()) {
        throw new IOException("Job failed");
    }
    return readResult(outputDir, jobConf);
}

From source file:org.apache.hcatalog.hcatmix.load.tasks.HCatLoadTask.java

License:Apache License

@Override
public void configure(JobConf jobConf) throws Exception {
    Token token = jobConf.getCredentials().getToken(new Text(HadoopLoadGenerator.METASTORE_TOKEN_KEY));
    if (token == null) {
        throw new IllegalArgumentException("Delegation token needs to be set");
    }

    try {
        UserGroupInformation.getCurrentUser().addToken(token);
    } catch (IOException e) {
        LOG.info("Error adding token to user", e);
    }

    hiveConf = new HiveConf(Task.class);
    hiveConf.set(HIVE_CONF_TOKEN_KEY, HadoopLoadGenerator.METASTORE_TOKEN_SIGNATURE);
    hiveClient = new ThreadLocal<HiveMetaStoreClient>() {
        @Override
        protected HiveMetaStoreClient initialValue() {
            try {
                return new HiveMetaStoreClient(hiveConf);
            } catch (MetaException e) {
                throw new RuntimeException("Couldn't create HiveMetaStoreClient", e);
            }
        }
    };

    HiveTableSchema tableSchema = HCatMixUtils.getFirstTableFromConf(LOAD_TEST_HCAT_SPEC_FILE);
    dbName = tableSchema.getDatabaseName();
    tableName = tableSchema.getName();
    LOG.info("Table to do load test on is: " + dbName + "." + tableName);
}

From source file:org.apache.oozie.action.hadoop.HbaseCredentials.java

License:Apache License

private void obtainToken(final JobConf jobConf, Context context) throws IOException, InterruptedException {
    String user = context.getWorkflow().getUser();
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
    User u = User.create(ugi);
    // A direct doAs is required here vs. User#obtainAuthTokenForJob(...)
    // See OOZIE-2419 for more
    Token<AuthenticationTokenIdentifier> token = u
            .runAs(new PrivilegedExceptionAction<Token<AuthenticationTokenIdentifier>>() {
                public Token<AuthenticationTokenIdentifier> run() throws Exception {
                    return TokenUtil.obtainToken(jobConf);
                }
            });
    jobConf.getCredentials().addToken(token.getService(), token);
}

From source file:org.apache.oozie.action.hadoop.HCatCredentialHelper.java

License:Apache License

/**
 * This function sets the HCat token on the job conf
 * @param launcherJobConf - job conf
 * @param principal - principal for HCat server
 * @param server - server URI for HCat server
 * @throws Exception
 */
public void set(JobConf launcherJobConf, String principal, String server) throws Exception {
    try {
        HCatClient client = getHCatClient(launcherJobConf, principal, server);
        XLog.getLog(getClass())
                .debug("HCatCredentialHelper: set: User name for which token will be asked from HCat: "
                        + launcherJobConf.get(USER_NAME));
        String tokenStrForm = client.getDelegationToken(launcherJobConf.get(USER_NAME),
                UserGroupInformation.getLoginUser().getShortUserName());
        Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>();
        hcatToken.decodeFromUrlString(tokenStrForm);
        launcherJobConf.getCredentials().addToken(new Text("HCat Token"), hcatToken);
        XLog.getLog(getClass()).debug("Added the HCat token in job conf");
    } catch (Exception ex) {
        XLog.getLog(getClass()).debug("set Exception" + ex.getMessage());
        throw ex;
    }
}

From source file:org.apache.oozie.action.hadoop.Hive2Credentials.java

License:Apache License

@Override
public void addtoJobConf(JobConf jobconf, CredentialsProperties props, Context context) throws Exception {
    try {
        // load the driver
        Class.forName("org.apache.hive.jdbc.HiveDriver");

        String url = props.getProperties().get(HIVE2_JDBC_URL);
        if (url == null || url.isEmpty()) {
            throw new CredentialException(ErrorCode.E0510,
                    HIVE2_JDBC_URL + " is required to get hive server 2 credential");
        }
        String principal = props.getProperties().get(HIVE2_SERVER_PRINCIPAL);
        if (principal == null || principal.isEmpty()) {
            throw new CredentialException(ErrorCode.E0510,
                    HIVE2_SERVER_PRINCIPAL + " is required to get hive server 2 credential");
        }
        url = url + ";principal=" + principal;
        Connection con = DriverManager.getConnection(url);
        XLog.getLog(getClass()).debug("Connected successfully to " + url);
        // get delegation token for the given proxy user
        String tokenStr = ((HiveConnection) con).getDelegationToken(jobconf.get(USER_NAME), principal);
        XLog.getLog(getClass()).debug("Got token");
        con.close();

        Token<DelegationTokenIdentifier> hive2Token = new Token<DelegationTokenIdentifier>();
        hive2Token.decodeFromUrlString(tokenStr);
        jobconf.getCredentials().addToken(new Text("hive.server2.delegation.token"), hive2Token);
        XLog.getLog(getClass()).debug("Added the Hive Server 2 token in job conf");
    } catch (Exception e) {
        XLog.getLog(getClass()).warn("Exception in addtoJobConf", e);
        throw e;
    }
}

From source file:org.apache.oozie.action.hadoop.InsertTestToken.java

License:Apache License

@Override
public void addtoJobConf(JobConf jobconf, CredentialsProperties props, Context context) throws Exception {
    try {
        Token<DelegationTokenIdentifier> abctoken = new Token<DelegationTokenIdentifier>();
        jobconf.getCredentials().addToken(new Text("ABC Token"), abctoken);
        XLog.getLog(getClass()).debug("Added the ABC token in job conf");
    } catch (Exception e) {
        XLog.getLog(getClass()).warn("Exception in addtoJobConf", e);
        throw e;
    }
}