Example usage for org.apache.hadoop.mapred JobConf unset

List of usage examples for org.apache.hadoop.mapred JobConf unset

Introduction

On this page you can find example usage for org.apache.hadoop.mapred JobConf unset.

Prototype

public synchronized void unset(String name) 

Source Link

Document

Unset a previously set property.

Usage

From source file: com.cloudera.recordservice.hive.RecordServiceHiveInputFormat.java

License: Apache License

private void addSplitsForGroup(List<Path> dirs, TableScanOperator tableScan, JobConf conf,
        InputFormat inputFormat, Class<? extends InputFormat> inputFormatClass, int splits, TableDesc table,
        List<InputSplit> result) throws IOException {
    // Copy the table's job properties into the conf before configuring the scan.
    Utilities.copyTableJobPropertiesToConf(table, conf);

    // The table and database name to scan.
    // TODO: Commented out until there is a pluggable way to configure the SerDe.
    // Until then, create a separate table and set the job conf properties.
    // String fqTblName[] = table.getTableName().split("\\.");
    // conf.set("recordservice.table.name", table.getTableName());

    if (tableScan != null) {
        pushFilters(conf, tableScan);
        // Tell the RecordServiceRecordReader which columns are projected.
        String projectedCols = Joiner.on(",").join(tableScan.getNeededColumns());
        conf.set("recordservice.col.names", projectedCols);
    }

    // We read directly from the table, so drop the file-based input config.
    conf.unset(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR);
    conf.setInputFormat(inputFormat.getClass());

    // Generate the RecordService splits and log any file-backed ones.
    InputSplit[] generatedSplits = inputFormat.getSplits(conf, splits);
    for (InputSplit split : generatedSplits) {
        if (split instanceof FileSplit) {
            LOG.info("INPUT SPLIT: " + ((FileSplit) split).getPath().toString());
        }
    }

    // Wrap each InputSplit in a HiveInputSplitShim — a modified version of
    // HiveInputSplit that works around some issues with the base one.
    // TODO: Get changes incorporated into Hive.
    for (InputSplit split : generatedSplits) {
        result.add(new HiveInputSplitShim(dirs.get(0), split, inputFormatClass.getName()));
    }
}

From source file: org.apache.tez.mapreduce.hadoop.TestConfigTranslationMRToTez.java

License: Apache License

@Test(timeout = 5000)
// Verifies derived keys: the comparator keys themselves are never set, so their
// values must be derived from a fallback key (the map output key class).
public void testComplexKeys() {

    JobConf vertexConf = new JobConf();
    vertexConf.set(MRJobConfig.MAP_OUTPUT_KEY_CLASS, IntWritable.class.getName());

    // Ensure neither comparator is explicitly configured.
    vertexConf.unset(MRJobConfig.GROUP_COMPARATOR_CLASS);
    vertexConf.unset(MRJobConfig.KEY_COMPARATOR);

    MRHelpers.translateMRConfToTez(vertexConf);

    // Both comparators should fall back to the IntWritable comparator derived
    // from the map output key class.
    String expected = IntWritable.Comparator.class.getName();
    assertEquals(expected,
            ConfigUtils.getIntermediateOutputKeyComparator(vertexConf).getClass().getName());
    assertEquals(expected,
            ConfigUtils.getIntermediateInputKeyComparator(vertexConf).getClass().getName());
}