Example usage for org.apache.hadoop.mapred JobConf get

Introduction

On this page you can find usage examples for org.apache.hadoop.mapred JobConf get, collected from open-source projects.

Prototype

public String get(String name) 

Document

Get the value of the name property, or null if no such property exists.
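
As a quick illustration, here is a minimal, self-contained sketch of that null-on-missing behavior; the class name JobConfGetExample and the property keys are made up for this example.

import org.apache.hadoop.mapred.JobConf;

public class JobConfGetExample {
    public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.set("my.example.key", "42"); // hypothetical property name

        // get(String) returns the stored value...
        String value = conf.get("my.example.key");
        System.out.println("my.example.key = " + value); // prints 42

        // ...and null when the property was never set, so callers should
        // check before dereferencing the result
        String missing = conf.get("no.such.property");
        System.out.println("missing == null: " + (missing == null)); // true
    }
}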

Usage

From source file: com.ibm.bi.dml.runtime.util.MapReduceTool.java

License: Open Source License

public static int getUniqueTaskId(JobConf job) {
    //TODO: investigate ID pattern, required for parallel jobs
    /*String nodePrefix = job.get("mapred.task.id"); 
    return IDHandler.extractIntID(nodePrefix);*/

    // task attempt IDs look like attempt_<timestamp>_<jobnum>_<m|r>_<tasknum>_<attempt>;
    // extract the <tasknum> component between the last two underscores
    String nodePrefix = job.get("mapred.task.id");
    int j = nodePrefix.lastIndexOf("_");
    int i = nodePrefix.lastIndexOf("_", j - 1);
    nodePrefix = nodePrefix.substring(i + 1, j);
    return Integer.valueOf(nodePrefix);
}

From source file: com.ibm.bi.dml.runtime.util.MapReduceTool.java

License: Open Source License

public static String getGloballyUniqueName(JobConf job) {
    // the task attempt ID embeds job, task type, task, and attempt numbers,
    // so it is unique across all tasks of all jobs
    return job.get("mapred.task.id");
}

From source file: com.ibm.jaql.fail.io.ErrorInputFormat.java

License: Apache License

@Override
public RecordReader<LongWritable, ErrorWritable> getRecordReader(InputSplit split, JobConf job,
        Reporter reporter) throws IOException {
    // fail on OPEN
    String val = job.get(ERROR_NAME);
    Error err = Error.valueOf(val);
    if (err.equals(Error.OPEN)) {
        throw new IOException("Intentional error on open");
    }

    return new ErrorRecordReader(job, (ErrorSplit) split);
}
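
The producer side of this pattern is symmetric: the driver stores an enum constant by name in the job configuration, and the task side recovers it with get plus valueOf, as above. A minimal sketch, using a stand-in property key and enum rather than the actual ErrorInputFormat definitions:

import org.apache.hadoop.mapred.JobConf;

public class ErrorNameRoundTrip {
    // stand-ins for ErrorInputFormat.ERROR_NAME and its Error enum
    static final String ERROR_NAME = "fail.error";

    enum Error { NONE, OPEN, SPLIT }

    public static void main(String[] args) {
        JobConf job = new JobConf();
        job.set(ERROR_NAME, Error.OPEN.name()); // store the enum by name

        // read it back exactly as getRecordReader(...) does above
        Error err = Error.valueOf(job.get(ERROR_NAME));
        System.out.println("configured failure point: " + err); // OPEN
    }
}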

From source file: com.ibm.jaql.fail.io.ErrorInputFormat.java

License: Apache License

@Override
public InputSplit[] getSplits(JobConf arg0, int arg1) throws IOException {
    String val = arg0.get(ERROR_NAME);
    Error err = Error.valueOf(val);
    // fail on SPLIT,
    if (err.equals(Error.SPLIT)) {
        throw new IOException("Intentional error on split");
    }

    int nextCount = arg0.getInt(ERROR_NEXT_MAX, 1);

    // get the original splits
    InputSplit[] splits = super.getSplits(arg0, arg1);
    int len = splits.length;
    ArrayList<InputSplit> newSplits = new ArrayList<InputSplit>(len + 1);
    for (int i = 0; i < len; i++) {
        newSplits.add(new ErrorSplit((FileSplit) splits[i], arg0, err, nextCount));
    }
    // generate bogus split for BOGUS_SPLIT
    if (err.equals(Error.BOGUS_SPLIT)) {

        newSplits.add(new ErrorSplit(new FileSplit(new Path("/bogus/file"), 0, 10, arg0), arg0, Error.NONE,
                nextCount));
    }

    return newSplits.toArray(new InputSplit[newSplits.size()]);
}

From source file: com.ibm.jaql.fail.io.ErrorOutputFormat.java

License: Apache License

@Override
public RecordWriter<LongWritable, ErrorWritable> getRecordWriter(FileSystem arg0, JobConf arg1, String arg2,
        Progressable arg3) throws IOException {
    String val = arg1.get(ERROR_NAME);
    Error e = Error.valueOf(val);
    // if OPEN, fail
    if (e.equals(Error.OPEN)) {
        throw new IOException("Intentional error on open");
    }
    int max = arg1.getInt(ERROR_NEXT_MAX, 1);
    // Wrap in an ErrorRecordWriter
    return new ErrorRecordWriter(super.getRecordWriter(arg0, arg1, arg2, arg3), e, max);
}

From source file: com.ibm.jaql.fail.io.ErrorOutputFormat.java

License: Apache License

@Override
public void checkOutputSpecs(FileSystem arg0, JobConf arg1)
        throws FileAlreadyExistsException, InvalidJobConfException, IOException {
    String val = arg1.get(ERROR_NAME);
    Error e = Error.valueOf(val);
    // if CONFIG, fail
    if (e.equals(Error.CONFIG)) {
        throw new IOException("Intentional error on config");
    }
    super.checkOutputSpecs(arg0, arg1);
}

From source file: com.ibm.jaql.io.hadoop.ArrayInputFormat.java

License: Apache License

public void validateInput(JobConf job) throws IOException {
    // verify that an array exists
    if (job.get(JOB_ARRAY_NAME) == null)
        throw new IOException("variable name: " + JOB_ARRAY_NAME + ", not set");
}

From source file: com.ibm.jaql.io.hadoop.CompositeInputAdapter.java

License: Apache License

public void configure(JobConf conf) {
    Globals.setJobConf(conf);
    try {
        RegistryUtil.readConf(conf, HadoopAdapter.storeRegistryVarName, AdapterStore.getStore());
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    // read in the adapter array from conf
    try {
        this.args = ConfUtil.readConfArray(conf, ConfSetter.CONFINOPTIONS_NAME);
        this.addIndex = Boolean.parseBoolean(conf.get(ADD_INDEX_NAME));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
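
A side note on the Boolean.parseBoolean(conf.get(...)) line: JobConf inherits getBoolean(String, boolean) from Configuration, so the same read can be written with an explicit default. A sketch, assuming the stored value is a plain lowercase "true"/"false" string:

// roughly equivalent to Boolean.parseBoolean(conf.get(ADD_INDEX_NAME));
// both yield false when the property is unset
boolean addIndex = conf.getBoolean(ADD_INDEX_NAME, false);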

From source file: com.ibm.jaql.io.hadoop.CompositeOutputAdapter.java

License: Apache License

@Override
public void configure(JobConf conf) {
    // TODO: is this needed? How should it get done once?
    //    Globals.setJobConf(conf);
    //    try
    //    {
    //      RegistryUtil.readConf(conf, HadoopAdapter.storeRegistryVarName,
    //          AdapterStore.getStore());
    //    }
    //    catch (Exception e)
    //    {
    //      throw new RuntimeException(e);
    //    }

    try {
        // load the registry
        RegistryUtil.readConf(conf, HadoopAdapter.storeRegistryVarName, AdapterStore.getStore());

        // read in the adapter array from conf and initialize it
        descriptors = ConfUtil.readConfArray(conf, ConfSetter.CONFOUTOPTIONS_NAME);
        int numOutputs = (int) descriptors.count();
        outputs = new HadoopOutputAdapter[numOutputs];
        subconfs = new JobConf[numOutputs];
        for (int i = 0; i < outputs.length; i++) {
            JsonValue fd = descriptors.get(i);
            outputs[i] = (HadoopOutputAdapter) AdapterStore.getStore().output.getAdapter(fd);
            subconfs[i] = restoreConf(conf, conf.get(SUBCONF_NAME + i));
            outputs[i].configure(subconfs[i]);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: com.ibm.jaql.io.hadoop.Db2DpfTableInputFormat.java

License: Apache License

public InputSplit[] getSplits(JobConf conf, int numSplits) throws IOException {
    try {
        init(conf);
        String schema = conf.get(SCHEMA_KEY);
        String table = conf.get(TABLE_KEY);
        String columns = conf.get(COLUMNS_KEY, "*");
        String where = conf.get(WHERE_KEY, "");

        String schemaTable = "\"" + schema + "\".\"" + table + "\"";
        String tablePred = "TABSCHEMA='" + schema + "' and TABNAME='" + table + "'";

        String keyColQuery = "select COLNAME from syscat.columns where " + tablePred + " and PARTKEYSEQ = 1";

        Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery(keyColQuery);
        if (!rs.next()) {
            // TODO: we could revert to primary key partitioning instead of raising an error
            throw new IOException("partitioning key not found for " + schemaTable);
        }
        String keyCol = rs.getString(1);
        rs.close();
        stmt.close();

        String partQuery = " select p.dbpartitionnum "
                + " from syscat.tables t, syscat.tablespaces ts, syscat.dbpartitiongroupdef p "
                + " where t.tbspaceid = ts.tbspaceid and ts.dbpgname = p.dbpgname "
                + "   and t.partition_mode = 'H' and " + tablePred;

        stmt = conn.createStatement();
        rs = stmt.executeQuery(partQuery);

        ArrayList<InputSplit> splits = new ArrayList<InputSplit>();

        if (!rs.next()) {
            throw new IOException("no partitions found for table \"" + schema + "\".\"" + table + "\"");
        }

        String query = "select " + columns + " from " + schemaTable + " where SYSIBM.DBPARTITIONNUM(\"" + keyCol
                + "\") = CURRENT DBPARTITIONNUM\n";
        if (!where.equals("")) {
            query += " and (" + where + ")\n";
        }

        do {
            int partitionId = rs.getInt(1);
            splits.add(new DpfSplit(query, partitionId));
        } while (rs.next());

        rs.close();
        stmt.close();
        conn.close();

        return splits.toArray(new InputSplit[splits.size()]);
    } catch (SQLException e) {
        throw new UndeclaredThrowableException(e); // IOException(e);
    }
}
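
Note the two-argument overload used above for COLUMNS_KEY and WHERE_KEY: get(String name, String defaultValue) returns the supplied default instead of null when the property is unset. A minimal sketch with a made-up key:

JobConf conf = new JobConf();
String columns = conf.get("db.columns", "*"); // hypothetical, unset key
System.out.println(columns); // prints "*"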