Example usage for org.apache.hadoop.mapred JobConf set

Introduction

On this page you can find example usages of the org.apache.hadoop.mapred.JobConf.set method.

Prototype

public void set(String name, String value) 

Document

Set the value of the name property.
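As a quick self-contained illustration (not taken from the source files below), the following minimal sketch sets a property and reads it back; the key "example.greeting" is a made-up placeholder.

import org.apache.hadoop.mapred.JobConf;

public class JobConfSetExample {
    public static void main(String[] args) {
        JobConf job = new JobConf();
        // Store a string value under a property name.
        // "example.greeting" is a hypothetical key used only for illustration.
        job.set("example.greeting", "hello");
        // get(name) returns the stored value, or null if the property is unset.
        System.out.println(job.get("example.greeting")); // prints "hello"
    }
}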

Usage

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static void setCM_N_COMInstructions(JobConf job, String cmInstructions) {
    job.set(CM_N_COV_INSTRUCTIONS_CONFIG, cmInstructions);
}

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static void setGroupedAggInstructions(JobConf job, String grpaggInstructions) {
    job.set(GROUPEDAGG_INSTRUCTIONS_CONFIG, grpaggInstructions);
}

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static void setRandInstructions(JobConf job, String randInstructions) {
    job.set(RAND_INSTRUCTIONS_CONFIG, randInstructions);
}

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static void setIntermediateMatrixIndexes(JobConf job, HashSet<Byte> indexes) {
    job.set(INTERMEDIATE_INDEXES_CONFIG, getIndexesString(indexes));
}

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static void setupDistCacheInputs(JobConf job, String indices, String pathsString,
        ArrayList<String> paths) {
    job.set(DISTCACHE_INPUT_INDICES, indices);
    job.set(DISTCACHE_INPUT_PATHS, pathsString);

    for (String spath : paths) {
        Path p = new Path(spath);
        DistributedCache.addCacheFile(p.toUri(), job);
        DistributedCache.createSymlink(job);
    }
}
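For context on how files registered this way are consumed, here is a minimal task-side sketch, not part of MRJobConfiguration, that lists the localized cache files via the standard DistributedCache API.

import java.io.IOException;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class DistCacheReader {
    // Typically called from a mapper's or reducer's configure(JobConf) method.
    public static void listCacheFiles(JobConf job) throws IOException {
        Path[] localFiles = DistributedCache.getLocalCacheFiles(job);
        if (localFiles != null) {
            for (Path p : localFiles) {
                System.out.println("localized cache file: " + p);
            }
        }
    }
}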

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static void setInputPartitioningInfo(JobConf job, PDataPartitionFormat[] pformats) {
    job.set(PARTITIONING_OUTPUT_FORMAT_CONFIG, MRJobConfiguration.getCSVString(pformats));
}

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static void updateResultDimsUnknown(JobConf job, byte[] updDimsUnknown) {
    job.set(RESULT_DIMS_UNKNOWN_CONFIG, MRJobConfiguration.getIndexesString(updDimsUnknown));
}

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static void setUpMultipleOutputs(JobConf job, byte[] resultIndexes, byte[] resultDimsUnknown,
        String[] outputs, OutputInfo[] outputInfos, boolean inBlockRepresentation, boolean mayContainCtable)
        throws Exception {
    if (resultIndexes.length != outputs.length)
        throw new Exception("number of outputs and result indexes does not match");
    if (outputs.length != outputInfos.length)
        throw new Exception("number of outputs and outputInfos does not match");

    job.set(RESULT_INDEXES_CONFIG, MRJobConfiguration.getIndexesString(resultIndexes));
    job.set(RESULT_DIMS_UNKNOWN_CONFIG, MRJobConfiguration.getIndexesString(resultDimsUnknown));
    job.setStrings(OUTPUT_MATRICES_DIRS_CONFIG, outputs);
    job.setOutputCommitter(MultipleOutputCommitter.class);

    for (int i = 0; i < outputs.length; i++) {
        MapReduceTool.deleteFileIfExistOnHDFS(new Path(outputs[i]), job);
        if (mayContainCtable && resultDimsUnknown[i] == (byte) 1) {
            setOutputInfo(job, i, outputInfos[i], false);
        } else {
            setOutputInfo(job, i, outputInfos[i], inBlockRepresentation);
        }
        MultipleOutputs.addNamedOutput(job, Integer.toString(i), outputInfos[i].outputFormatClass,
                outputInfos[i].outputKeyClass, outputInfos[i].outputValueClass);
    }
    job.setOutputFormat(NullOutputFormat.class);

    // configure temp output
    Path tempOutputPath = new Path(constructTempOutputFilename());
    FileOutputFormat.setOutputPath(job, tempOutputPath);
    MapReduceTool.deleteFileIfExistOnHDFS(tempOutputPath, job);
}
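The named outputs registered above (one per result index, named Integer.toString(i)) are written to on the task side. The following is a minimal sketch of that counterpart, assuming the old-API MultipleOutputs from org.apache.hadoop.mapred.lib; it is not code from this project.

import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.MultipleOutputs;

public class NamedOutputWriter {
    private MultipleOutputs multipleOutputs;

    // Typically called from a reducer's configure(JobConf).
    public void configure(JobConf job) {
        multipleOutputs = new MultipleOutputs(job);
    }

    // Writes a record to the named output "0", matching the
    // Integer.toString(i) names registered via addNamedOutput above.
    @SuppressWarnings("unchecked")
    public void write(Object key, Object value, Reporter reporter) throws IOException {
        multipleOutputs.getCollector("0", reporter).collect(key, value);
    }

    public void close() throws IOException {
        multipleOutputs.close();
    }
}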

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

/**
 * Generates a sort partition filename and stores it in the job configuration.
 *
 * @param job job configuration to update
 * @return the generated partition filename
 */
public static String setUpSortPartitionFilename(JobConf job) {
    String pfname = constructPartitionFilename();
    job.set(SORT_PARTITION_FILENAME, pfname);

    return pfname;
}

From source file:com.ibm.bi.dml.runtime.matrix.mapred.MRJobConfiguration.java

License:Open Source License

public static HashSet<Byte> setUpOutputIndexesForMapper(JobConf job, byte[] inputIndexes,
        String randInstructions, String instructionsInMapper, String reblockInstructions,
        String aggInstructionsInReducer, String otherInstructionsInReducer, byte[] resultIndexes)
        throws DMLUnsupportedOperationException, DMLRuntimeException {
    // find out which results need to be sent to the reducers

    HashSet<Byte> indexesInMapper = new HashSet<Byte>();
    for (byte b : inputIndexes)
        indexesInMapper.add(b);

    DataGenMRInstruction[] dataGenIns = MRInstructionParser.parseDataGenInstructions(randInstructions);
    getIndexes(dataGenIns, indexesInMapper);

    MRInstruction[] insMapper = MRInstructionParser.parseMixedInstructions(instructionsInMapper);
    getIndexes(insMapper, indexesInMapper);

    ReblockInstruction[] reblockIns = MRInstructionParser.parseReblockInstructions(reblockInstructions);
    getIndexes(reblockIns, indexesInMapper);

    MRInstruction[] insReducer = MRInstructionParser.parseAggregateInstructions(aggInstructionsInReducer);
    HashSet<Byte> indexesInReducer = new HashSet<Byte>();
    getIndexes(insReducer, indexesInReducer);

    insReducer = MRInstructionParser.parseMixedInstructions(otherInstructionsInReducer);
    getIndexes(insReducer, indexesInReducer);

    for (byte ind : resultIndexes)
        indexesInReducer.add(ind);

    indexesInMapper.retainAll(indexesInReducer);

    job.set(OUTPUT_INDEXES_IN_MAPPER_CONFIG, getIndexesString(indexesInMapper));
    return indexesInMapper;
}
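A task that consumes this property must parse the stored string back into a set of byte indexes. The sketch below is hypothetical: the actual encoding produced by getIndexesString is not shown on this page, so it assumes a comma-separated list, and getOutputIndexes is a made-up helper name.

import java.util.HashSet;
import org.apache.hadoop.mapred.JobConf;

public class MapperOutputIndexes {
    // Hypothetical reverse of getIndexesString, assuming a
    // comma-separated encoding such as "1,3,7".
    public static HashSet<Byte> getOutputIndexes(JobConf job, String configKey) {
        HashSet<Byte> indexes = new HashSet<Byte>();
        String str = job.get(configKey);
        if (str != null && !str.isEmpty()) {
            for (String token : str.split(",")) {
                indexes.add(Byte.parseByte(token.trim()));
            }
        }
        return indexes;
    }
}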