Example usage for org.apache.hadoop.mapreduce JobContext getReducerClass

Introduction

On this page you can find example usage of org.apache.hadoop.mapreduce.JobContext.getReducerClass.

Prototype

public Class<? extends Reducer<?, ?, ?, ?>> getReducerClass() throws ClassNotFoundException;

Document

Get the Reducer class for the job.
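
Below is a minimal, self-contained sketch of the typical call pattern: resolve the configured Reducer class from a JobContext and instantiate it reflectively. The SumReducer class and the job setup are illustrative assumptions, not part of the usage examples below; only getReducerClass() and the reflective instantiation mirror them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.util.ReflectionUtils;

public class GetReducerClassSketch {

    // Hypothetical reducer, present only so the job has a class to return.
    public static class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setReducerClass(SumReducer.class);

        // Job implements JobContext in the new (mapreduce) API, so the job
        // itself can serve as the context.
        JobContext jobContext = job;

        // Throws ClassNotFoundException if the configured class cannot be loaded.
        Class<? extends Reducer<?, ?, ?, ?>> reducerClass = jobContext.getReducerClass();

        // Instantiate reflectively, as both usage examples below do.
        Reducer<?, ?, ?, ?> reducer = ReflectionUtils.newInstance(reducerClass, conf);
        System.out.println("Reducer: " + reducer.getClass().getName());
    }
}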

Usage

From source file: com.scaleoutsoftware.soss.hserver.hadoop.ReducerWrapperMapreduce.java

License: Apache License
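
In this example, a reducer wrapper resolves the job's Reducer and OutputFormat classes from a JobContext, instantiates both reflectively, and then wires the record writer, output committer, serializers, and reducer context around them.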

public ReducerWrapperMapreduce(HServerInvocationParameters invocationParameters, int hadoopPartition, int appId,
        int region, boolean sort) throws IOException, ClassNotFoundException, InterruptedException {
    this.invocationParameters = invocationParameters;
    Configuration configuration = (Configuration) invocationParameters.getConfiguration();
    hadoopVersionSpecificCode = HadoopVersionSpecificCode.getInstance(invocationParameters.getHadoopVersion(),
            configuration);
    JobID jobID = (JobID) invocationParameters.getJobId();

    // Set up the task ID and the job/task contexts for this partition
    TaskAttemptID id = hadoopVersionSpecificCode.createTaskAttemptId(jobID, false, hadoopPartition);
    JobContext jobContext = hadoopVersionSpecificCode.createJobContext(new JobConf(configuration), jobID);
    taskContext = hadoopVersionSpecificCode.createTaskAttemptContext(configuration, id);

    // Resolve the job's configured Reducer class and instantiate it reflectively.
    reducer = (org.apache.hadoop.mapreduce.Reducer<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils
            .newInstance(jobContext.getReducerClass(), configuration);

    OutputFormat outputFormat = ReflectionUtils.newInstance(jobContext.getOutputFormatClass(), configuration);

    recordWriter = (org.apache.hadoop.mapreduce.RecordWriter<OUTKEY, OUTVALUE>) outputFormat
            .getRecordWriter(taskContext);

    committer = outputFormat.getOutputCommitter(taskContext);
    committer.setupTask(taskContext);

    // The reducer's input types are the job's map output key/value classes.
    Class<INKEY> keyClass = (Class<INKEY>) jobContext.getMapOutputKeyClass();
    WritableSerializerDeserializer<INKEY> firstKeySerializer = new WritableSerializerDeserializer<INKEY>(
            keyClass, null);
    WritableSerializerDeserializer<INKEY> secondKeySerializer = new WritableSerializerDeserializer<INKEY>(
            keyClass, null);
    Class<INVALUE> valueClass = (Class<INVALUE>) jobContext.getMapOutputValueClass();
    WritableSerializerDeserializer<INVALUE> valueSerializer = new WritableSerializerDeserializer<INVALUE>(
            valueClass, null);

    DataGridReaderParameters<INKEY, INVALUE> params = new DataGridReaderParameters<INKEY, INVALUE>(region,
            appId, HServerParameters.getSetting(REDUCE_USEMEMORYMAPPEDFILES, configuration) > 0,
            firstKeySerializer, valueSerializer, invocationParameters.getSerializationMode(),
            secondKeySerializer, keyClass, valueClass, sort,
            HServerParameters.getSetting(REDUCE_CHUNKSTOREADAHEAD, configuration),
            1024 * HServerParameters.getSetting(REDUCE_INPUTCHUNKSIZE_KB, configuration),
            HServerParameters.getSetting(REDUCE_CHUNKREADTIMEOUT, configuration));
    DataGridChunkedCollectionReader<INKEY, INVALUE> transport = DataGridChunkedCollectionReader
            .getGridReader(params);

    context = hadoopVersionSpecificCode.getReducerContext(configuration, id, committer, recordWriter, transport,
            null);

}

From source file: edu.uci.ics.hyracks.dataflow.hadoop.HadoopReducerOperatorDescriptor.java

License: Apache License
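
In this example, getReducerClass() supplies the new-API reducer class when the operator runs against the new MapReduce API; the old-API and combiner cases fall back to the corresponding JobConf accessors.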

private Object createReducer() throws Exception {
    if (reducerClass != null) {
        return ReflectionUtils.newInstance(reducerClass, getJobConf());
    } else {
        Object reducer;
        if (!useAsCombiner) {
            if (getJobConf().getUseNewReducer()) {
                // New-API path: resolve via JobContext.getReducerClass().
                JobContext jobContext = new ContextFactory().createJobContext(getJobConf());
                reducerClass = (Class<? extends org.apache.hadoop.mapreduce.Reducer<?, ?, ?, ?>>) jobContext
                        .getReducerClass();
            } else {
                reducerClass = (Class<? extends Reducer>) getJobConf().getReducerClass();
            }
        } else {
            if (getJobConf().getUseNewReducer()) {
                // Combiner path: resolve via JobContext.getCombinerClass() instead.
                JobContext jobContext = new ContextFactory().createJobContext(getJobConf());
                reducerClass = (Class<? extends org.apache.hadoop.mapreduce.Reducer<?, ?, ?, ?>>) jobContext
                        .getCombinerClass();
            } else {
                reducerClass = (Class<? extends Reducer>) getJobConf().getCombinerClass();
            }
        }
        reducer = getHadoopClassFactory().createReducer(reducerClass.getName(), getJobConf());
        return reducer;
    }
}
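
Note that ContextFactory and getHadoopClassFactory() come from the surrounding Hyracks code rather than from Hadoop itself. The Hadoop-level point the method illustrates is that JobContext.getReducerClass() (and getCombinerClass()) serve the new org.apache.hadoop.mapreduce API, while JobConf exposes the equivalent accessors for the old org.apache.hadoop.mapred API.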