Example usage for org.apache.hadoop.mapreduce JobContext getMapOutputValueClass

Introduction

On this page you can find example usage for org.apache.hadoop.mapreduce JobContext getMapOutputValueClass.

Prototype

public Class<?> getMapOutputValueClass();

Document

Get the value class for the map output data.
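
If the map output value class is not set explicitly, Hadoop falls back to the job's final output value class; this allows the intermediate value type to differ from the final output type. Below is a minimal, self-contained sketch of a typical call; the class name MapOutputValueClassExample is illustrative only, while the Hadoop calls shown (Job.getInstance, setMapOutputValueClass, ReflectionUtils.newInstance) are standard mapreduce API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.util.ReflectionUtils;

public class MapOutputValueClassExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "example");

        // Declare the intermediate (map output) types explicitly.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Job is a JobContext, so the getter is available directly.
        JobContext context = job;
        Class<?> valueClass = context.getMapOutputValueClass(); // IntWritable.class

        // Framework code (a combiner, shuffle stage, or reducer wrapper) can
        // instantiate values of the configured type reflectively:
        Object value = ReflectionUtils.newInstance(valueClass, conf);
        System.out.println("Map output value class: " + valueClass.getName());
    }
}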

Usage

From source file: com.scaleoutsoftware.soss.hserver.hadoop.ReducerWrapperMapreduce.java

License: Apache License

public ReducerWrapperMapreduce(HServerInvocationParameters invocationParameters, int hadoopPartition, int appId,
        int region, boolean sort) throws IOException, ClassNotFoundException, InterruptedException {
    this.invocationParameters = invocationParameters;
    Configuration configuration = (Configuration) invocationParameters.getConfiguration();
    hadoopVersionSpecificCode = HadoopVersionSpecificCode.getInstance(invocationParameters.getHadoopVersion(),
            configuration);
    JobID jobID = (JobID) invocationParameters.getJobId();

    // Set up the task attempt ID and the job/task contexts
    TaskAttemptID id = hadoopVersionSpecificCode.createTaskAttemptId(jobID, false, hadoopPartition);
    JobContext jobContext = hadoopVersionSpecificCode.createJobContext(new JobConf(configuration), jobID);
    taskContext = hadoopVersionSpecificCode.createTaskAttemptContext(configuration, id);

    // Instantiate the job's Reducer implementation reflectively
    reducer = (org.apache.hadoop.mapreduce.Reducer<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils
            .newInstance(jobContext.getReducerClass(), configuration);

    // Create the output format, record writer, and committer for this task
    OutputFormat outputFormat = ReflectionUtils.newInstance(jobContext.getOutputFormatClass(), configuration);

    recordWriter = (org.apache.hadoop.mapreduce.RecordWriter<OUTKEY, OUTVALUE>) outputFormat
            .getRecordWriter(taskContext);

    committer = outputFormat.getOutputCommitter(taskContext);
    committer.setupTask(taskContext);

    // Build Writable serializers from the configured map output key and value classes
    Class<INKEY> keyClass = (Class<INKEY>) jobContext.getMapOutputKeyClass();
    WritableSerializerDeserializer<INKEY> firstKeySerializer = new WritableSerializerDeserializer<INKEY>(
            keyClass, null);
    WritableSerializerDeserializer<INKEY> secondKeySerializer = new WritableSerializerDeserializer<INKEY>(
            keyClass, null);
    Class<INVALUE> valueClass = (Class<INVALUE>) jobContext.getMapOutputValueClass();
    WritableSerializerDeserializer<INVALUE> valueSerializer = new WritableSerializerDeserializer<INVALUE>(
            valueClass, null);

    // Configure the data grid reader that supplies the (optionally sorted) map output
    DataGridReaderParameters<INKEY, INVALUE> params = new DataGridReaderParameters<INKEY, INVALUE>(region,
            appId, HServerParameters.getSetting(REDUCE_USEMEMORYMAPPEDFILES, configuration) > 0,
            firstKeySerializer, valueSerializer, invocationParameters.getSerializationMode(),
            secondKeySerializer, keyClass, valueClass, sort,
            HServerParameters.getSetting(REDUCE_CHUNKSTOREADAHEAD, configuration),
            1024 * HServerParameters.getSetting(REDUCE_INPUTCHUNKSIZE_KB, configuration),
            HServerParameters.getSetting(REDUCE_CHUNKREADTIMEOUT, configuration));
    DataGridChunkedCollectionReader<INKEY, INVALUE> transport = DataGridChunkedCollectionReader
            .getGridReader(params);

    // Create the reducer context backed by the grid reader
    context = hadoopVersionSpecificCode.getReducerContext(configuration, id, committer, recordWriter, transport,
            null);

}
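
In this example, getMapOutputValueClass() supplies the intermediate value type (valueClass), which the wrapper uses both to construct a matching WritableSerializerDeserializer and to parameterize the typed grid reader that feeds shuffled map output to the reducer.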