Example usage for org.apache.hadoop.mapreduce Reducer (subclass usage)

Introduction

On this page you can find examples of how org.apache.hadoop.mapreduce.Reducer is subclassed in real source files; a minimal sketch of the common pattern follows.
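
Every entry below follows the same shape: extend Reducer with four type parameters (input key/value, output key/value) and override reduce(KEYIN, Iterable&lt;VALUEIN&gt;, Context). The class name and the value-concatenation logic in this sketch are illustrative only and are not taken from any of the projects listed.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/** Illustrative only: joins all values seen for a key into one comma-separated record. */
public class ConcatValuesReducer extends Reducer<Text, Text, Text, Text> {

    private final Text result = new Text();

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        StringBuilder joined = new StringBuilder();
        for (Text value : values) {
            if (joined.length() > 0) {
                joined.append(',');
            }
            joined.append(value.toString());
        }
        result.set(joined.toString());
        context.write(key, result);      // one output record per input key
    }
}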

Usage

From source file org.apache.ben.FileCleaningReducer.java

public class FileCleaningReducer extends Reducer<LongWritable, Text, LongWritable, Text> {

    public void reduce(LongWritable key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
    }
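
The excerpt above stops before the method body and omits the imports. For reference, the class as shown needs roughly the following imports to compile; the actual file-cleaning logic is not reproduced here.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class FileCleaningReducer extends Reducer<LongWritable, Text, LongWritable, Text> {

    @Override
    public void reduce(LongWritable key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // The original cleaning logic is not included in this excerpt.
    }
}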

From source file org.apache.blur.mapreduce.lib.DefaultBlurReducer.java

/**
 * This class is to be used in conjunction with {@link BlurOutputFormat}
 * .</br></br>
 * 
 * Here is a basic example of how to use both the {@link BlurOutputFormat} and
 * the {@link DefaultBlurReducer} together to build indexes.</br></br>
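
The usage example promised by this javadoc is cut off in the excerpt. Below is a hedged sketch of the kind of driver code it refers to, using only the standard org.apache.hadoop.mapreduce.Job API; any Blur-specific table or index configuration that BlurOutputFormat may require is omitted because it is not shown in this excerpt.

import org.apache.blur.mapreduce.lib.BlurOutputFormat;
import org.apache.blur.mapreduce.lib.DefaultBlurReducer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

// Sketch only: wires DefaultBlurReducer and BlurOutputFormat into a job with the
// generic Job API. Mapper, input format, and key/value types depend on the data
// being indexed and are left out here.
public class BlurIndexDriver {

    public static Job createJob(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "build-blur-indexes");
        job.setJarByClass(BlurIndexDriver.class);
        job.setReducerClass(DefaultBlurReducer.class);
        job.setOutputFormatClass(BlurOutputFormat.class);
        return job;
    }
}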

From source file org.apache.blur.mapreduce.lib.update.UpdateReducer.java

public class UpdateReducer extends Reducer<IndexKey, IndexValue, Text, BlurMutate> {

    private static final String IGNORED_EXISTING_ROWS = "Ignored Existing Rows";
    private static final String MULTIPLE_RECORD_W_SAME_RECORD_ID = "Multiple Record w/ Same Record Id";
    private static final String INDEX_VALUES = "IndexValues";
    private static final String NULL_BLUR_RECORDS = "NULL Blur Records";
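
The constants above are names of Hadoop counters. The following is a hedged sketch of how such counters are typically incremented from reduce() via Context.getCounter; the counter group name and the skip logic below are assumptions for illustration, not taken from the Blur source.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Illustrative pattern only: record per-condition statistics with named counters.
public class CountingReducer extends Reducer<Text, Text, Text, Text> {

    private static final String COUNTER_GROUP = "Blur Update";            // assumed group name
    private static final String NULL_BLUR_RECORDS = "NULL Blur Records";

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        for (Text value : values) {
            if (value.getLength() == 0) {
                // Bump a named counter; it shows up in the job's counter report.
                context.getCounter(COUNTER_GROUP, NULL_BLUR_RECORDS).increment(1);
                continue;
            }
            context.write(key, value);
        }
    }
}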

From source file org.apache.cassandra.hadoop.ColumnFamilyOutputReducer.java

/**
 * The <code>ColumnFamilyOutputReducer</code> reduces a &lt;key, values&gt;
 * pair, where the value is a generic iterable type, into a list of columns that
 * need to be mutated for that key, where each column corresponds to an element
 * in the value.
 * 
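
Stripped of the Cassandra-specific column and mutation types (which this excerpt does not show), the pattern the javadoc describes is: build one element per value and emit the whole collection for the key. A generic, hedged sketch using plain Text values as stand-ins:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Generic sketch only: the List<String> below stands in for the list of column
// mutations the real ColumnFamilyOutputReducer builds for each key.
public class CollectPerKeyReducer extends Reducer<Text, Text, Text, Text> {

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        List<String> columns = new ArrayList<>();
        for (Text value : values) {
            columns.add(value.toString());   // one "column" per value, as the javadoc describes
        }
        // Emit the collected elements as a single record for this key.
        context.write(key, new Text(String.join(",", columns)));
    }
}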

From source file org.apache.crunch.impl.mr.run.CrunchReducer.java

public class CrunchReducer extends Reducer<Object, Object, Object, Object> {

    private static final Log LOG = LogFactory.getLog(CrunchReducer.class);

    private RTNode node;
    private CrunchTaskContext ctxt;
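
The fields above hold per-task state. In the new mapreduce API such state is normally created in the Reducer's setup(Context) lifecycle hook, which runs once per task before the first reduce() call. A generic, hedged sketch of that lifecycle (not Crunch's actual implementation):

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Generic lifecycle sketch, not Crunch's code: per-task state is created once in
// setup() and reused by every reduce() call of the task.
public class StatefulReducer extends Reducer<Text, Text, Text, Text> {

    private StringBuilder scratch;   // stand-in for per-task state such as Crunch's RTNode

    @Override
    protected void setup(Context context) {
        scratch = new StringBuilder();   // runs once, before the first reduce() call
    }

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        scratch.setLength(0);
        for (Text value : values) {
            scratch.append(value).append(' ');
        }
        context.write(key, new Text(scratch.toString().trim()));
    }
}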

From source file org.apache.falcon.hive.mapreduce.CopyReducer.java

/**
 * Reducer class for Hive DR.
 */
public class CopyReducer extends Reducer<Text, Text, Text, Text> {
    private DRStatusStore hiveDRStore;
    private ScheduledThreadPoolExecutor timer;
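
The ScheduledThreadPoolExecutor field suggests background work running alongside reduce(). Here is a hedged, generic sketch of managing such an executor across the reducer lifecycle, starting it in setup() and stopping it in cleanup(); the progress-reporting task is an illustrative assumption, and Falcon's actual replication-status logic is not reproduced.

import java.io.IOException;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Generic sketch only: run a background task for the lifetime of the reduce task.
public class BackgroundTaskReducer extends Reducer<Text, Text, Text, Text> {

    private ScheduledThreadPoolExecutor timer;

    @Override
    protected void setup(Context context) {
        timer = new ScheduledThreadPoolExecutor(1);
        // Assumed use: periodically report progress so long-running reduces are not killed.
        timer.scheduleAtFixedRate(context::progress, 0, 30, TimeUnit.SECONDS);
    }

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        for (Text value : values) {
            context.write(key, value);
        }
    }

    @Override
    protected void cleanup(Context context) {
        timer.shutdownNow();   // stop the background task when the reduce task finishes
    }
}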

From source file org.apache.gobblin.compaction.mapreduce.RecordKeyDedupReducerBase.java

/**
 * A base implementation of deduplication reducer that is format-unaware.
 */
public abstract class RecordKeyDedupReducerBase<KI, VI, KO, VO> extends Reducer<KI, VI, KO, VO> {
    public enum EVENT_COUNTER {
        MORE_THAN_1, DEDUPED, RECORD_COUNT
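
The enum above names the counters a deduplication reducer would report. Below is a hedged sketch of what key-based deduplication typically looks like, keeping the first value per key and counting dropped duplicates; the exact semantics of each counter in Gobblin's implementation may differ.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hedged sketch of key-based deduplication, not Gobblin's actual implementation:
// keep the first value seen for each key and count how many duplicates were dropped.
public class DedupByKeyReducer extends Reducer<Text, Text, Text, Text> {

    public enum EVENT_COUNTER { MORE_THAN_1, DEDUPED, RECORD_COUNT }

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        int seen = 0;
        for (Text value : values) {
            seen++;
            if (seen == 1) {
                context.write(key, value);                      // keep one record per key
            } else {
                context.getCounter(EVENT_COUNTER.DEDUPED).increment(1);
            }
        }
        if (seen > 1) {
            context.getCounter(EVENT_COUNTER.MORE_THAN_1).increment(1);
        }
        context.getCounter(EVENT_COUNTER.RECORD_COUNT).increment(seen);
    }
}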

From source file org.apache.gora.mapreduce.GoraReducer.java

/**
 * Base class for Gora based {@link Reducer}s.
 */
public class GoraReducer<K1, V1, K2, V2 extends Persistent> extends Reducer<K1, V1, K2, V2> {

    /**

From source file org.apache.ignite.internal.processors.hadoop.examples.GridHadoopWordCount2Reducer.java

/**
 * Combiner and Reducer phase of WordCount job.
 */
public class GridHadoopWordCount2Reducer extends Reducer<Text, IntWritable, Text, IntWritable>
        implements Configurable {
    /** Writable container for writing sum of word counts. */

From source file org.apache.ignite.internal.processors.hadoop.examples.HadoopWordCount2Reducer.java

/**
 * Combiner and Reducer phase of WordCount job.
 */
public class HadoopWordCount2Reducer extends Reducer<Text, IntWritable, Text, IntWritable> implements Configurable {
    /** Writable container for writing sum of word counts. */
    private IntWritable totalWordCnt = new IntWritable();
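
Both Ignite WordCount entries above (GridHadoopWordCount2Reducer and HadoopWordCount2Reducer) follow the standard word-count sum pattern. A minimal self-contained version, without the Configurable plumbing those classes also implement:

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Standard word-count sum reducer; usable as both combiner and reducer because
// summing partial sums gives the same result as summing the raw counts.
public class WordCountSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    private final IntWritable totalWordCnt = new IntWritable();

    @Override
    protected void reduce(Text word, Iterable<IntWritable> counts, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable count : counts) {
            sum += count.get();
        }
        totalWordCnt.set(sum);
        context.write(word, totalWordCnt);
    }
}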