Example usage for org.apache.hadoop.mapreduce Reducer subclass-usage

Introduction

This page collects example usages of the org.apache.hadoop.mapreduce Reducer class, taken from open-source subclasses.
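
Every example below follows the same basic pattern: extend Reducer with four type parameters (input key/value, output key/value) and override reduce(). As a baseline for reading the snippets, here is a minimal sketch in the classic summing style; the class name IntSumReducer is illustrative, not taken from any example on this page.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Minimal Reducer subclass: sums the integer values seen for each key.
public class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private final IntWritable result = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        result.set(sum);
        context.write(key, result);
    }
}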

Usage

From source file com.toshiba.mwcloud.gs.hadoop.mapreduce.GSReduce.java

/**
 * Reducer base class for GridDB.
 * @param <KEYIN> input key type of the reduce phase
 * @param <VALIN> input value type of the reduce phase
 * @param <KEYOUT> output key type of the reduce phase

From source file com.transwarp.hbase.bulkload.PutWritableSortReducer.java

/**
 * Emits sorted Puts.
 * Reads in all Puts from the passed Iterator, sorts them, then emits
 * the Puts in sorted order. If a row has many columns, sorting can use
 * a lot of memory.
 * @see HFileOutputFormat
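
A hedged sketch of the sort-then-emit pattern this reducer follows, using plain Text values instead of HBase Puts (the class below is illustrative, not the actual bulkload code):

import java.io.IOException;
import java.util.TreeSet;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Sort-then-emit: buffer all values for a key, sort them in memory,
// then write them out in order. This buffering is what makes memory
// use grow when a key has many values.
public class SortThenEmitReducer extends Reducer<Text, Text, Text, Text> {

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        TreeSet<String> sorted = new TreeSet<>();
        for (Text value : values) {
            sorted.add(value.toString()); // copy: Hadoop reuses the Text instance
        }
        for (String value : sorted) {
            context.write(key, new Text(value));
        }
    }
}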

From source file com.transwarp.hbase.bulkload.TextSortReducer.java

/**
 * Emits sorted KeyValues. Reads the text passed in, parses it into KeyValues,
 * sorts them, and emits the KeyValues in sorted order.
 * @see HFileOutputFormat
 * @see KeyValueSortReducer
 * @see PutWritableSortReducer

From source file com.transwarp.hbase.bulkload.withindex.TextWithIndexSortReducer.java

/**
 * Emits sorted KeyValues. Reads the text passed in, parses it into KeyValues,
 * sorts them, and emits the KeyValues in sorted order.
 * @see HFileOutputFormat
 * @see KeyValueSortReducer
 * @see PutWritableSortReducer
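
Both text sort reducers above share a parse-then-sort shape: parse raw text into cells, order them, emit. Below is a hypothetical sketch using tab-separated "qualifier<TAB>value" text in place of HBase KeyValues; the class and parsing convention are assumptions for illustration, not the actual bulkload code.

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Parse each value into a (qualifier, value) cell, keep cells ordered
// by qualifier via a TreeMap, then emit them in sorted order.
public class TextParseSortReducer extends Reducer<Text, Text, Text, Text> {

    @Override
    protected void reduce(Text rowKey, Iterable<Text> lines, Context context)
            throws IOException, InterruptedException {
        Map<String, String> cells = new TreeMap<>();
        for (Text line : lines) {
            String[] parts = line.toString().split("\t", 2);
            if (parts.length == 2) {
                cells.put(parts[0], parts[1]);
            }
        }
        for (Map.Entry<String, String> cell : cells.entrySet()) {
            context.write(rowKey, new Text(cell.getKey() + "\t" + cell.getValue()));
        }
    }
}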

From source file com.twitter.algebra.MergeVectorsReducer.java

public class MergeVectorsReducer
        extends Reducer<WritableComparable<?>, VectorWritable, WritableComparable<?>, VectorWritable> {
    @Override
    public void reduce(WritableComparable<?> key, Iterable<VectorWritable> vectors, Context context)
            throws IOException, InterruptedException {
        Vector merged = VectorWritable.merge(vectors.iterator()).get();
        // write the merged vector back out under the same key
        context.write(key, new VectorWritable(merged));
    }
}

From source file com.twitter.elephanttwin.indexing.MapFileIndexingReducer.java

/**
 * The reducer simply gets all indexed block offsets for the same text (key
 * value) and puts them together as a list to be written to index files.
 */

public class MapFileIndexingReducer extends Reducer<TextLongPairWritable, LongPairWritable, Text, ListLongPair> {
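
A sketch of that idea with standard Hadoop types standing in for elephanttwin's project-specific TextLongPairWritable and ListLongPair:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Collect every block offset seen for a key into a single list entry,
// producing one index record per key.
public class OffsetListReducer extends Reducer<Text, LongWritable, Text, Text> {

    @Override
    protected void reduce(Text key, Iterable<LongWritable> offsets, Context context)
            throws IOException, InterruptedException {
        List<Long> collected = new ArrayList<>();
        for (LongWritable offset : offsets) {
            collected.add(offset.get());
        }
        // One index record: the key and all block offsets that contain it.
        context.write(key, new Text(collected.toString()));
    }
}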

From source file com.twitter.elephanttwin.lucene.indexing.AbstractLuceneIndexingReducer.java

/**
 * <p>
 * The general indexing flow is as follows: the mappers process input records and pass
 * them on to the reducers, which perform the actual indexing in Lucene. The number of
 * reducers is equal to the
 * number of shards (partitions), i.e., each reducer builds an index partition independently.
 * </p>
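
A hypothetical skeleton for this flow, assuming Lucene's IndexWriter and a local shard directory named after the reduce task; the actual AbstractLuceneIndexingReducer differs in its details. Note the reducer writes to the Lucene index rather than to Hadoop's output.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.FSDirectory;

// One reducer builds one index shard: open the writer in setup(),
// add a document per record in reduce(), close the shard in cleanup().
public class LuceneShardReducer extends Reducer<Text, Text, Text, Text> {
    private IndexWriter writer;

    @Override
    protected void setup(Context context) throws IOException {
        // One shard per reducer: the task id names the local index directory.
        String shardDir = "/tmp/index-shard-" + context.getTaskAttemptID().getTaskID().getId();
        writer = new IndexWriter(FSDirectory.open(Paths.get(shardDir)),
                new IndexWriterConfig(new StandardAnalyzer()));
    }

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException {
        for (Text value : values) {
            Document doc = new Document();
            doc.add(new TextField("id", key.toString(), Field.Store.YES));
            doc.add(new TextField("body", value.toString(), Field.Store.YES));
            writer.addDocument(doc);
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException {
        // Commit and close the shard; a real job would then copy it to HDFS.
        writer.close();
    }
}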

From source file com.veera.secondarysort.demo2.SsReducer.java

/**
 * Secondary sort reducer.
 * @author Jee Vang
 *
 */
public class SsReducer extends Reducer<StockKey, DoubleWritable, Text, Text> {
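
On the reduce side of a secondary sort there is little left to do: the framework's sort comparator has already ordered the values via the composite key. A sketch of the reduce method, reusing the snippet's StockKey type (assumed here to group by symbol and sort by timestamp):

import java.io.IOException;

import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Values arrive already ordered by the composite key, so the reducer
// only concatenates them in arrival order.
public class SecondarySortSketchReducer
        extends Reducer<StockKey, DoubleWritable, Text, Text> {

    @Override
    protected void reduce(StockKey key, Iterable<DoubleWritable> values, Context context)
            throws IOException, InterruptedException {
        StringBuilder ordered = new StringBuilder();
        for (DoubleWritable value : values) {
            ordered.append(value.get()).append(',');
        }
        context.write(new Text(key.toString()), new Text(ordered.toString()));
    }
}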

From source file com.vinod.hadoop.mapreduce.example.secondarysort.SecondarySortReducer.java

/**
 * Secondary sort reducer.
 * @author Jee Vang
 *
 */
public class SecondarySortReducer extends Reducer<StockKey, DoubleWritable, Text, Text> {

From source file com.wipro.ats.bdre.dq.DQFileReportReducer.java

public class DQFileReportReducer extends Reducer<Text, IntWritable, Text, Text> {
    private static final Logger LOGGER = Logger.getLogger(DQFileReportReducer.class);
    private Text outputKey = new Text();
    private Text outputValue = new Text();
    private int goodRecords;
    private int badRecords;
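
The snippet ends at the counter fields. A hypothetical continuation, not the actual BDRE code, that sums per-key counts in reduce() and writes a summary report in cleanup():

    // Assumed convention: keys tag records as "GOOD" or "BAD".
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        if ("GOOD".equals(key.toString())) {
            goodRecords += count;
        } else {
            badRecords += count;
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Emit one summary line once all keys have been counted.
        outputKey.set("dq.report");
        outputValue.set("good=" + goodRecords + ", bad=" + badRecords);
        context.write(outputKey, outputValue);
    }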