Example usage for org.apache.hadoop.mapreduce Reducer subclass-usage

Introduction

On this page you can find examples of org.apache.hadoop.mapreduce.Reducer being subclassed, excerpted from open-source projects. Each snippet below is a truncated preview of the source file it is taken from.
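
As a primer, here is a minimal, self-contained Reducer subclass in the same style as the snippets below. It is an illustrative word-count reducer, not taken from any of the listed projects:

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Sums the integer counts emitted by the mapper for each word.
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    private final IntWritable result = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable value : values)
            sum += value.get();
        result.set(sum);
        context.write(key, result);
    }
}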

Usage

From source file edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniquesReducer.java

public class CorrelationTechniquesReducer
        extends Reducer<PairAttributeWritable, SpatioTemporalValueWritable, Text, Text> {

    public static FrameworkUtils utils = new FrameworkUtils();

    Configuration conf;

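A subclass like this is wired into a job by a driver class; a minimal sketch using the standard Job API (the job name and driver boilerplate are illustrative, not from this project):

Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "correlation-techniques");
job.setJarByClass(CorrelationTechniquesReducer.class);
job.setReducerClass(CorrelationTechniquesReducer.class);
// The output key/value classes must match the reducer's last two type parameters.
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
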
From source file edu.rosehulman.TFPartialVectorReducer.java

/**
 * Converts a document into a sparse vector
 */
public class TFPartialVectorReducer extends Reducer<Text, StringTuple, Text, VectorWritable> {

    private final OpenObjectIntHashMap<String> dictionary = new OpenObjectIntHashMap<String>();

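The reduce body is truncated here; presumably it resolves each token against the dictionary and fills a sparse term-frequency vector. A hedged sketch of that idea, assuming Mahout's RandomAccessSparseVector together with the StringTuple and OpenObjectIntHashMap types shown above:

    @Override
    protected void reduce(Text key, Iterable<StringTuple> values, Context context)
            throws IOException, InterruptedException {
        Vector vector = new RandomAccessSparseVector(dictionary.size());
        for (StringTuple tuple : values) {
            for (String term : tuple.getEntries()) {
                if (dictionary.containsKey(term)) {
                    int index = dictionary.get(term);
                    // Accumulate the term frequency in the term's slot.
                    vector.set(index, vector.get(index) + 1);
                }
            }
        }
        context.write(key, new VectorWritable(vector));
    }
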
From source file edu.ucla.sspace.hadoop.CooccurrenceReducer.java

/**
 * A {@link Reducer} that transforms the co-occurrence of the input key
 * with another word at a certain position into a count of how many times
 * that co-occurrence took place.
 */
public class CooccurrenceReducer extends Reducer<Text, TextIntWritable, WordCooccurrenceWritable, IntWritable> {

From source file edu.udel.mxv.MxvRed.java

/**
 * Sums up all the a_ij * x_j values associated with each y_i.
 * The output is:
 *      - key   = the row number in vector y
 *      - value = the value of that element of the vector
 */

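The rest of the file is not shown; given the comment, the reduce step is a plain summation. A sketch under the assumption that the mapper emits (row index, partial product) pairs as IntWritable/DoubleWritable:

    @Override
    protected void reduce(IntWritable row, Iterable<DoubleWritable> products, Context context)
            throws IOException, InterruptedException {
        // y_i = sum over j of a_ij * x_j; each value is one partial product.
        double sum = 0;
        for (DoubleWritable product : products)
            sum += product.get();
        context.write(row, new DoubleWritable(sum));
    }
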
From source file edu.umd.cloud9.example.translation.TransProbReducer.java

public class TransProbReducer extends Reducer<PairOfStrings, FloatWritable, PairOfStrings, FloatWritable> {

    @Override
    public void reduce(PairOfStrings key, Iterable<FloatWritable> values, Context context)
            throws IOException, InterruptedException {

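The body is cut off above. A plausible continuation simply aggregates the incoming floats for each string pair; whether the real class also normalizes the totals is not visible in this excerpt:

    @Override
    public void reduce(PairOfStrings key, Iterable<FloatWritable> values, Context context)
            throws IOException, InterruptedException {
        // Aggregate the float values for this string pair.
        float sum = 0f;
        for (FloatWritable value : values)
            sum += value.get();
        context.write(key, new FloatWritable(sum));
    }
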
From source file edu.usc.pgroup.louvain.hadoop.ReduceCommunity.java

/**
 * Created by Charith Wickramaarachchi on 6/30/14.
 */
public class ReduceCommunity extends Reducer<Text, BytesWritable, Text, Text> {

    private double precision = 0.000001;

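The BytesWritable values here presumably carry serialized partial results from each mapper. The generic pattern for reading them back is worth noting, since BytesWritable's backing array can be longer than the payload:

        for (BytesWritable value : values) {
            // Respect getLength(): getBytes() may return a padded buffer.
            DataInputStream in = new DataInputStream(
                    new ByteArrayInputStream(value.getBytes(), 0, value.getLength()));
            // ... deserialize one partial graph from 'in' ...
        }
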
From source file fi.tkk.ics.hadoop.bam.cli.plugins.chipster.Summarize.java

final class SummarizeReducer extends Reducer<LongWritable, Range, NullWritable, RangeCount> {
    public static final String SUMMARY_LEVELS_PROP = "summarize.summary.levels";

    private MultipleOutputs<NullWritable, RangeCount> mos;

    // For the reverse and forward strands, respectively.

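A MultipleOutputs field like mos follows a setup/write/cleanup lifecycle; a minimal sketch, assuming a named output called "summary" registered on the job via MultipleOutputs.addNamedOutput:

    @Override
    protected void setup(Context ctx) {
        mos = new MultipleOutputs<NullWritable, RangeCount>(ctx);
    }

    // Inside reduce(), results go to the named output instead of ctx.write():
    //     mos.write("summary", NullWritable.get(), rangeCount);

    @Override
    protected void cleanup(Context ctx) throws IOException, InterruptedException {
        mos.close();
    }
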
From source file fi.tkk.ics.hadoop.bam.cli.plugins.chipster.SummarySort.java

final class SortReducer extends Reducer<LongWritable, Text, NullWritable, Text> {
    @Override
    protected void reduce(LongWritable ignored, Iterable<Text> records,
            Reducer<LongWritable, Text, NullWritable, Text>.Context ctx) throws IOException, InterruptedException {
        for (Text rec : records)
            ctx.write(NullWritable.get(), rec);
    }
}

From source file fi.tkk.ics.hadoop.bam.cli.plugins.FixMate.java

final class FixMateReducer extends Reducer<Text, SAMRecordWritable, Text, SAMRecordWritable> {
    private final SAMRecordWritable wrec = new SAMRecordWritable();

    @Override
    protected void reduce(Text key, Iterable<SAMRecordWritable> records,
            Reducer<Text, SAMRecordWritable, Text, SAMRecordWritable>.Context ctx)

From source file fi.tkk.ics.hadoop.bam.cli.plugins.Sort.java

final class SortReducer extends Reducer<LongWritable, SAMRecordWritable, NullWritable, SAMRecordWritable> {
    @Override
    protected void reduce(LongWritable ignored, Iterable<SAMRecordWritable> records,
            Reducer<LongWritable, SAMRecordWritable, NullWritable, SAMRecordWritable>.Context ctx)
            throws IOException, InterruptedException {
        for (SAMRecordWritable rec : records)
            ctx.write(NullWritable.get(), rec);
    }
}