Example usage for org.apache.hadoop.mapreduce Mapper subclass-usage

Introduction

This page lists example usages of org.apache.hadoop.mapreduce.Mapper subclasses drawn from open-source projects.
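
Before the individual examples, here is a minimal sketch of the pattern they all share: subclass Mapper with the four type parameters <KEYIN, VALUEIN, KEYOUT, VALUEOUT>, override map(), and optionally override setup() and cleanup() for per-task initialization and teardown. The class and field names below are illustrative only and do not come from any of the source files listed on this page.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Emits (token, 1) for every whitespace-separated token in each input line.
public class TokenCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String token : value.toString().split("\\s+")) {
            if (!token.isEmpty()) {
                word.set(token);
                context.write(word, ONE);
            }
        }
    }
}

A mapper like this is registered on a job with job.setMapperClass(TokenCountMapper.class); the framework then calls map() once per input record.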

Usage

From source file edu.indiana.d2i.htrc.skmeans.StreamingKMeansMapper.java

class StreamingKMeansMapper extends Mapper<WritableComparable<?>, VectorWritable, IntWritable, VectorWritable> {
    private StreamingKMeansAdapter skmeans = null;
    private VectorProjectionIF projector = null;

    @Override
    public void map(WritableComparable<?> key, VectorWritable value, Context context)

From source file edu.indiana.soic.ts.mapreduce.InsertAllMapper.java

public class InsertAllMapper extends Mapper<LongWritable, Text, Text, Text> {
    private static final Logger log = LoggerFactory.getLogger(InsertAllMapper.class);

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        super.setup(context);

From source file edu.indiana.soic.ts.mapreduce.InsertDateMapper.java

public class InsertDateMapper extends Mapper<LongWritable, Text, Text, Text> {

    private static final Logger log = LoggerFactory.getLogger(InsertDateMapper.class);

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {

From source file edu.indiana.soic.ts.mapreduce.pwd.SWGMap.java

public class SWGMap extends Mapper<LongWritable, Text, LongWritable, SWGWritable> {
    private static final Logger LOG = LoggerFactory.getLogger(SWGMap.class);
    private long blockSize;
    private long noOfSequences;
    private long noOfDivisions;

From source file edu.nyu.vida.data_polygamy.feature_identification.IndexCreationMapper.java

public class IndexCreationMapper extends
        Mapper<SpatioTemporalWritable, FloatArrayWritable, AttributeResolutionWritable, SpatioTemporalFloatWritable> {

    public static FrameworkUtils utils = new FrameworkUtils();

    String datasetIdStr = null;

From source file edu.nyu.vida.data_polygamy.pre_processing.PreProcessingMapper.java

/**
 * 
 * @author fchirigati
 *
 */
public class PreProcessingMapper

From source file edu.nyu.vida.data_polygamy.relationship_computation.CorrelationMapper.java

public class CorrelationMapper extends
        Mapper<AttributeResolutionWritable, TopologyTimeSeriesWritable, PairAttributeWritable, TopologyTimeSeriesWritable> {

    public static FrameworkUtils utils = new FrameworkUtils();

    HashMap<Integer, Integer> datasetAggSize = new HashMap<Integer, Integer>();

From source file edu.nyu.vida.data_polygamy.scalar_function_computation.AggregationMapper.java

public class AggregationMapper extends
        Mapper<MultipleSpatioTemporalWritable, AggregationArrayWritable, SpatioTemporalWritable, AggregationArrayWritable> {

    public static FrameworkUtils utils = new FrameworkUtils();

    HashMap<String, String> datasetToId = new HashMap<String, String>();

From source file edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniquesMapper.java

public class CorrelationTechniquesMapper extends
        Mapper<SpatioTemporalWritable, FloatArrayWritable, PairAttributeWritable, SpatioTemporalValueWritable> {

    public static FrameworkUtils utils = new FrameworkUtils();

    String datasetIdStr = null;

From source file edu.rosehulman.CollocMapper.java

/**
 * Pass 1 of the collocation discovery job, which generates ngrams and emits each ngram and its component (n-1)-grams.
 * Input is a SequenceFile<Text,StringTuple>, where the key is a document id and the value is the tokenized document.
 */
public class CollocMapper extends Mapper<Text, StringTuple, GramKey, Gram> {
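
A simplified sketch of what this first pass does, restricted to bigrams and substituting plain Text for the StringTuple input and for the Mahout GramKey/Gram output types; the class name and the NGRAM/HEAD/TAIL markers are illustrative, not the actual CollocMapper implementation.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// For each adjacent token pair, emits the bigram itself plus its two
// component unigrams, keyed by the bigram, so a reducer can accumulate
// the counts needed for collocation scoring (e.g. a log-likelihood ratio).
public class SimpleCollocMapper extends Mapper<Text, Text, Text, Text> {
    @Override
    protected void map(Text docId, Text tokenizedDoc, Context context)
            throws IOException, InterruptedException {
        String[] tokens = tokenizedDoc.toString().split("\\s+");
        for (int i = 0; i + 1 < tokens.length; i++) {
            String ngram = tokens[i] + " " + tokens[i + 1];
            context.write(new Text(ngram), new Text("NGRAM"));
            context.write(new Text(ngram), new Text("HEAD:" + tokens[i]));
            context.write(new Text(ngram), new Text("TAIL:" + tokens[i + 1]));
        }
    }
}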