Example usage for org.apache.hadoop.mapreduce Mapper subclass-usage

Introduction

On this page you can find examples of classes that subclass org.apache.hadoop.mapreduce Mapper, listed by source file with the opening lines of each usage.
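
Every entry below follows the same basic shape, so before the individual files, here is a minimal, self-contained sketch of a Mapper subclass: choose the four generic type parameters (input key/value, output key/value), override map(), and emit pairs through the Context. The class name and logic are illustrative only, not taken from any of the files below.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Minimal Mapper subclass: consumes the <LongWritable, Text> pairs produced
 * by TextInputFormat and emits one (token, token length) pair per token.
 */
public class WordLengthMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final Text word = new Text();
    private final IntWritable length = new IntWritable();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String token : value.toString().split("\\s+")) {
            if (token.isEmpty()) {
                continue;
            }
            word.set(token);
            length.set(token.length());
            context.write(word, length);
        }
    }
}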

Usage

From source file fr.ens.biologie.genomique.eoulsan.modules.mapping.hadoop.ReadsFilterMapper.java

/**
 * This class defines a read filter mapper.
 * @since 1.0
 * @author Laurent Jourdren
 */
public class ReadsFilterMapper extends Mapper<Text, Text, Text, Text> {

From source file fr.ens.biologie.genomique.eoulsan.modules.mapping.hadoop.ReadsMapperMapper.java

/**
 * This class defines a generic mapper for reads mapping.
 * @since 1.0
 * @author Laurent Jourdren
 */
public class ReadsMapperMapper extends Mapper<Text, Text, Text, Text> {

From source file fr.ens.biologie.genomique.eoulsan.modules.mapping.hadoop.SAMFilterMapper.java

/**
 * This class defines a mapper for alignment filtering.
 * @since 1.0
 * @author Laurent Jourdren
 */
public class SAMFilterMapper extends Mapper<Text, Text, Text, Text> {
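
All three Eoulsan mappers above share the <Text, Text, Text, Text> signature and follow a filter pattern: re-emit an input pair only when it passes a predicate. A minimal sketch of that pattern, with an invented class name and predicate rather than Eoulsan's actual filtering logic:

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Illustrative filter-style mapper: forwards a record unchanged when it
// passes a predicate, otherwise drops it and bumps a counter.
public class FilterStyleMapper extends Mapper<Text, Text, Text, Text> {

    @Override
    protected void map(Text key, Text value, Context context)
            throws IOException, InterruptedException {
        if (accept(value)) {
            context.write(key, value); // keep the record as-is
        } else {
            context.getCounter("filter", "rejected").increment(1);
        }
    }

    // Hypothetical predicate; Eoulsan's real filters are configurable.
    private boolean accept(Text value) {
        return value.getLength() > 0;
    }
}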

From source file full_MapReduce.AttributeInfoMapper.java

public class AttributeInfoMapper extends Mapper<TextArrayWritable, IntWritable, Text, AttributeCounterWritable> {

    public void map(TextArrayWritable key, IntWritable value, Context context)
            throws IOException, InterruptedException {
        Configuration conf = context.getConfiguration();
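
The snippet above fetches the job Configuration inside map(). When a value is needed for every record, the usual idiom is to read it once in setup(), which the framework calls before the first map() invocation. A sketch of that idiom, with a hypothetical property name:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch: read a job parameter once in setup() instead of on every map() call.
public class ConfiguredMapper extends Mapper<Text, IntWritable, Text, IntWritable> {

    private int minCount; // populated from the job configuration

    @Override
    protected void setup(Context context) {
        Configuration conf = context.getConfiguration();
        // "example.min.count" is a hypothetical property name.
        minCount = conf.getInt("example.min.count", 1);
    }

    @Override
    protected void map(Text key, IntWritable value, Context context)
            throws IOException, InterruptedException {
        if (value.get() >= minCount) {
            context.write(key, value);
        }
    }
}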

From source file full_MapReduce.FindBestAttributeMapper.java

public class FindBestAttributeMapper extends Mapper<Text, MapWritable, NullWritable, AttributeGainRatioWritable> {

    public void map(Text key, MapWritable value, Context context) throws IOException, InterruptedException {
        TextArrayWritable values = getValues(value);
        Map<Text, Integer> tuple_per_split = getTuplePerSplit(value);

From source file full_MapReduce.SummarizeMapper.java

public class SummarizeMapper extends Mapper<LongWritable, Text, TextArrayWritable, IntWritable> {

    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] line_splitted = value.toString().split("\t");
        Text[] my_tmp_key = new Text[line_splitted.length];
        for (int i = 0; i < line_splitted.length; ++i) {
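
TextArrayWritable, used as the output key here, is not a stock Hadoop type. The conventional way to define one is to subclass ArrayWritable and fix the element class, roughly as below; this is the standard pattern, not necessarily how full_MapReduce defines it:

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Text;

// Standard typed-ArrayWritable pattern: the no-arg constructor is required
// so Hadoop can instantiate the class during deserialization.
public class TextArrayWritable extends ArrayWritable {
    public TextArrayWritable() {
        super(Text.class);
    }

    public TextArrayWritable(Text[] values) {
        super(Text.class, values);
    }
}

Note that ArrayWritable does not implement WritableComparable, so using such a type as a map output key also requires registering a sort comparator on the job.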

From source file gaffer.accumulo.bulkimport.BulkImportMapper.java

/**
 * Mapper for use in bulk import of data into Accumulo. It simply converts provided
 * {@link GraphElement}, {@link SetOfStatistics} pairs into Accumulo keys
 * and values using methods from {@link ConversionUtils}.
 */
public class BulkImportMapper extends Mapper<GraphElement, SetOfStatistics, Key, Value> {
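
The javadoc describes a pure conversion step: each (GraphElement, SetOfStatistics) pair becomes one or more Accumulo (Key, Value) pairs. A hedged sketch of what such a map() body could look like; the ConversionUtils method names are invented for illustration and are not Gaffer's actual API:

    @Override
    protected void map(GraphElement element, SetOfStatistics statistics, Context context)
            throws IOException, InterruptedException {
        // Hypothetical ConversionUtils helpers: one element can produce
        // several Accumulo keys (e.g. an edge stored under both endpoints),
        // all sharing the value serialized from the statistics.
        Value value = ConversionUtils.getValueFromSetOfStatistics(statistics);
        for (Key key : ConversionUtils.getKeysFromGraphElement(element)) {
            context.write(key, value);
        }
    }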

From source file gaffer.accumulo.inputformat.example.ExampleMapper.java

/**
 * An example of a {@link Mapper} that consumes data from the Accumulo store
 * underlying Gaffer. This is just the identity mapper, but it includes counters
 * indicating the number of {@link Entity}s and {@link Edge}s in the graph.
 */
public class ExampleMapper extends Mapper<GraphElement, SetOfStatistics, GraphElement, SetOfStatistics> {
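
An identity mapper with counters only needs to forward each pair and bump the matching counter. A sketch under the assumption that GraphElement can report whether it wraps an Entity or an Edge; the isEntity() accessor and counter names are guesses, not Gaffer's API:

    @Override
    protected void map(GraphElement element, SetOfStatistics statistics, Context context)
            throws IOException, InterruptedException {
        // Identity mapping: forward the pair unchanged.
        context.write(element, statistics);
        // Counter group/names are illustrative; isEntity() is an assumed accessor.
        if (element.isEntity()) {
            context.getCounter("graph", "entities").increment(1);
        } else {
            context.getCounter("graph", "edges").increment(1);
        }
    }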

From source file gaffer.accumulo.splitpoints.EstimateSplitPointsMapper.java

/**
 * Mapper class used for estimating the split points to ensure even distribution of
 * data in Accumulo after initial insert.
 */
public class EstimateSplitPointsMapper extends Mapper<GraphElement, SetOfStatistics, Key, Value> {
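
Split-point estimation typically samples a small fraction of the converted (Key, Value) pairs so a later step can pick evenly spaced cut points from the sample. A generic sketch of that idea; the sampling rate and the ConversionUtils helpers are assumptions, not Gaffer's actual implementation:

    private static final double SAMPLE_RATE = 0.001; // assumed rate, not Gaffer's
    private final java.util.Random random = new java.util.Random();

    @Override
    protected void map(GraphElement element, SetOfStatistics statistics, Context context)
            throws IOException, InterruptedException {
        // Keep roughly one record in a thousand; the surviving keys
        // approximate the overall key distribution, from which evenly
        // spaced split points can later be chosen.
        if (random.nextDouble() < SAMPLE_RATE) {
            Value value = ConversionUtils.getValueFromSetOfStatistics(statistics); // assumed helper
            for (Key key : ConversionUtils.getKeysFromGraphElement(element)) {     // assumed helper
                context.write(key, value);
            }
        }
    }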

From source file gaffer.accumulostore.operation.hdfs.handler.job.SampleDataForSplitPointsMapper.java

/**
 * Mapper class used for estimating the split points to ensure even distribution of
 * data in Accumulo after initial insert.
 */
public class SampleDataForSplitPointsMapper<KEY_IN, VALUE_IN> extends Mapper<KEY_IN, VALUE_IN, Key, Value> {