Example usage for org.apache.hadoop.mapreduce Partitioner (subclass usage)

List of usage examples for org.apache.hadoop.mapreduce Partitioner (subclass usage)

Introduction

This page collects examples of subclassing org.apache.hadoop.mapreduce.Partitioner, drawn from the source files listed below.

Usage

From source file ldbc.socialnet.dbgen.util.MapReduceKeyPartitioner.java

public class MapReduceKeyPartitioner extends Partitioner<MapReduceKey, ReducedUserProfile> {

    public MapReduceKeyPartitioner() {
        super();

    }
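
The excerpt shows only the constructor; for the partitioner to take effect it has to be registered on the job. A minimal driver sketch, assuming a standard MapReduce setup (the PartitionerDriver class, job name, and reducer count are illustrative, not taken from the LDBC source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class PartitionerDriver {
    // Hypothetical driver: register the custom partitioner and size the reducer
    // count, which is what getPartition() receives as numPartitions.
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "ldbc-datagen");
        job.setPartitionerClass(MapReduceKeyPartitioner.class);
        job.setNumReduceTasks(8);
        // ... set mapper, reducer, key/value classes and paths as usual ...
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}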

From source file ldbc.socialnet.dbgen.util.UpdateEventPartitioner.java

public class UpdateEventPartitioner extends Partitioner<LongWritable, Text> {

    public UpdateEventPartitioner() {
        super();

    }
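
The getPartition method is not part of this excerpt. A plausible body for a LongWritable/Text pair, offered purely as a sketch and not as the original source, spreads update events across reducers by the numeric key:

    @Override
    public int getPartition(LongWritable key, Text value, int numPartitions) {
        // Mask the sign bit so a negative key still maps to a valid partition.
        return (int) ((key.get() & Long.MAX_VALUE) % numPartitions);
    }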

From source file libra.core.kmersimilarity_r.KmerSimilarityPartitioner.java

/**
 *
 * @author iychoi
 */
public class KmerSimilarityPartitioner extends Partitioner<CompressedSequenceWritable, CompressedIntArrayWritable> {

From source file libra.preprocess.stage2.KmerIndexBuilderPartitioner.java

/**
 *
 * @author iychoi
 */
public class KmerIndexBuilderPartitioner extends Partitioner<CompressedSequenceWritable, IntWritable>
        implements Configurable {
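
Because this class also implements Configurable, the framework passes it the job Configuration right after instantiation, before any call to getPartition. The usual shape of that contract is sketched below; only the setConf/getConf signatures come from the Configurable interface, while the field and the way it is used are assumptions:

    private Configuration conf;

    @Override
    public void setConf(Configuration conf) {
        // Invoked by the framework; read any partitioner-specific settings here.
        this.conf = conf;
    }

    @Override
    public Configuration getConf() {
        return conf;
    }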

From source file licenseWritables.LicenseKeyPartitioner.java

public class LicenseKeyPartitioner extends Partitioner<LicenseKey, Text> {

    @Override
    public int getPartition(LicenseKey key, Text value, int numberOfReducers) {
        // Mask the sign bit so negative hash codes still map to a valid partition.
        return (key.hashCode() & Integer.MAX_VALUE) % numberOfReducers;
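
This is the same formula Hadoop's default HashPartitioner applies: the Integer.MAX_VALUE mask keeps the result non-negative even when hashCode() is negative, and keying it on LicenseKey routes every record that shares a key to the same reducer.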

From source file mabh.mr.sa.partitioner.MRQuarterPartitioner.java

public class MRQuarterPartitioner extends Partitioner<Text, FloatWritable> {
    @Override
    public int getPartition(Text arg0, FloatWritable arg1, int numPartitions) {
        // arg0 is the date Text; its quarter determines the partition
        if ("Q1".equals(arg0.toString())) {
            return 0;
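
The excerpt ends after the Q1 branch. The remaining quarters presumably follow the same pattern; the continuation below is a sketch of that assumption, not the quoted source, and it presumes the job runs with at least four reducers:

        } else if ("Q2".equals(arg0.toString())) {
            return 1;
        } else if ("Q3".equals(arg0.toString())) {
            return 2;
        } else {
            // Q4 (and anything unexpected) goes to the last partition.
            return 3;
        }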

From source file name.abhijitsarkar.hadoop.join.KeyPartitioner.java

/**
 * This class partitions the output from {@link CustomerMapper CustomerMapper} and from {@link OrderMapper OrderMapper}
 * so that records with the same key (customer ID) are sent to the same Reducer.
 * 
 * @author Abhijit Sarkar
 */
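
The class body is not included in the excerpt, but the javadoc describes the standard reduce-side-join arrangement: the map output key combines the customer ID with a source tag, and the partitioner hashes only the customer-ID part so that customer and order records for the same ID meet at one reducer. A sketch under that assumption (TaggedKey and its getJoinKey() accessor are hypothetical names, not taken from the source):

public class KeyPartitioner extends Partitioner<TaggedKey, Text> {

    @Override
    public int getPartition(TaggedKey key, Text value, int numPartitions) {
        // Partition on the natural (join) key only, ignoring the source tag,
        // so records from both mappers reach the same reducer.
        return (key.getJoinKey().hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
}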

From source file nl.sanoma.hdt.report.generator.KeyDataPartitioner.java

/**
 * 
 * @author Richárd Ernő Kiss
 */
public class KeyDataPartitioner extends Partitioner<KeyData, DoubleWritable> {

From source file org.apache.accumulo.core.client.mapreduce.lib.partition.KeyRangePartitioner.java

/**
 * Hadoop partitioner that uses ranges based on row keys, and optionally sub-bins based on hashing.
 */
public class KeyRangePartitioner extends Partitioner<Key, Writable> implements Configurable {
    private RangePartitioner rp = new RangePartitioner();
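
The rp field suggests straight delegation: the Accumulo Key is reduced to its row, and the row is handed to the Text-based RangePartitioner. A sketch of that delegation (the method body itself is not shown in the excerpt):

    @Override
    public int getPartition(Key key, Writable value, int numPartitions) {
        // Delegate on the row portion of the key; RangePartitioner bins it
        // against the configured cut points.
        return rp.getPartition(key.getRow(), value, numPartitions);
    }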

From source file org.apache.accumulo.core.client.mapreduce.lib.partition.RangePartitioner.java

/**
 * Hadoop partitioner that uses ranges, and optionally sub-bins based on hashing.
 */
public class RangePartitioner extends Partitioner<Text, Writable> implements Configurable {
    private static final String PREFIX = RangePartitioner.class.getName();
    private static final String CUTFILE_KEY = PREFIX + ".cutFile";
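
The cutFile key points at a file of sorted split points that the partitioner loads at runtime; each key is then binned against those cut points, typically with a binary search. A minimal sketch of that lookup (field and method names are illustrative, and the optional sub-bin hashing mentioned in the javadoc is omitted):

    private Text[] cutPointArray; // sorted split points loaded from the cut file

    private int findPartition(Text key, Text[] array) {
        // Arrays.binarySearch returns (-(insertion point) - 1) for a key that is
        // not an exact split point; convert that back to the bin index.
        int index = java.util.Arrays.binarySearch(array, key);
        return index < 0 ? (index + 1) * -1 : index;
    }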