Example usage for org.apache.hadoop.io IntWritable get

Introduction

This page lists example usages of org.apache.hadoop.io.IntWritable.get().

Prototype

public int get() 

Document

Return the value of this IntWritable.
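
As a minimal standalone illustration (a sketch for this page, not drawn from the sources below), set() stores a primitive int and get() reads it back:

import org.apache.hadoop.io.IntWritable;

public class IntWritableDemo {
    public static void main(String[] args) {
        IntWritable writable = new IntWritable(41); // wrap a primitive int
        writable.set(writable.get() + 1);           // read with get(), update with set()
        System.out.println(writable.get());         // prints 42
    }
}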

Usage

From source file: CardTotalReducer.java

License: Apache License

@Override
public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
    int sum = 0;

    // Go through all values to sum up card values for a card suit
    for (IntWritable value : values) {
        sum += value.get();
    }

    context.write(key, new IntWritable(sum));
}
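
For context, a matching mapper (hypothetical; not part of the source above) would emit one (suit, card value) pair per input card, which the reducer then sums per suit. This sketch assumes tab-separated input lines such as "SPADE\t10":

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class CardValueMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] parts = value.toString().split("\t");
        if (parts.length == 2) {
            // key: card suit, value: numeric card value
            context.write(new Text(parts[0]), new IntWritable(Integer.parseInt(parts[1])));
        }
    }
}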

From source file: RangePartitioner.java

License: Apache License

@Override
public int getPartition(IntWritable key, Writable value, int numReduceTasks) {
    // Scale the key (assumed to lie in [0, nodeCnt)) proportionally onto the
    // reducer range; the trailing modulo keeps the result in bounds for keys
    // at or above nodeCnt.
    return (int) (((float) key.get() / (float) nodeCnt) * numReduceTasks) % numReduceTasks;
}
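
A partitioner like this is registered on the job at setup time. The sketch below assumes the new mapreduce API and a driver class that does not appear in this listing; nodeCnt would additionally have to be supplied to RangePartitioner through the configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class RangePartitionerDriver {
    public static Job configure(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "range-partitioned job");
        job.setPartitionerClass(RangePartitioner.class); // route keys by range
        job.setNumReduceTasks(8); // becomes numReduceTasks in getPartition above
        return job;
    }
}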

From source file: RandIntPartSamplerMapper.java

License: Apache License

@Override
public void configure(JobConf conf) {
    id = conf.getInt("mapred.task.partition", -1);
    reducersNum = conf.getInt("PARMM.reducersNum", 1000);
    System.out.println("id: " + id);
    try {
        // per-task array giving the number of samples this mapper should
        // send to each reducer
        IntWritable[] toSampleArr = DefaultStringifier.loadArray(conf, "PARMM.toSampleArr_" + id,
                IntWritable.class);
        toSample = 0;
        for (IntWritable toSampleRed : toSampleArr) {
            toSample += toSampleRed.get();
        }
        System.out.println("toSample: " + toSample);
        // expand the per-reducer counts into one destination entry per sample
        sampleDestinations = new IntWritable[toSample];
        int i = 0;
        for (int k = 0; k < toSampleArr.length; k++) {
            for (int j = 0; j < toSampleArr[k].get(); j++) {
                sampleDestinations[i++] = new IntWritable(k);
            }
        }
        // Arrays.asList returns a view backed by the array, so shuffling the
        // list shuffles sampleDestinations in place
        Collections.shuffle(Arrays.asList(sampleDestinations));
    } catch (IOException e) {
        // configure(JobConf) cannot throw a checked exception, so fail fast
        // rather than swallow the error silently
        throw new RuntimeException(e);
    }
}

From source file: FIMPartitioner.java

License: Apache License

@Override
public int getPartition(IntWritable key, Text value, int numPartitions) {
    // the key itself is the partition index; assumes 0 <= key.get() < numPartitions
    return key.get();
}

From source file: SleepJob.java

License: Apache License

public int getPartition(IntWritable k, NullWritable v, int numPartitions) {
    return k.get() % numPartitions;
}

From source file: SleepJob.java

License: Apache License

public void map(IntWritable key, IntWritable value, OutputCollector<IntWritable, NullWritable> output,
        Reporter reporter) throws IOException {

    //it is expected that every map processes mapSleepCount number of records. 
    try {
        reporter.setStatus("Sleeping... (" + (mapSleepDuration * (mapSleepCount - count)) + ") ms left");
        Thread.sleep(mapSleepDuration);
    } catch (InterruptedException ex) {
        throw (IOException) new IOException("Interrupted while sleeping").initCause(ex);
    }
    ++count;
    // output reduceSleepCount * numReduce number of random values, so that
    // each reducer will get reduceSleepCount number of keys.
    int k = key.get();
    for (int i = 0; i < value.get(); ++i) {
        output.collect(new IntWritable(k + i), NullWritable.get());
    }
}
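
Taken together with the getPartition method above, each map call fans out value.get() consecutive keys starting at key.get(), and the modulo partitioner spreads those keys evenly across reducers, so every reducer ends up with the same number of keys to sleep on.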

From source file: SleepJobWithArray.java

License: Apache License

public void map(IntWritable key, IntWritable value, OutputCollector<IntWritable, NullWritable> output,
        Reporter reporter) throws IOException {

    if (initBigArray) {
        // Yes, I should use log4j :-/
        System.out.println("Requesting array of " + bigArraySize);
        int[] foo = new int[bigArraySize]; // allocated purely to exercise heap usage; never read
    }
    //it is expected that every map processes mapSleepCount number of records. 
    try {
        reporter.setStatus("Sleeping... (" + (mapSleepDuration * (mapSleepCount - count)) + ") ms left");
        Thread.sleep(mapSleepDuration);
    } catch (InterruptedException ex) {
        throw (IOException) new IOException("Interrupted while sleeping").initCause(ex);
    }
    ++count;
    // output reduceSleepCount * numReduce number of random values, so that
    // each reducer will get reduceSleepCount number of keys.
    int k = key.get();
    for (int i = 0; i < value.get(); ++i) {
        output.collect(new IntWritable(k + i), NullWritable.get());
    }
}

From source file: AllLab_Skeleton.Lab1.WordCount_Reducer.java

public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {

    // count is an instance field, so this accumulates across every key the
    // reducer sees; this skeleton emits nothing here (presumably a later
    // step or a cleanup() override writes the result)
    for (IntWritable val : values) {
        count += val.get();
    }
}

From source file: Analysis.A2_Top_20_Most_Popular_Artists.Top_20_Most_Popular_Artist_Combiner.java

public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
    int totalUniquePlayCount = 0;

    // get count and add
    for (IntWritable uniqueCount : values) {
        totalUniquePlayCount += uniqueCount.get();
    }

    total.set(totalUniquePlayCount);
    context.write(key, total);
}
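
Since this combiner emits the same (Text, IntWritable) pairs that the reducer below consumes, it is typically registered alongside the reducer when the job is configured. A hypothetical driver fragment (the actual driver is not part of this listing):

job.setCombinerClass(Top_20_Most_Popular_Artist_Combiner.class); // map-side pre-aggregation
job.setReducerClass(Top_20_Most_Popular_Artist_Reducer.class);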

From source file: Analysis.A2_Top_20_Most_Popular_Artists.Top_20_Most_Popular_Artist_Reducer.java

public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
    int totalUniquePlayCount = 0;

    // get count and add
    for (IntWritable uniqueCount : values) {
        totalUniquePlayCount += uniqueCount.get();
    }

    // add this artist with its play count to the tree map
    top20.put(totalUniquePlayCount, key.toString());

    // evict when the map exceeds 20 entries; note that lastKey() removes the
    // largest key of an ascending TreeMap, so keeping the *top* 20 relies on
    // top20 being constructed with a descending comparator (otherwise
    // firstKey() would be the entry to drop)
    if (top20.size() > 20) {
        top20.remove(top20.lastKey());
    }
}
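
The reduce method above only accumulates entries into top20; nothing is written until all keys have been seen. A companion cleanup override (hypothetical; it assumes top20 is a TreeMap<Integer, String> keyed by play count) would emit the surviving entries:

@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
    // emit the 20 highest play counts that survived in the map
    for (Map.Entry<Integer, String> entry : top20.entrySet()) {
        context.write(new Text(entry.getValue()), new IntWritable(entry.getKey()));
    }
}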