Example usage for org.apache.hadoop.io.IntWritable.get()

List of usage examples for org.apache.hadoop.io.IntWritable.get()

Introduction

On this page you can find usage examples for org.apache.hadoop.io.IntWritable.get().

Prototype

public int get() 

Document

Return the value of this IntWritable.
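
For orientation, here is a minimal standalone sketch (not taken from any of the projects below) showing get() reading back a value stored via the constructor and via set():

import org.apache.hadoop.io.IntWritable;

public class IntWritableGetExample {
    public static void main(String[] args) {
        IntWritable value = new IntWritable(42);
        System.out.println(value.get());   // prints 42

        value.set(7);                      // IntWritable is mutable; set() replaces the wrapped int
        System.out.println(value.get());   // prints 7
    }
}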

Usage

From source file:cn.com.diditaxi.hive.cf.UDFToChar.java

License:Apache License

public Text evaluate(IntWritable i) {
    if (i == null) {
        return null;
    } else {
        out.reset();
        LazyInteger.writeUTF8NoException(out, i.get());
        result.set(out.getData(), 0, out.getCount());
        return result;
    }
}

From source file:cn.com.diditaxi.hive.cf.UDFToChar.java

License:Apache License

public Text evaluate(IntWritable i, Text format) {
    if (i == null || format == null) {
        return null;
    } else {
        String pattern = format.toString().replace("9", "#");
        decimalFormat.applyPattern(pattern);
        result.set(decimalFormat.format(i.get()));
        return result;
    }
}

From source file:cn.lhfei.hadoop.ch02.MaxTemperatureReducer.java

License:Apache License

@Override
protected void reduce(Text key, Iterable<IntWritable> values,
        Reducer<Text, IntWritable, Text, IntWritable>.Context context)
        throws IOException, InterruptedException {

    int maxValue = Integer.MIN_VALUE;
    for (IntWritable value : values) {
        maxValue = Math.max(maxValue, value.get());
    }
    context.write(key, new IntWritable(maxValue));
}
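
A reducer like the one above is typically wired into a job by a small driver. The following is an illustrative sketch, not part of the source file: MaxTemperatureMapper and the input/output paths are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MaxTemperatureDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "max temperature");
        job.setJarByClass(MaxTemperatureDriver.class);
        job.setMapperClass(MaxTemperatureMapper.class);    // assumed mapper emitting (Text year, IntWritable temperature)
        job.setCombinerClass(MaxTemperatureReducer.class); // max() is associative, so the reducer can double as a combiner
        job.setReducerClass(MaxTemperatureReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}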

From source file:com.axiomine.largecollections.kryo.serializers.IntWritableSerializer.java

License:Apache License

public void write(Kryo kryo, Output output, IntWritable object) {
    output.writeInt(object.get(), false);
}
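
A Kryo Serializer pairs write() with a matching read(); the source file's read() is not shown above, but a counterpart consistent with this write() would look roughly like the following sketch:

// Hypothetical counterpart; mirrors output.writeInt(value, false) above.
public IntWritable read(Kryo kryo, Input input, Class<IntWritable> type) {
    return new IntWritable(input.readInt(false));
}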

From source file:com.baidu.cloud.bmr.mapreduce.AccessLogAnalyzerReducer.java

License:Open Source License

@Override
protected void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
    int sum = 0;
    for (IntWritable val : values) {
        sum += val.get();
    }
    result.set(sum);
    context.write(key, result);
}

From source file:com.bark.hadoop.lab3.PageCountReducer.java

@Override
public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
    int sum = 0;

    for (IntWritable value : values)
        sum += value.get();

    context.write(key, new IntWritable(sum));
}

From source file:com.caseystella.analytics.distribution.RotationTest.java

License:Apache License

@Test
public void rotationTest() throws Exception {
    OutlierConfig config = JSONUtil.INSTANCE.load(amountConfig, OutlierConfig.class);
    final IntWritable numChunksAdded = new IntWritable(0);
    final IntWritable numRotations = new IntWritable(0);
    Distribution.Context context = new Distribution.Context(0, 0) {
        @Override
        protected void addChunk(Distribution d) {
            super.addChunk(d);
            numChunksAdded.set(numChunksAdded.get() + 1);
        }

        @Override
        protected void rotate() {
            super.rotate();
            numRotations.set(numRotations.get() + 1);
        }
    };
    GlobalStatistics globalStats = new GlobalStatistics();
    Random r = new Random(0);
    List<DataPoint> points = new ArrayList<>();
    DescriptiveStatistics stats = new DescriptiveStatistics();
    LongWritable ts = new LongWritable(0L);
    Assert.assertEquals(context.getAmount(), 0);
    context.addDataPoint(nextDataPoint(r, ts, 1, points), config.getRotationPolicy(),
            config.getChunkingPolicy(), config.getScalingFunction(), globalStats);
    Assert.assertEquals(context.getAmount(), 1);
    Assert.assertEquals(context.getChunks().size(), 1);
    Assert.assertEquals(numChunksAdded.get(), 1);
    Assert.assertEquals(numRotations.get(), 0);
    for (int i = 1; i < 10; ++i) {
        context.addDataPoint(nextDataPoint(r, ts, 1, points), config.getRotationPolicy(),
                config.getChunkingPolicy(), config.getScalingFunction(), globalStats);
        Assert.assertEquals(context.getChunks().size(), 1);
    }
    //at the 11th point, we should create a new chunk
    context.addDataPoint(nextDataPoint(r, ts, 1, points), config.getRotationPolicy(),
            config.getChunkingPolicy(), config.getScalingFunction(), globalStats);
    Assert.assertEquals(context.getChunks().size(), 2);
    Assert.assertEquals(numChunksAdded.get(), 2);
    Assert.assertEquals(context.getAmount(), 11);
    Assert.assertEquals(numRotations.get(), 0);
    for (int i = 12; i <= 110; ++i) {
        context.addDataPoint(nextDataPoint(r, ts, 1, points), config.getRotationPolicy(),
                config.getChunkingPolicy(), config.getScalingFunction(), globalStats);
    }
    Assert.assertEquals(11, numChunksAdded.get());
    Assert.assertEquals(0, numRotations.get());
    //at the 111th point, we should create a rotation
    context.addDataPoint(nextDataPoint(r, ts, 1, points), config.getRotationPolicy(),
            config.getChunkingPolicy(), config.getScalingFunction(), globalStats);
    Assert.assertEquals(12, numChunksAdded.get());
    Assert.assertEquals(11, context.getChunks().size());
    Assert.assertEquals(1, numRotations.get());
    //rotates just past the rotation cutoff (ensuring that we keep at least the last 100 entries in there)
    Assert.assertEquals(context.getAmount(), 101);
    for (int i = 111; i <= 150; ++i) {
        context.addDataPoint(nextDataPoint(r, ts, 1, points), config.getRotationPolicy(),
                config.getChunkingPolicy(), config.getScalingFunction(), globalStats);
    }
    //no matter how far we go in the stream, we always stay at 11 chunks and a total number of values in the distribution of <= 110 (i.e. between the cutoff and cutoff + a chunk)
    Assert.assertEquals(11, context.getChunks().size());
    Assert.assertTrue(context.getAmount() <= 110);
}

From source file:com.cg.mapreduce.fpgrowth.mahout.fpm.ParallelFPGrowthReducer.java

License:Apache License

@Override
protected void reduce(IntWritable key, Iterable<TransactionTree> values, Context context) throws IOException {
    TransactionTree cTree = new TransactionTree();
    for (TransactionTree tr : values) {
        for (Pair<IntArrayList, Long> p : tr) {
            cTree.addPattern(p.getFirst(), p.getSecond());
        }
    }

    List<Pair<Integer, Long>> localFList = Lists.newArrayList();
    for (Entry<Integer, MutableLong> fItem : cTree.generateFList().entrySet()) {
        localFList.add(new Pair<Integer, Long>(fItem.getKey(), fItem.getValue().toLong()));
    }

    Collections.sort(localFList, new CountDescendingPairComparator<Integer, Long>());

    if (useFP2) {
        FPGrowthIds.generateTopKFrequentPatterns(cTree.iterator(), freqList, minSupport, maxHeapSize,
                PFPGrowth.getGroupMembers(key.get(), maxPerGroup, numFeatures),
                new IntegerStringOutputConverter(
                        new ContextWriteOutputCollector<IntWritable, TransactionTree, Text, TopKStringPatterns>(
                                context),
                        featureReverseMap),
                new ContextStatusUpdater<IntWritable, TransactionTree, Text, TopKStringPatterns>(context));
    } else {
        FPGrowth<Integer> fpGrowth = new FPGrowth<Integer>();
        fpGrowth.generateTopKFrequentPatterns(new IteratorAdapter(cTree.iterator()), localFList, minSupport,
                maxHeapSize,
                Sets.newHashSet(PFPGrowth.getGroupMembers(key.get(), maxPerGroup, numFeatures).toList()),
                new IntegerStringOutputConverter(
                        new ContextWriteOutputCollector<IntWritable, TransactionTree, Text, TopKStringPatterns>(
                                context),
                        featureReverseMap),
                new ContextStatusUpdater<IntWritable, TransactionTree, Text, TopKStringPatterns>(context));
    }
}

From source file:com.cg.mapreduce.myfpgrowth.ParallelFPGrowthReducer.java

License:Apache License

private ArrayList<TreeNode> generateLocalList(IntWritable key, List<TreeNode> fList) {
    ArrayList<TreeNode> localList = new ArrayList<TreeNode>();
    int fListLen = fList.size();
    int gid = key.get();
    int startIndex = gid * maxPerGroup;
    int lastaIndex = (gid + 1) * maxPerGroup < fListLen ? (gid + 1) * maxPerGroup : fListLen;
    for (int i = startIndex; i < lastaIndex; i++) {
        localList.add(fList.get(i));
    }
    Collections.sort(localList);
    return localList;
}

From source file:com.chinamobile.bcbsp.bspcontroller.Counters.java

License:Apache License

/**
 * Extracts a block (data enclosed within delimiters) ignoring escape
 * sequences. Throws ParseException if an incomplete block is found, else
 * returns null.
 * @param str
 *        the string to scan for a block
 * @param open
 *        the opening delimiter character
 * @param close
 *        the closing delimiter character
 * @param index
 *        in/out cursor: the scan starts at index.get(), and index is advanced
 *        just past the closing delimiter when a block is found
 * @return the extracted block, or null if no block was found
 */
private static String getBlock(String str, char open, char close, IntWritable index) throws ParseException {
    StringBuilder split = new StringBuilder();
    int next = StringUtils.findNext(str, open, StringUtils.ESCAPE_CHAR, index.get(), split);
    split.setLength(0); // clear the buffer
    if (next >= 0) {
        ++next; // move over '('
        next = StringUtils.findNext(str, close, StringUtils.ESCAPE_CHAR, next, split);
        if (next >= 0) {
            ++next; // move over ')'
            index.set(next);
            return split.toString(); // found a block
        } else {
            throw new ParseException("Unexpected end of block", next);
        }
    }
    return null; // found nothing
}