Example usage for org.apache.hadoop.mapreduce Counter increment

Introduction

On this page you can find example usage of org.apache.hadoop.mapreduce.Counter#increment.

Prototype

void increment(long incr);

Document

Increment this counter by the given value.
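
A minimal sketch of a typical call site, assuming a hypothetical mapper (the LineCounterMapper class and its RECORDS counter are illustrative names, not taken from the sources below): the task obtains a Counter from its context with getCounter and increments it once per record.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical mapper, for illustration only: counts every input record it sees.
public class LineCounterMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    enum Counters { RECORDS }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // getCounter returns this task's Counter for the enum; increment adds the given delta.
        Counter records = context.getCounter(Counters.RECORDS);
        records.increment(1);
        context.write(value, NullWritable.get());
    }
}

The framework aggregates counter values across all tasks of a job, so per-task increments surface as job-level totals. The examples below exercise the same getCounter/increment pattern, mostly through EasyMock-based tests.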

Usage

From source file: org.apache.mahout.cf.taste.hadoop.item.RecommenderJobTest.java

License: Apache License

/**
 * tests {@link ToUserVectorsReducer}
 */
@Test
public void testToUserVectorReducer() throws Exception {
    Reducer<VarLongWritable, VarLongWritable, VarLongWritable, VectorWritable>.Context context = EasyMock
            .createMock(Reducer.Context.class);
    Counter userCounters = EasyMock.createMock(Counter.class);

    EasyMock.expect(context.getCounter(ToUserVectorsReducer.Counters.USERS)).andReturn(userCounters);
    userCounters.increment(1);
    context.write(EasyMock.eq(new VarLongWritable(12L)),
            MathHelper.vectorMatches(MathHelper.elem(TasteHadoopUtils.idToIndex(34L), 1.0),
                    MathHelper.elem(TasteHadoopUtils.idToIndex(56L), 2.0)));

    EasyMock.replay(context, userCounters);

    Collection<VarLongWritable> varLongWritables = Lists.newLinkedList();
    varLongWritables.add(new EntityPrefWritable(34L, 1.0f));
    varLongWritables.add(new EntityPrefWritable(56L, 2.0f));

    new ToUserVectorsReducer().reduce(new VarLongWritable(12L), varLongWritables, context);

    EasyMock.verify(context, userCounters);
}

From source file: org.apache.mahout.cf.taste.hadoop.item.RecommenderJobTest.java

License: Apache License

/**
 * tests {@link ToUserVectorsReducer} using boolean data
 */
@Test
public void testToUserVectorReducerWithBooleanData() throws Exception {
    Reducer<VarLongWritable, VarLongWritable, VarLongWritable, VectorWritable>.Context context = EasyMock
            .createMock(Reducer.Context.class);
    Counter userCounters = EasyMock.createMock(Counter.class);

    EasyMock.expect(context.getCounter(ToUserVectorsReducer.Counters.USERS)).andReturn(userCounters);
    userCounters.increment(1);
    context.write(EasyMock.eq(new VarLongWritable(12L)),
            MathHelper.vectorMatches(MathHelper.elem(TasteHadoopUtils.idToIndex(34L), 1.0),
                    MathHelper.elem(TasteHadoopUtils.idToIndex(56L), 1.0)));

    EasyMock.replay(context, userCounters);

    new ToUserVectorsReducer().reduce(new VarLongWritable(12L),
            Arrays.asList(new VarLongWritable(34L), new VarLongWritable(56L)), context);

    EasyMock.verify(context, userCounters);
}

From source file: org.apache.mahout.cf.taste.hadoop.item.ToUserVectorsReducerTest.java

License: Apache License

@Test
public void testToUsersReducerMinPreferencesUserPasses() throws Exception {
    Reducer<VarLongWritable, VarLongWritable, VarLongWritable, VectorWritable>.Context context = EasyMock
            .createMock(Reducer.Context.class);
    Counter userCounters = EasyMock.createMock(Counter.class);

    ToUserVectorsReducer reducer = new ToUserVectorsReducer();
    setField(reducer, "minPreferences", 2);

    EasyMock.expect(context.getCounter(ToUserVectorsReducer.Counters.USERS)).andReturn(userCounters);
    userCounters.increment(1);
    context.write(EasyMock.eq(new VarLongWritable(123)),
            MathHelper.vectorMatches(MathHelper.elem(TasteHadoopUtils.idToIndex(456L), 1.0),
                    MathHelper.elem(TasteHadoopUtils.idToIndex(789L), 1.0)));

    EasyMock.replay(context, userCounters);

    reducer.reduce(new VarLongWritable(123), Arrays.asList(new VarLongWritable(456), new VarLongWritable(789)),
            context);

    EasyMock.verify(context, userCounters);
}

From source file: org.apache.mahout.classifier.naivebayes.training.IndexInstancesMapperTest.java

License: Apache License

@Test
public void skip() throws Exception {

    Counter skippedInstances = EasyMock.createMock(Counter.class);

    EasyMock.expect(ctx.getCounter(IndexInstancesMapper.Counter.SKIPPED_INSTANCES)).andReturn(skippedInstances);
    skippedInstances.increment(1);

    EasyMock.replay(ctx, skippedInstances);

    IndexInstancesMapper indexInstances = new IndexInstancesMapper();
    setField(indexInstances, "labelIndex", labelIndex);

    indexInstances.map(new Text("/fish/"), instance, ctx);

    EasyMock.verify(ctx, skippedInstances);
}

From source file: org.apache.mahout.clustering.lda.cvb.PriorTrainingReducer.java

License: Apache License

@Override
public void reduce(IntWritable docId, Iterator<VectorWritable> vectors,
        OutputCollector<IntWritable, VectorWritable> out, Reporter reporter) throws IOException {
    if (this.reporter == null) {
        this.reporter = reporter;
    }
    Counter docCounter = reporter.getCounter(Counters.DOCS);
    docCounter.increment(1);
    Vector topicVector = null;
    Vector document = null;
    while (vectors.hasNext()) {
        VectorWritable v = vectors.next();
        /*
         *  NOTE: we are susceptible to the pathological case of numTerms == numTopics (which should
         *  never happen, as that would generate a horrible topic model), because we identify which
         *  vector is the "prior" and which is the document by document.size() == numTerms
         */
        if (v.get().size() == numTerms) {
            document = v.get();
        } else {
            topicVector = v.get();
        }
    }
    if (document == null) {
        if (topicVector != null) {
            reporter.getCounter(Counters.UNUSED_PRIORS).increment(1);
        }
        reporter.getCounter(Counters.SKIPPED_DOC_IDS).increment(1);
        return;
    } else if (topicVector == null && onlyLabeledDocs) {
        reporter.getCounter(Counters.SKIPPED_DOC_IDS).increment(1);
        return;
    } else {
        if (topicVector == null) {
            topicVector = new DenseVector(numTopics).assign(1.0 / numTopics);
        } else {
            if (reporter.getCounter(Counters.DOCS_WITH_PRIORS).getCounter() % 100 == 0) {
                long docsWithPriors = reporter.getCounter(Counters.DOCS_WITH_PRIORS).getCounter();
                long skippedDocs = reporter.getCounter(Counters.SKIPPED_DOC_IDS).getCounter();
                long total = reporter.getCounter(Counters.DOCS).getCounter();
                log.info("Processed {} docs total, {} with priors, skipped {} docs",
                        new Object[] { total, docsWithPriors, skippedDocs });
            }
            reporter.getCounter(Counters.DOCS_WITH_PRIORS).increment(1);
        }
        modelTrainer.trainSync(document, topicVector, true, 1);
        multipleOutputs.getCollector(DOC_TOPICS, reporter).collect(docId, new VectorWritable(topicVector));
        reporter.getCounter(Counters.USED_DOCS).increment(1);
    }
}

From source file: org.apache.pig.backend.hadoop.executionengine.fetch.FetchTaskContext.java

License: Apache License

@Override
public boolean incrCounter(Enum<?> name, long delta) {
    if (context == null) {
        return false;
    }
    Counter counter = context.getCounter(name);
    counter.increment(delta);
    return true;
}

From source file: org.apache.pig.backend.hadoop.executionengine.fetch.FetchTaskContext.java

License: Apache License

@Override
public boolean incrCounter(String group, String name, long delta) {
    if (context == null) {
        return false;
    }
    Counter counter = context.getCounter(group, name);
    counter.increment(delta);
    return true;
}

From source file: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigHadoopLogger.java

License: Apache License

@SuppressWarnings("unchecked")
public void warn(Object o, String msg, Enum warningEnum) {
    String displayMessage = o.getClass().getName() + ": " + msg;
    if (aggregate) {
        if (taskIOContext != null) {
            Counter c = taskIOContext.getCounter(warningEnum);
            c.increment(1);
        } else {
            //TODO:
            //In local mode, if the PigHadoopLogger is used initially, aggregation
            //cannot be performed because the reporter will be null; the reference
            //to a reporter is supplied by Hadoop at run time, and in local mode
            //there is no Hadoop runtime to supply one. In that case just print
            //the warning message as is. If a warning message is printed in
            //map-reduce mode while aggregation is turned on, that is a bug.
            log.warn(displayMessage);
        }
    } else {
        log.warn(displayMessage);
    }
}

From source file: org.apache.pig.CounterBasedErrorHandler.java

License: Apache License

private long incAndGetCounter(String storeSignature, String counterName) {
    Counter counter = getCounter(storeSignature, counterName);
    counter.increment(1);
    return counter.getValue();
}

From source file: org.apache.pig.TypedOutputEvalFunc.java

License: Apache License

protected static void safeIncrCounter(String group, String name, Long increment) {
    Counter counter = PigStatusReporter.getInstance().getCounter(group, name);
    if (counter != null) {
        counter.increment(increment);
    }
}