Example usage for org.apache.hadoop.mapreduce Counter increment

Introduction

On this page you can find usage examples for org.apache.hadoop.mapreduce.Counter#increment.

Prototype

void increment(long incr);

Document

Increment this counter by the given value.
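
Below is a minimal, self-contained sketch of how increment is typically called from inside a Mapper, both with the default step of one and with an arbitrary long value. The WordMapper class and its WordMapperCounter enum are illustrative names introduced here, not part of Hadoop or of the projects listed under Usage.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Illustrative counter names; getCounter(String group, String name) works as well.
    public enum WordMapperCounter { WORDS, EMPTY_LINES }

    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString().trim();
        if (line.isEmpty()) {
            // increment the counter by one for each empty input line
            context.getCounter(WordMapperCounter.EMPTY_LINES).increment(1);
            return;
        }
        String[] tokens = line.split("\\s+");
        // increment by an arbitrary long value in a single call
        context.getCounter(WordMapperCounter.WORDS).increment(tokens.length);
        for (String token : tokens) {
            word.set(token);
            context.write(word, ONE);
        }
    }
}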

Usage

From source file: EggContext.java

License: Open Source License

/** Increment the Hadoop metric counter of the passed group and name
 *  by the specified amount.  If no value is given, the counter is
 *  incremented by one.
 *  @param group   The counter group
 *  @param name    The counter name
 *  @param value   The amount to increment by
 */
@JSFunction
public void incrCounter(String group, String name, Object value) {
    Counter counter = task.getCounter(group, name);
    if (value instanceof Undefined)
        counter.increment(1);
    else if (value instanceof Double)
        counter.increment(((Double) value).longValue());
}

From source file: authordetect.input.SingleBookReader.java

private void processBookContent() throws IOException {

    currentPos += lineReader.readLine(currentLine);
    String currentLineStr = currentLine.toString().toLowerCase();

    // Process the book content line by line and update the word map
    while (!isFinish) {
        String[] words = currentLineStr.split(" ");
        //write all words into the word map
        for (String word : words) {
            word = word.trim().replaceAll("[^a-zA-Z0-9]", "").toLowerCase();
            if (!word.isEmpty()) {
                wordCountMap.merge(word, 1, Integer::sum); // count occurrences so maxCount below is meaningful
            }
        }
        //detect book end
        if (currentLineStr.contains("end") && currentLineStr.contains("gutenberg")) {
            isFinish = true;

            //update counter which stores the book count
            Counter counter = context.getCounter(BookCounter.BOOK_COUNT);
            counter.increment(1);
        }
        currentPos += lineReader.readLine(currentLine);
        currentLineStr = currentLine.toString().toLowerCase();
    }

    //convert word map to text array
    int arrayLen = wordCountMap.entrySet().size();
    Iterator<Map.Entry<String, Integer>> iterator = wordCountMap.entrySet().iterator();
    int maxCount = 0, count;
    String word, wordCount;
    Text[] wordArray = new Text[arrayLen];

    for (int i = 0; i < arrayLen; i++) {
        Map.Entry<String, Integer> entry = iterator.next();
        word = entry.getKey();
        count = entry.getValue();
        wordCount = word + "/" + count;
        wordArray[i] = new Text(wordCount);

        if (count > maxCount) {//get the maximum word count as well
            maxCount = count;
        }
    }

    key = new Text(title + "/" + maxCount);
    value = new TextArrayWritable(wordArray);
}

From source file: boostingPL.MR.AdaBoostPLMapper.java

License: Open Source License

protected void cleanup(Context context) throws IOException, InterruptedException {
    int T = Integer.parseInt(context.getConfiguration().get("BoostingPL.numIterations"));
    System.out.println("Iteration = " + T);

    String boostingName = context.getConfiguration().get("BoostingPL.boostingName");
    Boosting boosting = BoostingPLFactory.createBoosting(boostingName, insts, T);
    Counter iterationCounter = context.getCounter("BoostingPL", "current iterations");
    try {
        for (int t = 0; t < T; t++) {
            boosting.run(t);
            context.progress();
            iterationCounter.increment(1);
        }
    } catch (Exception e) {
        LOG.error(e.toString());
        return;
    }

    double[] corWeights = boosting.getClasifiersWeights();
    Classifier[] classifiers = boosting.getClassifiers();
    int taskid = context.getTaskAttemptID().getTaskID().getId();

    Sort.sort(classifiers, corWeights);

    for (int i = 0; i < classifiers.length; i++) {
        System.out.println("nodeid=" + taskid + " cweight=" + corWeights[i]);
        context.write(new IntWritable(taskid), new ClassifierWritable(classifiers[i], corWeights[i]));
    }
}

From source file: com.asakusafw.runtime.flow.ResultOutput.java

License: Apache License

/**
 * Finalizes and closes this output.
 * @throws IOException if failed to finalize the output
 * @throws InterruptedException if interrupted while finalizing the output
 */
public void close() throws IOException, InterruptedException {
    for (Counter counter : counters) {
        counter.increment(records);
    }
    writer.close(context);
}

From source file: com.asakusafw.runtime.stage.directio.AbstractNoReduceDirectOutputMapper.java

License: Apache License

@Override
protected void runInternal(Context context) throws IOException, InterruptedException {
    if (context.nextKeyValue() == false) {
        if (log.isDebugEnabled()) {
            log.debug(MessageFormat.format("There are not input for directly output Mapper {0}@{1}", //$NON-NLS-1$
                    getClass().getName(), context.getTaskAttemptID()));
        }
    } else {
        if (log.isDebugEnabled()) {
            log.debug(MessageFormat.format("Start setup directly output Mapper {0}@{1}", //$NON-NLS-1$
                    getClass().getName(), context.getTaskAttemptID()));
        }
        DirectDataSourceRepository repository = HadoopDataSourceUtil.loadRepository(context.getConfiguration());
        String arguments = context.getConfiguration().get(StageConstants.PROP_ASAKUSA_BATCH_ARGS, ""); //$NON-NLS-1$
        VariableTable variables = new VariableTable(VariableTable.RedefineStrategy.IGNORE);
        variables.defineVariables(arguments);

        String path = variables.parse(rawBasePath, false);
        String id = repository.getRelatedId(path);
        OutputAttemptContext outputContext = HadoopDataSourceUtil.createContext(context, id);
        DataFormat<? super T> format = ReflectionUtils.newInstance(dataFormatClass, context.getConfiguration());
        DirectDataSource datasource = repository.getRelatedDataSource(path);
        String basePath = repository.getComponentPath(path);
        String unresolvedResourcePath = rawResourcePath.replaceAll(Pattern.quote("*"), //$NON-NLS-1$
                String.format("%04d", context.getTaskAttemptID().getTaskID().getId())); //$NON-NLS-1$
        String resourcePath = variables.parse(unresolvedResourcePath);
        DataDefinition<? super T> definition = SimpleDataDefinition.newInstance(dataType, format);

        if (log.isDebugEnabled()) {
            log.debug(MessageFormat.format("Open mapper output (id={0}, basePath={1}, resourcePath={2})", //$NON-NLS-1$
                    id, basePath, resourcePath));
        }

        int records = 0;
        try (ModelOutput<? super T> output = datasource.openOutput(outputContext, definition, basePath,
                resourcePath, outputContext.getCounter())) {
            do {
                output.write(context.getCurrentValue());
                records++;
            } while (context.nextKeyValue());
        } finally {
            if (log.isDebugEnabled()) {
                log.debug(MessageFormat.format("Start cleanup directly output Mapper {0}@{1}", //$NON-NLS-1$
                        getClass().getName(), context.getTaskAttemptID()));
            }
        }
        org.apache.hadoop.mapreduce.Counter recordCounter = JobCompatibility
                .getTaskOutputRecordCounter(context);
        recordCounter.increment(records);
        context.getCounter(COUNTER_GROUP, id + ".files").increment(1); //$NON-NLS-1$
        context.getCounter(COUNTER_GROUP, id + ".records").increment(records); //$NON-NLS-1$
        context.getCounter(COUNTER_GROUP, id + ".size").increment(outputContext.getCounter().get()); //$NON-NLS-1$
    }
}

From source file: com.ebay.erl.mobius.core.mapred.CounterUpdateThread.java

License: Apache License

private void reportCounters() {
    synchronized (counts) {
        for (Counter aCounter : counts.keySet()) {
            long previous = aCounter.getValue() < 0 ? 0L : aCounter.getValue();
            long current = counts.get(aCounter).longValue();

            long diff = current - previous;
            if (diff != 0) {
                this.r.setStatus("Updating counters on " + new Date());
                this.r.progress(); // report progress so Hadoop knows this task is still alive
                aCounter.increment(diff);
            }
        }
    }
}

From source file: com.inmobi.conduit.distcp.tools.mapred.TestCopyCommitter.java

License: Apache License

@Ignore
@Test
public void testCounterProvider() {
    try {
        Job job = MockJobTracker.getJobForClient();
        Counters a = EMPTY_COUNTERS;
        CounterGroup grp = a.getGroup("abc");
        Counter cntr = grp.findCounter("counter");
        cntr.increment(100);
        CounterProvider cp = new CounterProvider(a);
        job.submit();
        Assert.assertEquals(job.getCounters(), a);
    } catch (Exception e) {
        LOG.error("Exception encountered ", e);
    }
}

From source file: com.linkedin.cubert.plan.physical.PerfProfiler.java

License: Open Source License

private void updateCounter() {
    long[] operatorTime = getOperatorTime();

    String profileCounterGroupName = PhaseContext.isMapper() ? mapperProfileCounterGroupName
            : reducerProfileCounterGroupName;

    ArrayNode operatorsJson = multipassOperatorsJson.get(currentPassIndex);
    for (int i = 0; i < operatorTime.length; i++) {
        if (operatorTime[i] > 0) {
            JsonNode operatorJson = operatorsJson.get(i);

            OperatorType type = OperatorType.valueOf(operatorJson.get("operator").getTextValue());
            String outputName = operatorJson.get("output").getTextValue();

            String counterName = String.format("P%d-O%d-%s-%s", currentPassIndex, i, type, outputName);
            Counter profileCounter = PhaseContext.getCounter(profileCounterGroupName, counterName);
            profileCounter.increment(operatorTime[i]);
        }
    }
}

From source file: com.marklogic.mapreduce.ContentWriter.java

License: Apache License

@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
    if (batchSize > 1) {
        int len, sid;
        if (countBased) {
            len = 1;
            sid = sfId;
        } else {
            len = fastLoad ? forestIds.length : 1;
            sid = 0;
        }
        for (int i = 0; i < len; i++, sid++) {
            if (counts[i] > 0) {
                Content[] remainder = new Content[counts[i]];
                System.arraycopy(forestContents[i], 0, remainder, 0, counts[i]);
                if (sessions[sid] == null) {
                    String forestId = forestIds[sid];
                    sessions[sid] = getSession(forestId);
                }
                insertBatch(remainder, sid);
                stmtCounts[sid]++;
            }
        }
    }
    for (int i = 0; i < sessions.length; i++) {
        if (sessions[i] != null) {
            if (stmtCounts[i] > 0 && needCommit) {
                try {
                    sessions[i].commit();
                    succeeded += commitUris[i].size();
                } catch (RequestServerException e) {
                    // log error and continue on RequestServerException
                    LOG.error("Error commiting transaction", e);
                    failed += commitUris[i].size();
                    for (DocumentURI failedUri : commitUris[i]) {
                        LOG.warn("Failed document " + failedUri);
                    }
                    commitUris[i].clear();
                } catch (RequestException e) {
                    if (sessions[i] != null) {
                        sessions[i].close();
                    }
                    if (countBased) {
                        rollbackCount(i);
                    }
                    failed += commitUris[i].size();
                    commitUris[i].clear();
                    throw new IOException(e);
                } finally {
                    sessions[i].close();
                }
            } else {
                sessions[i].close();
            }
        }
    }
    if (is != null) {
        is.close();
        if (is instanceof ZipEntryInputStream) {
            ((ZipEntryInputStream) is).closeZipInputStream();
        }
    }
    Counter committedCounter = context.getCounter(MarkLogicCounter.OUTPUT_RECORDS_COMMITTED);
    synchronized (committedCounter) {
        committedCounter.increment(succeeded);
    }
    Counter failedCounter = context.getCounter(MarkLogicCounter.OUTPUT_RECORDS_FAILED);
    synchronized (failedCounter) {
        failedCounter.increment(failed);
    }
}

From source file: com.samsung.px.pig.storage.DynamoDBStorage.java

License: Apache License

private void reportCounter(String counterName, long incrementValue) {
    PigStatusReporter reporter = PigStatusReporter.getInstance();
    if (reporter != null) {
        Counter counter = reporter.getCounter(DYNAMO_COUNTER_GROUP, counterName);
        if (counter != null) {
            counter.increment(incrementValue);
        }
    }
}