Example usage for org.apache.hadoop.mapreduce Counter getValue

Introduction

On this page you can find example usage for org.apache.hadoop.mapreduce.Counter#getValue().

Prototype

long getValue();

Document

What is the current value of this counter?

Usage
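
Before the examples collected from open-source projects, here is a minimal, self-contained sketch of the basic pattern: after a job finishes, look up a counter from Job#getCounters() and read it with getValue(). The helper below uses the built-in TaskCounter.MAP_INPUT_RECORDS and is illustrative only; it is not taken from any of the projects listed here.

import java.io.IOException;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

public static long printMapInputRecords(Job job) throws IOException {
    // Assumes the job has already completed, e.g. after job.waitForCompletion(true).
    Counter counter = job.getCounters().findCounter(TaskCounter.MAP_INPUT_RECORDS);
    System.out.println(counter.getDisplayName() + " = " + counter.getValue());
    return counter.getValue();
}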

From source file: com.awcoleman.ExampleJobSummaryLogWithOutput.BinRecToAvroRecDriver.java

License: Apache License

public int run(String[] args) throws Exception {

    String input = null;
    String output = null;

    if (args.length < 2) {
        System.err.printf("Usage: %s <input> <output>\n", this.getClass().getSimpleName());
        return -1;
    } else {
        input = args[0];
        output = args[1];
    }

    Job job = Job.getInstance(getConf(), "BinRecToAvroRecDriver");
    Configuration conf = job.getConfiguration();

    //Add job log to hold Driver logging (and any summary info about the dataset, job, or counters we want to write)
    String fapath = createTempFileAppender(job);

    //get schema
    Schema outSchema = ReflectData.get().getSchema(com.awcoleman.examples.avro.BinRecForPartitions.class);
    job.getConfiguration().set("outSchema", outSchema.toString());

    //Job conf settings
    job.setJarByClass(BinRecToAvroRecDriver.class);
    job.setMapperClass(Map.class);
    job.setReducerClass(Reduce.class);
    job.setInputFormatClass(BinRecInputFormat.class);
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    AvroJob.setOutputKeySchema(job, outSchema);

    AvroJob.setMapOutputKeySchema(job, Schema.create(Schema.Type.STRING));
    AvroJob.setMapOutputValueSchema(job, outSchema);

    //Job output compression
    FileOutputFormat.setCompressOutput(job, true);
    job.getConfiguration().set(AvroJob.CONF_OUTPUT_CODEC, DataFileConstants.DEFLATE_CODEC);

    //Input and Output Paths
    FileInputFormat.setInputPaths(job, new Path(input));
    Path outPath = new Path(output);
    FileOutputFormat.setOutputPath(job, outPath);
    outPath.getFileSystem(conf).delete(outPath, true);

    boolean jobCompletionStatus = job.waitForCompletion(true);

    //Print Custom Counters before exiting
    Counters counters = job.getCounters();
    for (MYJOB_CNTRS customCounter : MYJOB_CNTRS.values()) {
        Counter thisCounter = counters.findCounter(customCounter);
        System.out.println("Custom Counter " + customCounter + "=" + thisCounter.getValue());
    }

    long mycnt1 = job.getCounters()
            .findCounter("com.awcoleman.TestingGettingContainerLogger.BinRecToAvroRecDriver$MYJOB_CNTRS",
                    "MYCNT1")
            .getValue();
    long mycnt2 = job.getCounters()
            .findCounter("com.awcoleman.TestingGettingContainerLogger.BinRecToAvroRecDriver$MYJOB_CNTRS",
                    "MYCNT2")
            .getValue();
    long mycnt3 = job.getCounters()
            .findCounter("com.awcoleman.TestingGettingContainerLogger.BinRecToAvroRecDriver$MYJOB_CNTRS",
                    "MYCNT3")
            .getValue();

    long myfakekpi = mycnt1 - mycnt2;

    String msgMyfakekpi = "The Fake KPI of the Dataset: " + String.format("%,d", myfakekpi);
    System.out.println(msgMyfakekpi);
    logger.info(msgMyfakekpi);

    //Finished, so move job log to HDFS in _log dir and clean up
    copyTempFileAppenderToHDFSOutpath(job, fapath, output);

    return jobCompletionStatus ? 0 : 1;
}
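
The custom counter values read with getValue() above are produced inside the tasks. Below is a minimal sketch of a mapper that increments such an enum counter via Context#getCounter(Enum); the enum name mirrors MYJOB_CNTRS from the driver above, but the mapper itself is illustrative and is not the project's actual Avro mapper.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ExampleCounterMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    public enum MYJOB_CNTRS {
        MYCNT1, MYCNT2, MYCNT3
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Every increment here is what the driver later reads back with getValue().
        context.getCounter(MYJOB_CNTRS.MYCNT1).increment(1);
        context.write(value, new LongWritable(1));
    }
}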

From source file: com.cloudera.accumulo.upgrade.compatibility.DataCompatibilityVerify.java

License: Open Source License

@Override
public int run(String[] args) throws Exception {
    final String jobName = this.getClass().getName();
    options.parseArgs(jobName, args);
    try {
        final int totalMapSlots = getConf().getInt("mapred.map.tasks",
                DataCompatibilityTestCli.DEFAULT_NUM_ROWS);
        if (-1 == options.test.numRows) {
            options.test.numRows = totalMapSlots;
        }
        final TableOperations ops = options.connection.getConnector().tableOperations();
        final List<String> names = options.test.getTableNames(ops);
        int totalReduceSlots = getConf().getInt("mapred.reduce.tasks", 0);
        if (-1 != options.test.numReduceSlots) {
            totalReduceSlots = options.test.numReduceSlots;
        }
        if (0 == totalReduceSlots) {
            totalReduceSlots = names.size();
        }
        final int reducesPerJob = Math.max(1, totalReduceSlots / names.size());

        final List<Job> jobs = new ArrayList<Job>();
        for (String name : names) {
            final Job job = new Job(getConf(), jobName + " " + name);
            job.setJarByClass(this.getClass());
            options.input.useAccumuloInputFormat(job, name);
            job.setMapperClass(DataVerifyMapper.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(LongWritable.class);
            job.setReducerClass(LongSumReducer.class);
            job.setCombinerClass(LongSumReducer.class);
            job.setOutputFormatClass(TextOutputFormat.class);
            TextOutputFormat.setOutputPath(job, new Path(options.test.output, name));
            job.setNumReduceTasks(reducesPerJob);
            job.submit();
            jobs.add(job);
        }

        boolean success = true;
        final long numCellsPerRow = options.test.qualifiers * DataCompatibilityLoad.FAMILIES.length;
        final long numCellsPerFamily = options.test.qualifiers * options.test.numRows;
        for (Job job : jobs) {
            success &= job.waitForCompletion(true);
            final CounterGroup group = job.getCounters().getGroup(DataVerifyMapper.class.getName());
            if (null == group) {
                log.error("Job '" + job.getJobName() + "' doesn't have counters for the verification mapper.");
                success = false;
            } else {
                final Counter badCounter = group.findCounter(BAD_COUNTER);
                if (null != badCounter && 0 < badCounter.getValue()) {
                    log.error("Job '" + job.getJobName() + "' has " + badCounter.getValue()
                            + " entries with bad checksums.");
                    success = false;
                }
                int numRows = 0;
                int numFamilies = 0;
                for (Counter counter : group) {
                    if (counter.getName().startsWith(ROW_COUNTER_PREFIX)) {
                        numRows++;
                        if (numCellsPerRow != counter.getValue()) {
                            log.error("Job '" + job.getJobName() + "', counter '" + counter.getName()
                                    + "' should have " + numCellsPerRow + " cells, but instead has "
                                    + counter.getValue());
                            success = false;
                        }
                    } else if (counter.getName().startsWith(FAMILY_COUNTER_PREFIX)) {
                        numFamilies++;
                        if (numCellsPerFamily != counter.getValue()) {
                            log.error("Job '" + job.getJobName() + "', counter '" + counter.getName()
                                    + "' should have " + numCellsPerFamily + " cells, but instead has "
                                    + counter.getValue());
                            success = false;
                        }
                    }
                }
                if (options.test.numRows != numRows) {
                    log.error("Job '" + job.getJobName() + "' is supposed to have " + options.test.numRows
                            + " rows, but has " + numRows);
                    success = false;
                }
                if (DataCompatibilityLoad.FAMILIES.length != numFamilies) {
                    log.error("Job '" + job.getJobName() + "' is supposed to have "
                            + DataCompatibilityLoad.FAMILIES.length + " families, but has " + numFamilies);
                    success = false;
                }
            }
        }
        if (success) {
            log.info("All internal checks passed.");
        } else {
            log.info("Some checks failed. see log.");
        }
        return success ? 0 : 1;
    } finally {
        options.input.close();
    }
}

From source file: com.cloudera.ByteCount.java

License: Apache License

public static void printCounter(Counters counters, Enum<?> key) {
    Counter c = counters.findCounter(key);
    System.out.println("\t\t" + c.getDisplayName() + " = " + c.getValue());
}
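
A possible call site for this helper, assuming a finished job and a hypothetical counter enum (neither the enum nor the job variable comes from the source above):

// Hypothetical enum and job variable; only printCounter itself is from the source above.
printCounter(job.getCounters(), MyCounters.RECORDS_WRITTEN);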

From source file: com.ebay.erl.mobius.core.mapred.CounterUpdateThread.java

License: Apache License

private void reportCounters() {
    synchronized (counts) {
        for (Counter aCounter : counts.keySet()) {
            long previous = aCounter.getValue() < 0 ? 0L : aCounter.getValue();
            long current = counts.get(aCounter).longValue();

            long diff = current - previous;
            if (diff != 0) {
                this.r.setStatus("Updating counters on " + new Date());
                this.r.progress();// set the progress flag so Hadoop knows this is still alive.
                aCounter.increment(diff);
            }
        }
    }
}

From source file: com.google.appengine.tools.mapreduce.MapReduceState.java

License: Apache License

private static JSONObject toJson(Counters counters) throws JSONException {
    JSONObject retValue = new JSONObject();
    for (CounterGroup group : counters) {
        for (Counter counter : group) {
            retValue.put(group.getName() + ":" + counter.getName(), counter.getValue());
        }
    }

    return retValue;
}

From source file: com.inmobi.conduit.distcp.tools.mapred.CounterProvider.java

License: Apache License

public org.apache.hadoop.mapred.Counters getJobCounters(JobID ignore) throws IOException {
    org.apache.hadoop.mapred.Counters retCounter = new org.apache.hadoop.mapred.Counters();
    for (CounterGroup group : counters) {
        for (Counter counter : group) {
            retCounter.incrCounter(group.getName(), counter.getName(), counter.getValue());
        }
    }
    return retCounter;
}

From source file: com.linkedin.drelephant.mapreduce.fetchers.MapReduceFSFetcherHadoop2.java

License: Apache License

private MapReduceCounterData getCounterData(Counters counters) {
    MapReduceCounterData holder = new MapReduceCounterData();
    for (CounterGroup group : counters) {
        String groupName = group.getName();
        for (Counter counter : group) {
            holder.set(groupName, counter.getName(), counter.getValue());
        }
    }
    return holder;
}

From source file: com.linkedin.thirdeye.hadoop.aggregation.AggregationPhaseJob.java

License: Apache License

public Job run() throws Exception {
    Job job = Job.getInstance(getConf());
    job.setJobName(name);
    job.setJarByClass(AggregationPhaseJob.class);

    FileSystem fs = FileSystem.get(getConf());
    Configuration configuration = job.getConfiguration();

    // Properties
    LOGGER.info("Properties {}", props);

    // Input Path
    String inputPathDir = getAndSetConfiguration(configuration, AGG_PHASE_INPUT_PATH);
    LOGGER.info("Input path dir: " + inputPathDir);
    for (String inputPath : inputPathDir.split(ThirdEyeConstants.FIELD_SEPARATOR)) {
        LOGGER.info("Adding input:" + inputPath);
        Path input = new Path(inputPath);
        FileInputFormat.addInputPath(job, input);
    }

    // Output path
    Path outputPath = new Path(getAndSetConfiguration(configuration, AGG_PHASE_OUTPUT_PATH));
    LOGGER.info("Output path dir: " + outputPath.toString());
    if (fs.exists(outputPath)) {
        fs.delete(outputPath, true);
    }
    FileOutputFormat.setOutputPath(job, outputPath);

    // Schema
    Schema avroSchema = ThirdeyeAvroUtils.getSchema(inputPathDir);
    LOGGER.info("Schema : {}", avroSchema.toString(true));
    job.getConfiguration().set(AGG_PHASE_AVRO_SCHEMA.toString(), avroSchema.toString());

    // ThirdEyeConfig
    String metricTypesProperty = ThirdeyeAvroUtils.getMetricTypesProperty(
            props.getProperty(ThirdEyeConfigProperties.THIRDEYE_METRIC_NAMES.toString()),
            props.getProperty(ThirdEyeConfigProperties.THIRDEYE_METRIC_TYPES.toString()), avroSchema);
    props.setProperty(ThirdEyeConfigProperties.THIRDEYE_METRIC_TYPES.toString(), metricTypesProperty);
    ThirdEyeConfig thirdeyeConfig = ThirdEyeConfig.fromProperties(props);
    LOGGER.info("Thirdeye Config {}", thirdeyeConfig.encode());
    job.getConfiguration().set(AGG_PHASE_THIRDEYE_CONFIG.toString(),
            OBJECT_MAPPER.writeValueAsString(thirdeyeConfig));

    // Map config
    job.setMapperClass(AggregationMapper.class);
    job.setInputFormatClass(AvroKeyInputFormat.class);
    job.setMapOutputKeyClass(BytesWritable.class);
    job.setMapOutputValueClass(BytesWritable.class);

    // Reduce config
    job.setReducerClass(AggregationReducer.class);
    job.setOutputKeyClass(AvroKey.class);
    job.setOutputValueClass(NullWritable.class);
    AvroJob.setOutputKeySchema(job, avroSchema);
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    String numReducers = props.getProperty(ThirdEyeJobProperties.THIRDEYE_NUM_REDUCERS.getName());
    LOGGER.info("Num Reducers : {}", numReducers);
    if (StringUtils.isNotBlank(numReducers)) {
        job.setNumReduceTasks(Integer.valueOf(numReducers));
        LOGGER.info("Setting num reducers {}", job.getNumReduceTasks());
    }

    job.waitForCompletion(true);

    Counter counter = job.getCounters().findCounter(AggregationCounter.NUMBER_OF_RECORDS);
    LOGGER.info(counter.getDisplayName() + " : " + counter.getValue());
    if (counter.getValue() == 0) {
        throw new IllegalStateException("No input records in " + inputPathDir);
    }
    counter = job.getCounters().findCounter(AggregationCounter.NUMBER_OF_RECORDS_FLATTENED);
    LOGGER.info(counter.getDisplayName() + " : " + counter.getValue());

    for (String metric : thirdeyeConfig.getMetricNames()) {
        counter = job.getCounters().findCounter(thirdeyeConfig.getCollection(), metric);
        LOGGER.info(counter.getDisplayName() + " : " + counter.getValue());
    }

    return job;
}

From source file: com.marklogic.contentpump.LocalJobRunner.java

License: Apache License

/**
 * Run the job. Get the input splits, create map tasks and submit them to
 * the thread pool if there is one; otherwise, run the tasks one by
 * one.
 * 
 * @param <INKEY>
 * @param <INVALUE>
 * @param <OUTKEY>
 * @param <OUTVALUE>
 * @throws Exception
 */
@SuppressWarnings("unchecked")
public <INKEY, INVALUE, OUTKEY, OUTVALUE, T extends org.apache.hadoop.mapreduce.InputSplit> void run()
        throws Exception {
    Configuration conf = job.getConfiguration();
    InputFormat<INKEY, INVALUE> inputFormat = (InputFormat<INKEY, INVALUE>) ReflectionUtils
            .newInstance(job.getInputFormatClass(), conf);
    List<InputSplit> splits = inputFormat.getSplits(job);
    T[] array = (T[]) splits.toArray(new org.apache.hadoop.mapreduce.InputSplit[splits.size()]);

    // sort the splits into order based on size, so that the biggest
    // goes first
    Arrays.sort(array, new SplitLengthComparator());
    OutputFormat<OUTKEY, OUTVALUE> outputFormat = (OutputFormat<OUTKEY, OUTVALUE>) ReflectionUtils
            .newInstance(job.getOutputFormatClass(), conf);
    Class<? extends Mapper<?, ?, ?, ?>> mapperClass = job.getMapperClass();
    Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE> mapper = (Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils
            .newInstance(mapperClass, conf);
    try {
        outputFormat.checkOutputSpecs(job);
    } catch (Exception ex) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Error checking output specification: ", ex);
        } else {
            LOG.error("Error checking output specification: ");
            LOG.error(ex.getMessage());
        }
        return;
    }
    conf = job.getConfiguration();
    progress = new AtomicInteger[splits.size()];
    for (int i = 0; i < splits.size(); i++) {
        progress[i] = new AtomicInteger();
    }
    Monitor monitor = new Monitor();
    monitor.start();
    reporter = new ContentPumpReporter();
    List<Future<Object>> taskList = new ArrayList<Future<Object>>();
    for (int i = 0; i < array.length; i++) {
        InputSplit split = array[i];
        if (pool != null) {
            LocalMapTask<INKEY, INVALUE, OUTKEY, OUTVALUE> task = new LocalMapTask<INKEY, INVALUE, OUTKEY, OUTVALUE>(
                    inputFormat, outputFormat, conf, i, split, reporter, progress[i]);
            availableThreads = assignThreads(i, array.length);
            Class<? extends Mapper<?, ?, ?, ?>> runtimeMapperClass = job.getMapperClass();
            if (availableThreads > 1 && availableThreads != threadsPerSplit) {
                // possible runtime adjustment
                if (runtimeMapperClass != (Class) MultithreadedMapper.class) {
                    runtimeMapperClass = (Class<? extends Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>>) cmd
                            .getRuntimeMapperClass(job, mapperClass, threadsPerSplit, availableThreads);
                }
                if (runtimeMapperClass != mapperClass) {
                    task.setMapperClass(runtimeMapperClass);
                }
                if (runtimeMapperClass == (Class) MultithreadedMapper.class) {
                    task.setThreadCount(availableThreads);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Thread Count for Split#" + i + " : " + availableThreads);
                    }
                }
            }

            if (runtimeMapperClass == (Class) MultithreadedMapper.class) {
                synchronized (pool) {
                    taskList.add(pool.submit(task));
                    pool.wait();
                }
            } else {
                pool.submit(task);
            }
        } else { // single-threaded
            JobID jid = new JobID();
            TaskID taskId = new TaskID(jid.getJtIdentifier(), jid.getId(), TaskType.MAP, i);
            TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
            TaskAttemptContext context = ReflectionUtil.createTaskAttemptContext(conf, taskAttemptId);
            RecordReader<INKEY, INVALUE> reader = inputFormat.createRecordReader(split, context);
            RecordWriter<OUTKEY, OUTVALUE> writer = outputFormat.getRecordWriter(context);
            OutputCommitter committer = outputFormat.getOutputCommitter(context);
            TrackingRecordReader trackingReader = new TrackingRecordReader(reader, progress[i]);

            Mapper.Context mapperContext = ReflectionUtil.createMapperContext(mapper, conf, taskAttemptId,
                    trackingReader, writer, committer, reporter, split);

            trackingReader.initialize(split, mapperContext);

            // no thread pool (only 1 thread specified)
            Class<? extends Mapper<?, ?, ?, ?>> mapClass = job.getMapperClass();
            mapperContext.getConfiguration().setClass(CONF_MAPREDUCE_JOB_MAP_CLASS, mapClass, Mapper.class);
            mapper = (Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils.newInstance(mapClass,
                    mapperContext.getConfiguration());
            mapper.run(mapperContext);
            trackingReader.close();
            writer.close(mapperContext);
            committer.commitTask(context);
        }
    }
    // wait till all tasks are done
    if (pool != null) {
        for (Future<Object> f : taskList) {
            f.get();
        }
        pool.shutdown();
        while (!pool.awaitTermination(1, TimeUnit.DAYS))
            ;
        jobComplete.set(true);
    }
    monitor.interrupt();
    monitor.join(1000);

    // report counters
    Iterator<CounterGroup> groupIt = reporter.counters.iterator();
    while (groupIt.hasNext()) {
        CounterGroup group = groupIt.next();
        LOG.info(group.getDisplayName() + ": ");
        Iterator<Counter> counterIt = group.iterator();
        while (counterIt.hasNext()) {
            Counter counter = counterIt.next();
            LOG.info(counter.getDisplayName() + ": " + counter.getValue());
        }
    }
    LOG.info("Total execution time: " + (System.currentTimeMillis() - startTime) / 1000 + " sec");
}

From source file: com.marklogic.contentpump.utilities.AuditUtil.java

License: Apache License

/**
 * @param conf
 * @param jobName
 * @param counters
 * @throws IOException
 */
public static void auditMlcpFinish(Configuration conf, String jobName, Counters counters) throws IOException {
    if (!conf.getBoolean(ConfigConstants.CONF_AUDIT_MLCPFINISH_ENABLED, false)) {
        return;
    }

    StringBuilder auditBuf = new StringBuilder();
    auditBuf.append("job=");
    auditBuf.append(jobName);
    auditBuf.append(";");

    Iterator<CounterGroup> groupIt = counters.iterator();
    int groupCounter = 0;
    while (groupIt.hasNext()) {
        CounterGroup group = groupIt.next();
        if (groupCounter != 0) {
            auditBuf.append("; ");
        } else {
            auditBuf.append(" ");
        }

        auditBuf.append('(');
        auditBuf.append(group.getDisplayName());
        auditBuf.append(") ");

        Iterator<Counter> counterIt = group.iterator();
        int counterCount = 0;
        while (counterIt.hasNext()) {
            if (counterCount != 0) {
                auditBuf.append(", ");
            }
            Counter counter = counterIt.next();
            auditBuf.append(counter.getDisplayName());
            auditBuf.append('=');
            auditBuf.append(counter.getValue());
            counterCount++;
        }
        groupCounter++;
    }
    String ruleCounter = conf.get(ConfigConstants.CONF_AUDIT_MLCPFINISH_MESSAGE);
    if (ruleCounter != null) {
        auditBuf.append("; ");
        auditBuf.append(ruleCounter);
    }
    String auditMessage = auditBuf.toString();

    auditBuf = new StringBuilder();
    auditBuf.append("xquery version \"1.0-ml\";\n");
    auditBuf.append("xdmp:audit(\"mlcpfinish\",\"");
    auditBuf.append(auditMessage);
    auditBuf.append("\", xdmp:get-current-user())");
    String auditQueryStr = auditBuf.toString();

    Session auditSession = null;
    ContentSource auditCs = null;
    try {
        auditCs = InternalUtilities.getInputContentSource(conf);
        auditSession = auditCs.newSession();
        RequestOptions options = new RequestOptions();
        options.setCacheResult(false);

        AdhocQuery auditQuery = auditSession.newAdhocQuery(auditQueryStr);
        auditQuery.setOptions(options);
        auditSession.submitRequest(auditQuery);
    } catch (XccConfigException e) {
        LOG.error(e);
        throw new IOException(e);
    } catch (URISyntaxException e) {
        LOG.error(e);
        throw new IOException(e);
    } catch (RequestException e) {
        LOG.error(e);
        LOG.error("Query: " + auditQueryStr);
        throw new IOException(e);
    }
}