Example usage for org.apache.hadoop.mapreduce Counter getValue

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce Counter getValue, collected from open-source projects.

Prototype

long getValue();

Document

What is the current value of this counter?
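
Before the project examples below, here is a minimal, self-contained sketch of the call in a mapper, assuming a hypothetical counter group and name ("MyApp", "RECORDS_SEEN"); Context.getCounter, Counter.increment, and Counter.getValue are the actual org.apache.hadoop.mapreduce API.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Mapper;

public class CountingMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // "MyApp"/"RECORDS_SEEN" are illustrative names, not a built-in Hadoop group.
        Counter seen = context.getCounter("MyApp", "RECORDS_SEEN");
        seen.increment(1);
        // getValue() returns the counter's current value as a long.
        long soFar = seen.getValue();
        if (soFar % 10000 == 0) {
            context.setStatus("Processed " + soFar + " records");
        }
    }
}

Note that inside a running task getValue() reflects only that task's local count; the aggregated job-wide values are read from Job.getCounters() after completion, as several of the examples below do.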

Usage

From source file: org.apache.nutch.parse.ParserReducer.java

License: Apache License

String getParseStatus(Context context) {
    String res = "parser result:";
    for (String code : ParseStatusCodes.majorCodes) {
        org.apache.hadoop.mapreduce.Counter counter = context.getCounter("ParserStatus", code);
        res += (" " + counter.getName() + "=" + counter.getValue()) + "\n";
    }
    return res + " <br/>" + (new Date().toLocaleString());
}

From source file: org.apache.nutch.util.ToolUtil.java

License: Apache License

@SuppressWarnings("unchecked")
public static final void recordJobStatus(String label, Job job, Map<String, Object> results) {
    Map<String, Object> jobs = (Map<String, Object>) results.get(Nutch.STAT_JOBS);
    if (jobs == null) {
        jobs = new LinkedHashMap<String, Object>();
        results.put(Nutch.STAT_JOBS, jobs);
    }
    Map<String, Object> stats = new HashMap<String, Object>();
    Map<String, Object> countStats = new HashMap<String, Object>();
    try {
        Counters counters = job.getCounters();
        for (CounterGroup cg : counters) {
            Map<String, Object> cnts = new HashMap<String, Object>();
            countStats.put(cg.getDisplayName(), cnts);
            for (Counter c : cg) {
                cnts.put(c.getName(), c.getValue());
            }
        }
    } catch (Exception e) {
        countStats.put("error", e.toString());
    }
    stats.put(Nutch.STAT_COUNTERS, countStats);
    stats.put("jobName", job.getJobName());
    stats.put("jobID", job.getJobID());
    if (label == null) {
        label = job.getJobName();
        if (job.getJobID() != null) {
            label = label + "-" + job.getJobID();
        }
    }
    jobs.put(label, stats);
}

From source file: org.apache.pig.CounterBasedErrorHandler.java

License: Apache License

public long getRecordCount(String storeSignature) {
    Counter counter = getCounter(storeSignature, STORER_RECORD_COUNT);
    return counter.getValue();
}

From source file: org.apache.pig.CounterBasedErrorHandler.java

License: Apache License

private long incAndGetCounter(String storeSignature, String counterName) {
    Counter counter = getCounter(storeSignature, counterName);
    counter.increment(1);
    return counter.getValue();
}

From source file: org.apache.pig.piggybank.storage.HadoopJobHistoryLoader.java

License: Apache License

@SuppressWarnings("deprecation")
private static void parseAndAddJobCounters(Map<String, String> job, String counters) {
    try {
        Counters counterGroups = Counters.fromEscapedCompactString(counters);
        for (Group otherGroup : counterGroups) {
            Group group = counterGroups.getGroup(otherGroup.getName());
            for (Counter otherCounter : otherGroup) {
                Counter counter = group.getCounterForName(otherCounter.getName());
                job.put(otherCounter.getName(), String.valueOf(counter.getValue()));
            }
        }
    } catch (ParseException e) {
        LOG.warn("Failed to parse job counters", e);
    }
}

From source file: org.apache.pig.tools.pigstats.mapreduce.MRJobStats.java

License: Apache License

void addCounters(Job job) {
    try {
        counters = HadoopShims.getCounters(job);
    } catch (IOException e) {
        LOG.warn("Unable to get job counters", e);
    }
    if (counters != null) {
        Counters.Group taskgroup = counters.getGroup(MRPigStatsUtil.TASK_COUNTER_GROUP);
        Counters.Group hdfsgroup = counters.getGroup(MRPigStatsUtil.FS_COUNTER_GROUP);
        Counters.Group multistoregroup = counters.getGroup(MRPigStatsUtil.MULTI_STORE_COUNTER_GROUP);
        Counters.Group multiloadgroup = counters.getGroup(MRPigStatsUtil.MULTI_INPUTS_COUNTER_GROUP);

        mapInputRecords = taskgroup.getCounterForName(MRPigStatsUtil.MAP_INPUT_RECORDS).getCounter();
        mapOutputRecords = taskgroup.getCounterForName(MRPigStatsUtil.MAP_OUTPUT_RECORDS).getCounter();
        reduceInputRecords = taskgroup.getCounterForName(MRPigStatsUtil.REDUCE_INPUT_RECORDS).getCounter();
        reduceOutputRecords = taskgroup.getCounterForName(MRPigStatsUtil.REDUCE_OUTPUT_RECORDS).getCounter();
        hdfsBytesRead = hdfsgroup.getCounterForName(MRPigStatsUtil.HDFS_BYTES_READ).getCounter();
        hdfsBytesWritten = hdfsgroup.getCounterForName(MRPigStatsUtil.HDFS_BYTES_WRITTEN).getCounter();
        spillCount = counters.findCounter(PigCounters.SPILLABLE_MEMORY_MANAGER_SPILL_COUNT).getCounter();
        activeSpillCountObj = counters.findCounter(PigCounters.PROACTIVE_SPILL_COUNT_BAGS).getCounter();
        activeSpillCountRecs = counters.findCounter(PigCounters.PROACTIVE_SPILL_COUNT_RECS).getCounter();

        Iterator<Counter> iter = multistoregroup.iterator();
        while (iter.hasNext()) {
            Counter cter = iter.next();
            multiStoreCounters.put(cter.getName(), cter.getValue());
        }

        Iterator<Counter> iter2 = multiloadgroup.iterator();
        while (iter2.hasNext()) {
            Counter cter = iter2.next();
            multiInputCounters.put(cter.getName(), cter.getValue());
        }

    }
}

From source file: org.apache.rya.accumulo.pig.IndexWritingTool.java

License: Apache License

@Override
public int run(final String[] args) throws Exception {
    Preconditions.checkArgument(args.length == 7, "java " + IndexWritingTool.class.getCanonicalName()
            + " hdfsSaveLocation sparqlFile cbinstance cbzk cbuser cbpassword rdfTablePrefix.");

    final String inputDir = args[0];
    final String sparqlFile = args[1];
    final String instStr = args[2];
    final String zooStr = args[3];
    final String userStr = args[4];
    final String passStr = args[5];
    final String tablePrefix = args[6];

    final String sparql = FileUtils.readFileToString(new File(sparqlFile));

    final Job job = new Job(getConf(), "Write HDFS Index to Accumulo");
    job.setJarByClass(this.getClass());

    final Configuration jobConf = job.getConfiguration();
    jobConf.setBoolean("mapred.map.tasks.speculative.execution", false);
    setVarOrders(sparql, jobConf);

    TextInputFormat.setInputPaths(job, inputDir);
    job.setInputFormatClass(TextInputFormat.class);

    job.setMapperClass(MyMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);

    job.setNumReduceTasks(0);

    String tableName;
    if (zooStr.equals("mock")) {
        tableName = tablePrefix;
    } else {
        tableName = tablePrefix + "INDEX_" + UUID.randomUUID().toString().replace("-", "").toUpperCase();
    }
    setAccumuloOutput(instStr, zooStr, userStr, passStr, job, tableName);

    jobConf.set(sparql_key, sparql);

    final int complete = job.waitForCompletion(true) ? 0 : -1;

    if (complete == 0) {

        final String[] varOrders = jobConf.getStrings("varOrders");
        final String orders = Joiner.on("\u0000").join(varOrders);
        Instance inst;

        if (zooStr.equals("mock")) {
            inst = new MockInstance(instStr);
        } else {
            inst = new ZooKeeperInstance(instStr, zooStr);
        }

        final Connector conn = inst.getConnector(userStr, passStr.getBytes(StandardCharsets.UTF_8));
        final BatchWriter bw = conn.createBatchWriter(tableName, 10, 5000, 1);

        final Counters counters = job.getCounters();
        final Counter c1 = counters.findCounter(cardCounter, cardCounter);

        final Mutation m = new Mutation("~SPARQL");
        final Value v = new Value(sparql.getBytes(StandardCharsets.UTF_8));
        m.put(new Text("" + c1.getValue()), new Text(orders), v);
        bw.addMutation(m);

        bw.close();

        return complete;
    } else {
        return complete;
    }

}

From source file: org.apache.tez.mapreduce.hadoop.mapred.MRCounters.java

License: Apache License

static org.apache.tez.common.counters.TezCounter convert(org.apache.hadoop.mapred.Counters.Counter counter) {
    org.apache.hadoop.mapreduce.Counter underlyingCounter = counter.getUnderlyingCounter();
    if (underlyingCounter instanceof org.apache.hadoop.mapreduce.counters.FrameworkCounterGroup.FrameworkCounter) {
        org.apache.hadoop.mapreduce.counters.FrameworkCounterGroup.FrameworkCounter real = (org.apache.hadoop.mapreduce.counters.FrameworkCounterGroup.FrameworkCounter) underlyingCounter;
        return new org.apache.tez.common.counters.FrameworkCounterGroup.FrameworkCounter(real.getKey(),
                real.getGroupName());
    } else if (underlyingCounter instanceof org.apache.hadoop.mapreduce.counters.FileSystemCounterGroup.FSCounter) {
        org.apache.hadoop.mapreduce.counters.FileSystemCounterGroup.FSCounter real = (org.apache.hadoop.mapreduce.counters.FileSystemCounterGroup.FSCounter) underlyingCounter;
        return new org.apache.tez.common.counters.FileSystemCounterGroup.FSCounter(real.getScheme(),
                convert(real.getFileSystemCounter()));
    } else {
        return new org.apache.tez.common.counters.GenericCounter(underlyingCounter.getName(),
                underlyingCounter.getDisplayName(), underlyingCounter.getValue());
    }
}

From source file: org.godhuli.rhipe.RHMR.java

License: Apache License

public static REXP buildListFromCounters(org.apache.hadoop.mapreduce.Counters counters, double tt) {
    //      String[] groupnames = counters.getGroupNames().toArray(new String[] {});
    List<String> list = new ArrayList<String>();
    for (String groupName : counters.getGroupNames()) {
        list.add(groupName);
    }
    String[] groupnames = new String[list.size()];
    groupnames = list.toArray(groupnames);

    String[] groupdispname = new String[groupnames.length + 1];
    Vector<REXP> cn = new Vector<REXP>();
    for (int i = 0; i < groupnames.length; i++) {
        org.apache.hadoop.mapreduce.CounterGroup cgroup = counters.getGroup(groupnames[i]);
        groupdispname[i] = cgroup.getDisplayName();
        REXP.Builder cvalues = REXP.newBuilder();
        Vector<String> cnames = new Vector<String>();
        cvalues.setRclass(REXP.RClass.REAL);
        for (org.apache.hadoop.mapreduce.Counter counter : cgroup) {
            cvalues.addRealValue((double) counter.getValue());
            cnames.add(counter.getDisplayName());
        }
        cvalues.addAttrName("names");
        cvalues.addAttrValue(RObjects.makeStringVector(cnames.toArray(new String[] {})));
        cn.add(cvalues.build());
    }
    groupdispname[groupnames.length] = "job_time";
    REXP.Builder cvalues = REXP.newBuilder();
    cvalues.setRclass(REXP.RClass.REAL);
    cvalues.addRealValue(tt);
    cn.add(cvalues.build());
    return (RObjects.makeList(groupdispname, cn));
}

From source file: org.gridgain.client.hadoop.GridHadoopClientProtocolSelfTest.java

License: Open Source License

/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    GridGgfs ggfs = grid(0).ggfs(GridHadoopAbstractSelfTest.ggfsName);

    ggfs.mkdirs(new GridGgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(ggfs.create(new GridGgfsPath(PATH_INPUT + "/test.file"), true)))) {

        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n"
                + "gamma\n");
    }

    Configuration conf = config(GridHadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(TestCountingMapper.class);
    job.setReducerClass(TestCountingReducer.class);
    job.setCombinerClass(TestCountingCombiner.class);

    FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
    FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

    job.submit();

    final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);

    assertEquals(0, cntr.getValue());

    cntr.increment(10);

    assertEquals(10, cntr.getValue());

    // Transferring to map phase.
    setupLockFile.delete();

    // Transferring to reduce phase.
    mapLockFile.delete();

    job.waitForCompletion(false);

    assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());

    final Counters counters = job.getCounters();

    assertNotNull("counters cannot be null", counters);
    assertEquals("wrong counters count", 3, counters.countCounters());
    assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
    assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
    assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
}