Example usage for org.apache.hadoop.mapreduce CounterGroup getDisplayName

List of usage examples for org.apache.hadoop.mapreduce CounterGroup getDisplayName

Introduction

On this page you can find example usages for org.apache.hadoop.mapreduce CounterGroup getDisplayName.

Prototype

String getDisplayName();

Document

Get the display name of the group.
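
Before the full examples below, here is a minimal sketch of the typical pattern: iterate a finished job's counters and label each group with getDisplayName(). This is an illustrative helper, not part of the Hadoop API; imports from org.apache.hadoop.mapreduce are assumed.

public static void printCounters(Job job) throws IOException {
    Counters counters = job.getCounters();
    for (CounterGroup group : counters) {
        // group display name, e.g. "Map-Reduce Framework"
        System.out.println(group.getDisplayName());
        for (Counter counter : group) {
            System.out.println("  " + counter.getDisplayName() + "=" + counter.getValue());
        }
    }
}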

Usage

From source file:com.marklogic.contentpump.LocalJobRunner.java

License:Apache License
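
Here a local job runner executes map tasks, optionally on a thread pool; once all tasks finish, it walks the reporter's counter groups and uses getDisplayName() to label each group and counter in the final log report.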

/**
 * Run the job.  Get the input splits, create map tasks, and submit them to
 * the thread pool if there is one; otherwise, run the tasks one by one.
 * 
 * @param <INKEY>
 * @param <INVALUE>
 * @param <OUTKEY>
 * @param <OUTVALUE>
 * @throws Exception
 */
@SuppressWarnings("unchecked")
public <INKEY, INVALUE, OUTKEY, OUTVALUE, T extends org.apache.hadoop.mapreduce.InputSplit> void run()
        throws Exception {
    Configuration conf = job.getConfiguration();
    InputFormat<INKEY, INVALUE> inputFormat = (InputFormat<INKEY, INVALUE>) ReflectionUtils
            .newInstance(job.getInputFormatClass(), conf);
    List<InputSplit> splits = inputFormat.getSplits(job);
    T[] array = (T[]) splits.toArray(new org.apache.hadoop.mapreduce.InputSplit[splits.size()]);

    // sort the splits into order based on size, so that the biggest
    // goes first
    Arrays.sort(array, new SplitLengthComparator());
    OutputFormat<OUTKEY, OUTVALUE> outputFormat = (OutputFormat<OUTKEY, OUTVALUE>) ReflectionUtils
            .newInstance(job.getOutputFormatClass(), conf);
    Class<? extends Mapper<?, ?, ?, ?>> mapperClass = job.getMapperClass();
    Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE> mapper = (Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils
            .newInstance(mapperClass, conf);
    try {
        outputFormat.checkOutputSpecs(job);
    } catch (Exception ex) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Error checking output specification: ", ex);
        } else {
            LOG.error("Error checking output specification: ");
            LOG.error(ex.getMessage());
        }
        return;
    }
    conf = job.getConfiguration();
    progress = new AtomicInteger[splits.size()];
    for (int i = 0; i < splits.size(); i++) {
        progress[i] = new AtomicInteger();
    }
    Monitor monitor = new Monitor();
    monitor.start();
    reporter = new ContentPumpReporter();
    List<Future<Object>> taskList = new ArrayList<Future<Object>>();
    for (int i = 0; i < array.length; i++) {
        InputSplit split = array[i];
        if (pool != null) {
            LocalMapTask<INKEY, INVALUE, OUTKEY, OUTVALUE> task = new LocalMapTask<INKEY, INVALUE, OUTKEY, OUTVALUE>(
                    inputFormat, outputFormat, conf, i, split, reporter, progress[i]);
            availableThreads = assignThreads(i, array.length);
            Class<? extends Mapper<?, ?, ?, ?>> runtimeMapperClass = job.getMapperClass();
            if (availableThreads > 1 && availableThreads != threadsPerSplit) {
                // possible runtime adjustment
                if (runtimeMapperClass != (Class) MultithreadedMapper.class) {
                    runtimeMapperClass = (Class<? extends Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>>) cmd
                            .getRuntimeMapperClass(job, mapperClass, threadsPerSplit, availableThreads);
                }
                if (runtimeMapperClass != mapperClass) {
                    task.setMapperClass(runtimeMapperClass);
                }
                if (runtimeMapperClass == (Class) MultithreadedMapper.class) {
                    task.setThreadCount(availableThreads);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Thread Count for Split#" + i + " : " + availableThreads);
                    }
                }
            }

            if (runtimeMapperClass == (Class) MultithreadedMapper.class) {
                synchronized (pool) {
                    taskList.add(pool.submit(task));
                    pool.wait();
                }
            } else {
                pool.submit(task);
            }
        } else { // single-threaded
            JobID jid = new JobID();
            TaskID taskId = new TaskID(jid.getJtIdentifier(), jid.getId(), TaskType.MAP, i);
            TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
            TaskAttemptContext context = ReflectionUtil.createTaskAttemptContext(conf, taskAttemptId);
            RecordReader<INKEY, INVALUE> reader = inputFormat.createRecordReader(split, context);
            RecordWriter<OUTKEY, OUTVALUE> writer = outputFormat.getRecordWriter(context);
            OutputCommitter committer = outputFormat.getOutputCommitter(context);
            TrackingRecordReader trackingReader = new TrackingRecordReader(reader, progress[i]);

            Mapper.Context mapperContext = ReflectionUtil.createMapperContext(mapper, conf, taskAttemptId,
                    trackingReader, writer, committer, reporter, split);

            trackingReader.initialize(split, mapperContext);

            // no thread pool (only 1 thread specified)
            Class<? extends Mapper<?, ?, ?, ?>> mapClass = job.getMapperClass();
            mapperContext.getConfiguration().setClass(CONF_MAPREDUCE_JOB_MAP_CLASS, mapClass, Mapper.class);
            mapper = (Mapper<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils.newInstance(mapClass,
                    mapperContext.getConfiguration());
            mapper.run(mapperContext);
            trackingReader.close();
            writer.close(mapperContext);
            committer.commitTask(context);
        }
    }
    // wait till all tasks are done
    if (pool != null) {
        for (Future<Object> f : taskList) {
            f.get();
        }
        pool.shutdown();
        while (!pool.awaitTermination(1, TimeUnit.DAYS))
            ;
        jobComplete.set(true);
    }
    monitor.interrupt();
    monitor.join(1000);

    // report counters
    Iterator<CounterGroup> groupIt = reporter.counters.iterator();
    while (groupIt.hasNext()) {
        CounterGroup group = groupIt.next();
        LOG.info(group.getDisplayName() + ": ");
        Iterator<Counter> counterIt = group.iterator();
        while (counterIt.hasNext()) {
            Counter counter = counterIt.next();
            LOG.info(counter.getDisplayName() + ": " + counter.getValue());
        }
    }
    LOG.info("Total execution time: " + (System.currentTimeMillis() - startTime) / 1000 + " sec");
}

From source file:com.marklogic.contentpump.utilities.AuditUtil.java

License:Apache License
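
This utility builds an audit message from job counters, wrapping each group's display name in parentheses ahead of its counters, and submits the result to MarkLogic as an xdmp:audit call over XCC.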

/**
 * @param conf
 * @param jobName
 * @param counters
 * @throws IOException
 */
public static void auditMlcpFinish(Configuration conf, String jobName, Counters counters) throws IOException {
    if (!conf.getBoolean(ConfigConstants.CONF_AUDIT_MLCPFINISH_ENABLED, false)) {
        return;
    }

    StringBuilder auditBuf = new StringBuilder();
    auditBuf.append("job=");
    auditBuf.append(jobName);
    auditBuf.append(";");

    Iterator<CounterGroup> groupIt = counters.iterator();
    int groupCounter = 0;
    while (groupIt.hasNext()) {
        CounterGroup group = groupIt.next();
        if (groupCounter != 0) {
            auditBuf.append("; ");
        } else {
            auditBuf.append(" ");
        }

        auditBuf.append('(');
        auditBuf.append(group.getDisplayName());
        auditBuf.append(") ");

        Iterator<Counter> counterIt = group.iterator();
        int counterCount = 0;
        while (counterIt.hasNext()) {
            if (counterCount != 0) {
                auditBuf.append(", ");
            }
            Counter counter = counterIt.next();
            auditBuf.append(counter.getDisplayName());
            auditBuf.append('=');
            auditBuf.append(counter.getValue());
            counterCount++;
        }
        groupCounter++;
    }
    String ruleCounter = conf.get(ConfigConstants.CONF_AUDIT_MLCPFINISH_MESSAGE);
    if (ruleCounter != null) {
        auditBuf.append("; ");
        auditBuf.append(ruleCounter);
    }
    String auditMessage = auditBuf.toString();

    auditBuf = new StringBuilder();
    auditBuf.append("xquery version \"1.0-ml\";\n");
    auditBuf.append("xdmp:audit(\"mlcpfinish\",\"");
    auditBuf.append(auditMessage);
    auditBuf.append("\", xdmp:get-current-user())");
    String auditQueryStr = auditBuf.toString();

    Session auditSession = null;
    ContentSource auditCs = null;
    try {
        auditCs = InternalUtilities.getInputContentSource(conf);
        auditSession = auditCs.newSession();
        RequestOptions options = new RequestOptions();
        options.setCacheResult(false);

        AdhocQuery auditQuery = auditSession.newAdhocQuery(auditQueryStr);
        auditQuery.setOptions(options);
        auditSession.submitRequest(auditQuery);
    } catch (XccConfigException e) {
        LOG.error(e);
        throw new IOException(e);
    } catch (URISyntaxException e) {
        LOG.error(e);
        throw new IOException(e);
    } catch (RequestException e) {
        LOG.error(e);
        LOG.error("Query: " + auditQueryStr);
        throw new IOException(e);
    }
}

From source file:com.netflix.bdp.inviso.history.TraceJobHistoryLoader.java

License:Apache License
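
This helper flattens a Counters object into a nested map, keying the outer map by group display name and the inner maps by counter display name.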

private Map<String, Map<String, Long>> handleCounterEntries(Counters counters) {
    Map<String, Map<String, Long>> result = new HashMap<>();

    for (CounterGroup group : counters) {
        Map<String, Long> cmap = new HashMap<>();

        for (Counter counter : group) {
            cmap.put(counter.getDisplayName(), counter.getValue());
        }

        result.put(group.getDisplayName(), cmap);
    }

    return result;
}

From source file:dz.lab.mapred.counter.StartsWithCountJob_PrintCounters.java
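
This driver runs a simple counting job and, after completion, prints every counter group's display name followed by the counters it contains.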

@Override
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    // the following property will enable mapreduce to use its packaged local job runner
    //conf.set("mapreduce.framework.name", "local");

    Job job = Job.getInstance(conf, "StartsWithCountJob");
    job.setJarByClass(getClass());

    // configure output and input source
    TextInputFormat.addInputPath(job, new Path(args[0]));
    job.setInputFormatClass(TextInputFormat.class);

    // configure mapper and reducer
    job.setMapperClass(StartsWithCountMapper.class);
    job.setCombinerClass(StartsWithCountReducer.class);
    job.setReducerClass(StartsWithCountReducer.class);

    // configure output
    TextOutputFormat.setOutputPath(job, new Path(args[1]));
    job.setOutputFormatClass(TextOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    int resultCode = job.waitForCompletion(true) ? 0 : 1;
    System.out.println("Job is complete! Printing Counters:");
    Counters counters = job.getCounters();

    for (String groupName : counters.getGroupNames()) {
        CounterGroup group = counters.getGroup(groupName);
        System.out.println(group.getDisplayName());

        for (Counter counter : group.getUnderlyingGroup()) {
            System.out.println(" " + counter.getDisplayName() + "=" + counter.getValue());
        }
    }
    return resultCode;
}

From source file:org.apache.ignite.client.hadoop.counter.GridHadoopClientCounters.java

License:Apache License
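
This override copies a counter group by delegating to the addGroup(String, String) overload, passing along the group's internal name and display name.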

/** {@inheritDoc} */
@Override
public synchronized CounterGroup addGroup(CounterGroup grp) {
    return addGroup(grp.getName(), grp.getDisplayName());
}

From source file:org.apache.nutch.mapreduce.NutchUtil.java

License:Apache License
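
This helper collects a job's counters into a map keyed by group display name, optionally filtering to the group names passed in.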

public static Map<String, Object> getJobCounters(Job job, String... groups) {
    Map<String, Object> counters = Maps.newHashMap();
    if (job == null) {
        return counters;
    }

    try {
        for (CounterGroup group : job.getCounters()) {
            String groupName = group.getDisplayName();

            if (ArrayUtils.isEmpty(groups) || ArrayUtils.contains(groups, groupName)) {
                Map<String, Object> groupedCounters = Maps.newHashMap();

                for (Counter counter : group) {
                    groupedCounters.put(counter.getName(), counter.getValue());
                }

                counters.put(groupName, groupedCounters);
            }
        }
    } catch (Exception e) {
        counters.put("error", e.toString());
    }

    return counters;
}

From source file:org.apache.nutch.util.ToolUtil.java

License:Apache License
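
This utility records a job's name, ID, and counters into a results map, keying each group of counter values by the group's display name.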

@SuppressWarnings("unchecked")
public static final void recordJobStatus(String label, Job job, Map<String, Object> results) {
    Map<String, Object> jobs = (Map<String, Object>) results.get(Nutch.STAT_JOBS);
    if (jobs == null) {
        jobs = new LinkedHashMap<String, Object>();
        results.put(Nutch.STAT_JOBS, jobs);
    }
    Map<String, Object> stats = new HashMap<String, Object>();
    Map<String, Object> countStats = new HashMap<String, Object>();
    try {
        Counters counters = job.getCounters();
        for (CounterGroup cg : counters) {
            Map<String, Object> cnts = new HashMap<String, Object>();
            countStats.put(cg.getDisplayName(), cnts);
            for (Counter c : cg) {
                cnts.put(c.getName(), c.getValue());
            }
        }
    } catch (Exception e) {
        countStats.put("error", e.toString());
    }
    stats.put(Nutch.STAT_COUNTERS, countStats);
    stats.put("jobName", job.getJobName());
    stats.put("jobID", job.getJobID());
    if (label == null) {
        label = job.getJobName();
        if (job.getJobID() != null) {
            label = label + "-" + job.getJobID();
        }
    }
    jobs.put(label, stats);
}

From source file:org.godhuli.rhipe.RHMR.java

License:Apache License
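
This method converts Hadoop counters into an R list for RHIPE, using each group's display name as an element name and appending the total job time as a final entry.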

public static REXP buildListFromCounters(org.apache.hadoop.mapreduce.Counters counters, double tt) {
    //      String[] groupnames = counters.getGroupNames().toArray(new String[] {});
    List<String> list = new ArrayList<String>();
    for (String groupName : counters.getGroupNames()) {
        list.add(groupName);
    }
    String[] groupnames = new String[list.size()];
    groupnames = list.toArray(groupnames);

    String[] groupdispname = new String[groupnames.length + 1];
    Vector<REXP> cn = new Vector<REXP>();
    for (int i = 0; i < groupnames.length; i++) {
        org.apache.hadoop.mapreduce.CounterGroup cgroup = counters.getGroup(groupnames[i]);
        groupdispname[i] = cgroup.getDisplayName();
        REXP.Builder cvalues = REXP.newBuilder();
        Vector<String> cnames = new Vector<String>();
        cvalues.setRclass(REXP.RClass.REAL);
        for (org.apache.hadoop.mapreduce.Counter counter : cgroup) {
            cvalues.addRealValue((double) counter.getValue());
            cnames.add(counter.getDisplayName());
        }
        cvalues.addAttrName("names");
        cvalues.addAttrValue(RObjects.makeStringVector(cnames.toArray(new String[] {})));
        cn.add(cvalues.build());
    }
    groupdispname[groupnames.length] = "job_time";
    REXP.Builder cvalues = REXP.newBuilder();
    cvalues.setRclass(REXP.RClass.REAL);
    cvalues.addRealValue(tt);
    cn.add(cvalues.build());
    return (RObjects.makeList(groupdispname, cn));
}

From source file:org.huahinframework.manager.rest.service.JobService.java

License:Apache License
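
This REST helper assembles a status map for a job, grouping counter values under each counter group's display name.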

/**
 * @param jobId
 * @return {@link JSONObject}
 * @throws IOException
 * @throws InterruptedException
 */
private Map<String, Object> getStatus(String jobId) throws IOException, InterruptedException {
    Map<String, Object> job = null;

    Cluster cluster = new Cluster(getJobConf());
    for (JobStatus jobStatus : cluster.getAllJobStatuses()) {
        if (jobStatus.getJobID().toString().equals(jobId)) {
            job = JobUtils.getJob(jobStatus);
            Job j = cluster.getJob(jobStatus.getJobID());
            if (j == null) {
                break;
            }

            Calendar finishTime = Calendar.getInstance();
            finishTime.setTimeInMillis(j.getFinishTime());
            job.put(Response.FINISH_TIME, finishTime.getTime().toString());

            Map<String, Map<String, Long>> groups = new HashMap<String, Map<String, Long>>();
            for (String s : j.getCounters().getGroupNames()) {
                CounterGroup counterGroup = j.getCounters().getGroup(s);
                Iterator<Counter> ite = counterGroup.iterator();

                Map<String, Long> counters = new HashMap<String, Long>();
                groups.put(counterGroup.getDisplayName(), counters);
                while (ite.hasNext()) {
                    Counter counter = (Counter) ite.next();
                    counters.put(counter.getDisplayName(), counter.getValue());
                }
            }

            job.put(Response.GROUPS, groups);
            break;
        }
    }

    return job;
}

From source file:org.springframework.data.hadoop.batch.mapreduce.JobTasklet.java

License:Apache License
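
This Spring Batch tasklet saves job statistics into the step's ExecutionContext, keying each counter value by the trimmed group and counter display names.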

private static void saveJobStats(Job job, StepExecution stepExecution) {
    if (stepExecution == null) {
        return;
    }
    ExecutionContext executionContext = stepExecution.getExecutionContext();
    String statusPrefix = "Job Status::";
    executionContext.put(statusPrefix + "ID", JobUtils.getJobId(job).toString());
    executionContext.put(statusPrefix + "Name", job.getJobName());
    executionContext.put(statusPrefix + "Tracking URL", job.getTrackingURL());
    executionContext.put(statusPrefix + "State", JobUtils.getStatus(job).toString());
    try {
        for (String cgName : job.getCounters().getGroupNames()) {
            CounterGroup group = job.getCounters().getGroup(cgName);
            Iterator<Counter> ci = group.iterator();
            while (ci.hasNext()) {
                Counter c = ci.next();
                executionContext.put(group.getDisplayName().trim() + "::" + c.getDisplayName().trim(),
                        c.getValue());
            }
        }
    } catch (Exception ignore) {
    }
}