Example usage for org.apache.hadoop.mapred RunningJob getJobState

Introduction

This page lists example usages of org.apache.hadoop.mapred.RunningJob#getJobState, collected from open-source projects.

Prototype

public int getJobState() throws IOException;

Document

Returns the current state of the Job.
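
A minimal, self-contained sketch (not taken from any of the projects below) of how the return value is typically consumed: submit a job, wait for it to complete, and translate the numeric state into a readable name with JobStatus.getJobRunState(int). The class and method names are illustrative only.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.RunningJob;

public class JobStateExample {
    // Submits the configured job, blocks until it completes, and returns the final
    // numeric state (one of JobStatus.SUCCEEDED, JobStatus.FAILED, JobStatus.KILLED).
    public static int waitForFinalState(JobConf conf) throws IOException, InterruptedException {
        JobClient jc = new JobClient(conf);
        RunningJob job = jc.submitJob(conf);
        while (!job.isComplete()) {
            Thread.sleep(1000);
        }
        int state = job.getJobState();
        // JobStatus.getJobRunState(int) maps the constant to "SUCCEEDED", "FAILED", "KILLED", etc.
        System.out.println("Job " + job.getID() + " finished in state " + JobStatus.getJobRunState(state));
        return state;
    }
}

The numeric constants themselves (PREP, RUNNING, SUCCEEDED, FAILED, KILLED) are defined on org.apache.hadoop.mapred.JobStatus, as several of the examples below also show.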

Usage

From source file: FormatStorage1.MergeFileUtil.java

License: Open Source License

public static void runold(String inputdir, String outputdir, Configuration conf) throws IOException {
    JobConf job = new JobConf(conf);
    job.setJobName("MergeFileUtil");
    job.setJarByClass(MergeFileUtil.class);
    FileSystem fs = null;
    fs = FileSystem.get(job);
    if (fs.exists(new Path(outputdir))) {
        throw new IOException("outputdir: " + outputdir + " exist!!!");
    }

    FileStatus[] fss = fs.listStatus(new Path(inputdir));

    if (fss == null || fss.length <= 0) {
        throw new IOException("no input files");
    }

    for (FileStatus status : fss) {
        if (status.isDir()) {
            throw new IOException("!!!input dir contains directory:\t" + status.getPath().toString());
        }
    }

    IFormatDataFile ifdf = new IFormatDataFile(job);
    ifdf.open(fss[0].getPath().toString());
    job.set("ifdf.head.info", ifdf.fileInfo().head().toStr());
    ifdf.close();

    long wholesize = 0;
    for (FileStatus status : fss) {
        wholesize += status.getLen();
    }

    long fl = 512 * 1024 * 1024;
    int reduces = (int) (wholesize / fl + 1);
    job.setNumReduceTasks(reduces);

    FileInputFormat.setInputPaths(job, inputdir);
    FileOutputFormat.setOutputPath(job, new Path(outputdir));

    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(IRecord.class);

    job.setMapperClass(MergeMap.class);
    job.setReducerClass(MergeReduce.class);

    job.setInputFormat(MergeIFormatInputFormat.class);
    job.setOutputFormat(MergeIFormatOutputFormat.class);

    JobClient jc = new JobClient(job);
    RunningJob rjob = jc.submitJob(job);
    try {

        String lastReport = "";
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
        long reportTime = System.currentTimeMillis();
        long maxReportInterval = 3 * 1000;

        while (!rjob.isComplete()) {
            Thread.sleep(1000);

            int mapProgress = Math.round(rjob.mapProgress() * 100);
            int reduceProgress = Math.round(rjob.reduceProgress() * 100);

            String report = " map = " + mapProgress + "%,  reduce = " + reduceProgress + "%";

            if (!report.equals(lastReport) || System.currentTimeMillis() >= reportTime + maxReportInterval) {

                String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
                System.err.println(output);
                lastReport = report;
                reportTime = System.currentTimeMillis();
            }
        }
        // getJobState() returns the numeric JobStatus constant of the finished job
        LOG.info(rjob.getJobState());

    } catch (IOException e1) {
        e1.printStackTrace();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file: FormatStorage1.MergeFileUtil1.java

License: Open Source License

public static void run(String inputdir, String outputdir, Configuration conf) throws IOException {
    JobConf job = new JobConf(conf);
    job.setJobName("MergeFileUtil1");
    job.setJarByClass(MergeFileUtil1.class);
    FileSystem fs = null;
    fs = FileSystem.get(job);
    if (fs.exists(new Path(outputdir))) {
        throw new IOException("outputdir: " + outputdir + " exist!!!");
    }

    FileStatus[] fss = fs.listStatus(new Path(inputdir));

    if (fss == null || fss.length <= 0) {
        throw new IOException("no input files");
    }

    IFormatDataFile ifdf = new IFormatDataFile(job);
    ifdf.open(fss[0].getPath().toString());
    job.set("ifdf.head.info", ifdf.fileInfo().head().toStr());
    ifdf.close();

    long wholesize = 0;
    for (FileStatus status : fss) {
        wholesize += status.getLen();
    }

    job.setNumReduceTasks(0);

    FileInputFormat.setInputPaths(job, inputdir);
    FileOutputFormat.setOutputPath(job, new Path(outputdir));

    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(IRecord.class);

    job.setMapperClass(MergeMap.class);

    job.setInputFormat(CombineFormatStorageFileInputFormat.class);
    job.setOutputFormat(MergeIFormatOutputFormat1.class);

    JobClient jc = new JobClient(job);
    RunningJob rjob = jc.submitJob(job);
    try {

        String lastReport = "";
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
        long reportTime = System.currentTimeMillis();
        long maxReportInterval = 3 * 1000;

        while (!rjob.isComplete()) {
            Thread.sleep(1000);

            int mapProgress = Math.round(rjob.mapProgress() * 100);
            int reduceProgress = Math.round(rjob.reduceProgress() * 100);

            String report = " map = " + mapProgress + "%,  reduce = " + reduceProgress + "%";

            if (!report.equals(lastReport) || System.currentTimeMillis() >= reportTime + maxReportInterval) {

                String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
                System.err.println(output);
                lastReport = report;
                reportTime = System.currentTimeMillis();
            }
        }
        // getJobState() returns the numeric JobStatus constant of the finished job
        LOG.info(rjob.getJobState());

    } catch (IOException e1) {
        e1.printStackTrace();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file: FormatStorage1.MergeFileUtil1.java

License: Open Source License

public static void runold(String inputdir, String outputdir, Configuration conf) throws IOException {
    JobConf job = new JobConf(conf);
    job.setJobName("MergeFileUtil");
    job.setJarByClass(MergeFileUtil1.class);
    FileSystem fs = null;
    fs = FileSystem.get(job);
    if (fs.exists(new Path(outputdir))) {
        throw new IOException("outputdir: " + outputdir + " exist!!!");
    }

    FileStatus[] fss = fs.listStatus(new Path(inputdir));

    if (fss == null || fss.length <= 0) {
        throw new IOException("no input files");
    }

    for (FileStatus status : fss) {
        if (status.isDir()) {
            throw new IOException("!!!input dir contains directory:\t" + status.getPath().toString());
        }
    }

    IFormatDataFile ifdf = new IFormatDataFile(job);
    ifdf.open(fss[0].getPath().toString());
    job.set("ifdf.head.info", ifdf.fileInfo().head().toStr());
    ifdf.close();

    long wholesize = 0;
    for (FileStatus status : fss) {
        wholesize += status.getLen();
    }

    long fl = 512 * 1024 * 1024;
    int reduces = (int) (wholesize / fl + 1);
    job.setNumReduceTasks(reduces);

    FileInputFormat.setInputPaths(job, inputdir);
    FileOutputFormat.setOutputPath(job, new Path(outputdir));

    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(IRecord.class);

    job.setMapperClass(MergeMap.class);
    job.setReducerClass(MergeReduce.class);

    job.setInputFormat(MergeIFormatInputFormat.class);
    job.setOutputFormat(MergeIFormatOutputFormat.class);

    JobClient jc = new JobClient(job);
    RunningJob rjob = jc.submitJob(job);
    try {

        String lastReport = "";
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
        long reportTime = System.currentTimeMillis();
        long maxReportInterval = 3 * 1000;

        while (!rjob.isComplete()) {
            Thread.sleep(1000);

            int mapProgress = Math.round(rjob.mapProgress() * 100);
            int reduceProgress = Math.round(rjob.reduceProgress() * 100);

            String report = " map = " + mapProgress + "%,  reduce = " + reduceProgress + "%";

            if (!report.equals(lastReport) || System.currentTimeMillis() >= reportTime + maxReportInterval) {

                String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
                System.err.println(output);
                lastReport = report;
                reportTime = System.currentTimeMillis();
            }
        }
        // getJobState() returns the numeric JobStatus constant of the finished job
        LOG.info(rjob.getJobState());

    } catch (IOException e1) {
        e1.printStackTrace();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file: org.apache.hive.hcatalog.templeton.tool.LogRetriever.java

License: Apache License

private void logJob(String logDir, String jobID, PrintWriter listWriter) throws IOException {
    RunningJob rj = jobClient.getJob(JobID.forName(jobID));
    String jobURLString = rj.getTrackingURL();

    Path jobDir = new Path(logDir, jobID);
    fs.mkdirs(jobDir);

    // Log the job configuration (job.xml.html)
    try {
        logJobConf(jobID, jobURLString, jobDir.toString());
    } catch (IOException e) {
        System.err.println("Cannot retrieve job.xml.html for " + jobID);
        e.printStackTrace();
    }

    listWriter.println("job: " + jobID + "(" + "name=" + rj.getJobName() + "," + "status="
            + JobStatus.getJobRunState(rj.getJobState()) + ")");

    // Get completed attempts
    List<AttemptInfo> attempts = new ArrayList<AttemptInfo>();
    for (String type : new String[] { "map", "reduce", "setup", "cleanup" }) {
        try {
            List<AttemptInfo> successAttempts = getCompletedAttempts(jobID, jobURLString, type);
            attempts.addAll(successAttempts);
        } catch (IOException e) {
            System.err.println("Cannot retrieve " + type + " tasks for " + jobID);
            e.printStackTrace();
        }
    }

    // Get failed attempts
    try {
        List<AttemptInfo> failedAttempts = getFailedAttempts(jobID, jobURLString);
        attempts.addAll(failedAttempts);
    } catch (IOException e) {
        System.err.println("Cannot retrieve failed attempts for " + jobID);
        e.printStackTrace();
    }

    // Log each attempt
    for (AttemptInfo attempt : attempts) {
        try {
            logAttempt(jobID, attempt, jobDir.toString());
            listWriter.println("  attempt:" + attempt.id + "(" + "type=" + attempt.type + "," + "status="
                    + attempt.status + "," + "starttime=" + attempt.startTime + "," + "endtime="
                    + attempt.endTime + ")");
        } catch (IOException e) {
            System.err.println("Cannot log attempt " + attempt.id);
            e.printStackTrace();
        }
    }

    listWriter.println();
}

From source file: org.apache.oozie.action.hadoop.LauncherMainTester.java

License: Apache License

private static void executeJavaMapReduce(String[] args) throws IOException, InterruptedException {
    JobConf jConf = createSleepMapperReducerJobConf();
    final Path input = new Path(args[1]);
    FileInputFormat.setInputPaths(jConf, input);
    FileOutputFormat.setOutputPath(jConf, new Path(args[2]));
    writeToFile(input, jConf, "dummy\n", "data.txt");
    JobClient jc = new JobClient(jConf);
    System.out.println("Submitting MR job");
    RunningJob job = jc.submitJob(jConf);
    System.out.println("Submitted job " + job.getID().toString());
    writeToFile(input, jConf, job.getID().toString(), JOB_ID_FILE_NAME);
    job.waitForCompletion();
    jc.monitorAndPrintJob(jConf, job);
    if (job.getJobState() != JobStatus.SUCCEEDED) {
        System.err.println(job.getJobState() + " job state instead of " + JobStatus.SUCCEEDED);
        System.exit(-1);
    }
}

From source file: org.apache.oozie.command.wf.TestWorkflowActionKillXCommand.java

License: Apache License

public void testWfActionKillChildJob() throws Exception {
    String externalJobID = launchSleepJob(1000);
    String childId = launchSleepJob(1000000);

    WorkflowJobBean job = this.addRecordToWfJobTable(WorkflowJob.Status.KILLED, WorkflowInstance.Status.KILLED);
    WorkflowActionBean action = this.addRecordToWfActionTable(job.getId(), externalJobID, "1",
            WorkflowAction.Status.KILLED, childId);

    new ActionKillXCommand(action.getId()).call();
    JobClient jobClient = createJobClient();

    final RunningJob mrJob = jobClient.getJob(JobID.forName(childId));
    waitFor(60 * 1000, new Predicate() {
        public boolean evaluate() throws Exception {
            return mrJob.isComplete();
        }
    });
    assertEquals(mrJob.getJobState(), JobStatus.KILLED);
}

From source file: org.apache.sqoop.submission.mapreduce.MapreduceSubmissionEngine.java

License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public SubmissionStatus status(String submissionId) {
    try {
        RunningJob runningJob = jobClient.getJob(JobID.forName(submissionId));
        if (runningJob == null) {
            return SubmissionStatus.UNKNOWN;
        }

        int status = runningJob.getJobState();
        return convertMapreduceState(status);

    } catch (IOException e) {
        throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0003, e);
    }
}

From source file: org.dkpro.bigdata.hadoop.DkproHadoopDriver.java

License: Apache License

/**
 * Runs the UIMA pipeline.
 * 
 * @return 0 if Hadoop job succeeded, 1 if job failed, 2 if it was killed, otherwise 3
 * 
 * @see org.apache.hadoop.util.Tool#run(java.lang.String[])
 */
@Override
public int run(String[] args) throws Exception {
    if (args.length < 2) {
        System.out.println(
                "Usage: " + this.getClass().getSimpleName() + " [hadoop-params] input output [job-params]");
        System.exit(1);
    }
    this.job = new JobConf(getConf(), DkproHadoopDriver.class);
    final FileSystem fs = FileSystem.get(this.job);
    // set the factory class name
    this.job.set("dkpro.uima.factory", this.getClass().getName());
    Path inputPath;
    if (args[0].contains(",")) {
        String[] inputPaths = args[0].split(",");
        inputPath = new Path(inputPaths[0]);
        for (String path : inputPaths) {
            FileInputFormat.addInputPath(job, new Path(path));
        }
    } else {
        inputPath = new Path(args[0]); // input
        FileInputFormat.setInputPaths(this.job, inputPath);

    }
    String outDir = args[1];
    if (!getConf().getBoolean("dkpro.output.overwrite", true)) {
        outDir = getUniqueDirectoryName(outDir, fs);
    }
    final Path outputPath = new Path(outDir);// output
    final CollectionReader reader = buildCollectionReader();
    // if a collection reader was defined, import data into hdfs
    // try {
    // final Class<?> c = Class.forName("org.apache.hadoop.io.compress.SnappyCodec");
    // FileOutputFormat.setOutputCompressorClass(this.job,
    // (Class<? extends CompressionCodec>) c);
    // }
    // catch (final Exception e) {
    //
    // }
    if (reader != null) {
        final AnalysisEngine xcasWriter = AnalysisEngineFactory.createEngine(
                CASWritableSequenceFileWriter.class, // createTypeSystemDescription(),
                CASWritableSequenceFileWriter.PARAM_PATH, inputPath.toString(),
                CASWritableSequenceFileWriter.PARAM_COMPRESS, true, CASWritableSequenceFileWriter.PARAM_FS,
                job.get(("fs.default.name"), "file:/"));
        runPipeline(reader, xcasWriter);
    }
    // cleanup previous output
    fs.delete(outputPath, true);
    // this is a sensible default for the UKP cluster
    //        int numMappers = 256;
    // if (args.length > 2) {
    // numMappers = Integer.parseInt(args[2]);
    // }

    FileOutputFormat.setOutputPath(this.job, outputPath);
    // SequenceFileOutputFormat.setCompressOutput(this.job, true);

    if (this.job.get("mapred.output.compress") == null) {
        this.job.setBoolean("mapred.output.compress", true);
    }
    // Just in case compression is on
    this.job.set("mapred.output.compression.type", "BLOCK");

    if (this.job.getBoolean("dkpro.output.writecas", true)) {
        if (this.job.getBoolean("dkpro.output.plaintext", false)) {
            this.job.setOutputFormat(TextOutputFormat.class);
        } else {
            this.job.setOutputFormat(SequenceFileOutputFormat.class);
        }
    } else {
        job.setOutputFormat(NullOutputFormat.class);
    }
    // this.job.set("mapred.output.compression.codec",
    // "org.apache.hadoop.io.compress.GzipCodec");
    // use compression
    // setup some sensible defaults
    this.job.setMapperClass(this.mapperClass);
    this.job.setReducerClass(this.reducerClass);
    if (getInputFormatClass() != null) {
        this.job.setInputFormat(getInputFormatClass());
    } else {
        this.job.setInputFormat(SequenceFileInputFormat.class);
    }
    // this.job.setOutputFormat(TextOutputFormat.class);
    this.job.setMapOutputKeyClass(Text.class);
    this.job.setMapOutputValueClass(BinCasWithTypeSystemWritable.class);
    this.job.setOutputKeyClass(Text.class);
    this.job.setOutputValueClass(BinCasWithTypeSystemWritable.class);
    this.job.setJobName(this.getClass().getSimpleName());
    // this.job.set("mapred.child.java.opts", "-Xmx1g");
    //        this.job.setInt("mapred.job.map.memory.mb", 1280);
    //        this.job.setInt("mapred.job.reduce.memory.mb", 1280);
    //        this.job.setNumMapTasks(numMappers);
    this.job.setNumReduceTasks(0);
    configure(this.job);

    // create symlinks for distributed resources
    DistributedCache.createSymlink(this.job);
    // sLogger.info("Running job "+job.getJobName());

    RunningJob runningJob = JobClient.runJob(this.job);
    runningJob.waitForCompletion();
    int status = runningJob.getJobState();
    if (status == JobStatus.SUCCEEDED) {
        return 0;
    } else if (status == JobStatus.FAILED) {
        return 1;
    } else if (status == JobStatus.KILLED) {
        return 2;
    } else {
        return 3;
    }

}

From source file: org.godhuli.rhipe.FileUtils.java

License: Apache License

public REXP getstatus(String jd, boolean geterrors) throws Exception {
    org.apache.hadoop.mapred.JobID jj = org.apache.hadoop.mapred.JobID.forName(jd);
    if (jj == null)
        throw new IOException("Jobtracker could not find jobID: " + jd);
    org.apache.hadoop.mapred.RunningJob rj = jclient.getJob(jj);
    if (rj == null)
        throw new IOException(
                "No such job: " + jd + " available, wrong job? or try the History Viewer (see the Web UI) ");
    String jobfile = rj.getJobFile();
    String jobname = rj.getJobName();
    // cfg.addResource(new Path(jobfile));
    org.apache.hadoop.mapred.Counters cc = rj.getCounters();
    long startsec = getStart(jclient, jj);
    double dura = ((double) System.currentTimeMillis() - startsec) / 1000;
    REXP ro = FileUtils.buildlistFromOldCounter(cc, dura);
    int jobs = rj.getJobState();
    String jobss = null;
    if (jobs == JobStatus.FAILED)
        jobss = "FAILED";
    else if (jobs == JobStatus.KILLED)
        jobss = "KILLED";
    else if (jobs == JobStatus.PREP)
        jobss = "PREP";
    else if (jobs == JobStatus.RUNNING)
        jobss = "RUNNING";
    else if (jobs == JobStatus.SUCCEEDED)
        jobss = "SUCCEEDED";
    float mapprog = rj.mapProgress(), reduprog = rj.reduceProgress();

    org.apache.hadoop.mapred.TaskReport[] maptr = jclient.getMapTaskReports(jj);
    org.apache.hadoop.mapred.TaskReport[] redtr = jclient.getReduceTaskReports(jj);

    int totalmaps = maptr.length, totalreds = redtr.length;
    int mappending = 0, redpending = 0, maprunning = 0, redrunning = 0, redfailed = 0, redkilled = 0,
            mapkilled = 0, mapfailed = 0, mapcomp = 0, redcomp = 0;
    for (int i = 0; i < maptr.length; i++) {
        TIPStatus t = maptr[i].getCurrentStatus();
        switch (t) {
        case COMPLETE:
            mapcomp++;
            break;
        case FAILED:
            mapfailed++;
            break;
        case PENDING:
            mappending++;
            break;
        case RUNNING:
            maprunning++;
            break;
        case KILLED:
            mapkilled++;
            break;
        }
    }
    for (int i = 0; i < redtr.length; i++) {
        TIPStatus t = redtr[i].getCurrentStatus();
        switch (t) {
        case COMPLETE:
            redcomp++;
            break;
        case FAILED:
            redfailed++;
            break;
        case PENDING:
            redpending++;
            break;
        case RUNNING:
            redrunning++;
            break;
        case KILLED:
            redkilled++;
            break;
        }
    }
    int reduceafails = 0, reduceakilled = 0, mapafails = 0, mapakilled = 0;
    int startfrom = 0;

    REXP.Builder errcontainer = REXP.newBuilder();
    errcontainer.setRclass(REXP.RClass.STRING);
    while (true) {
        org.apache.hadoop.mapred.TaskCompletionEvent[] events = rj.getTaskCompletionEvents(startfrom);
        for (int i = 0; i < events.length; i++) {
            org.apache.hadoop.mapred.TaskCompletionEvent e = events[i];
            int f = 0, k = 0;
            switch (e.getTaskStatus()) {
            case KILLED:
                if (e.isMapTask()) {
                    mapakilled++;
                } else {
                    reduceakilled++;
                }
                break;
            case TIPFAILED:
            case FAILED:
                if (e.isMapTask()) {
                    mapafails++;
                } else {
                    reduceafails++;
                }
                if (geterrors) {
                    REXPProtos.STRING.Builder content = REXPProtos.STRING.newBuilder();
                    String[] s = rj.getTaskDiagnostics(e.getTaskAttemptId());
                    if (s != null && s.length > 0) {
                        content.setStrval(s[0]);
                        errcontainer.addStringValue(content.build());
                    }
                }
                break;
            }
        }
        startfrom += events.length;
        if (events.length == 0)
            break;
    }

    REXP.Builder thevals = REXP.newBuilder();
    thevals.setRclass(REXP.RClass.LIST);
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobss }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { dura }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { (double) mapprog, (double) reduprog }));
    thevals.addRexpValue(RObjects.buildIntVector(
            new int[] { totalmaps, mappending, maprunning, mapcomp, mapkilled, mapafails, mapakilled }));
    thevals.addRexpValue(RObjects.buildIntVector(
            new int[] { totalreds, redpending, redrunning, redcomp, redkilled, reduceafails, reduceakilled }));
    thevals.addRexpValue(ro);
    thevals.addRexpValue(errcontainer);
    thevals.addRexpValue(RObjects.makeStringVector(rj.getTrackingURL()));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobname }));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobfile }));
    return (thevals.build());
}

From source file: org.godhuli.rhipe.FileUtils.java

License: Apache License

public byte[] getDetailedInfoForJob(String jd) throws Exception {
    org.apache.hadoop.mapred.JobID jj = org.apache.hadoop.mapred.JobID.forName(jd);
    if (jj == null)
        throw new IOException("Jobtracker could not find jobID: " + jd);
    org.apache.hadoop.mapred.RunningJob rj = jclient.getJob(jj);
    if (rj == null)
        throw new IOException(
                "No such job: " + jd + " available, wrong job? or try the History Viewer (see the Web UI) ");
    String jobfile = rj.getJobFile();
    String jobname = rj.getJobName();
    org.apache.hadoop.mapred.Counters cc = rj.getCounters();
    long startsec = getStart(jclient, jj);
    REXP allCounters = FileUtils.buildlistFromOldCounter(cc, 0);
    int jobs = rj.getJobState();
    String jobss = null;
    if (jobs == JobStatus.FAILED)
        jobss = "FAILED";
    else if (jobs == JobStatus.KILLED)
        jobss = "KILLED";
    else if (jobs == JobStatus.PREP)
        jobss = "PREP";
    else if (jobs == JobStatus.RUNNING)
        jobss = "RUNNING";
    else if (jobs == JobStatus.SUCCEEDED)
        jobss = "SUCCEEDED";
    float mapprog = rj.mapProgress(), reduprog = rj.reduceProgress();

    REXP.Builder thevals = REXP.newBuilder();
    thevals.setRclass(REXP.RClass.LIST);
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobss }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { startsec }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { (double) mapprog, (double) reduprog }));
    thevals.addRexpValue(allCounters);
    thevals.addRexpValue(RObjects.makeStringVector(rj.getTrackingURL()));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobname }));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobfile }));

    org.apache.hadoop.mapred.TaskReport[] maptr = jclient.getMapTaskReports(jj);
    REXP.Builder thevalsA = REXP.newBuilder();
    thevalsA.setRclass(REXP.RClass.LIST);
    for (TaskReport t : maptr) {
        thevalsA.addRexpValue(TaskReportToRexp(t));
    }
    thevals.addRexpValue(thevalsA.build());

    org.apache.hadoop.mapred.TaskReport[] redtr = jclient.getReduceTaskReports(jj);
    REXP.Builder thevalsB = REXP.newBuilder();
    thevalsB.setRclass(REXP.RClass.LIST);
    for (TaskReport t : redtr) {
        thevalsB.addRexpValue(TaskReportToRexp(t));
    }
    thevals.addRexpValue(thevalsB.build());

    return thevals.build().toByteArray();
}