Example usage for org.apache.hadoop.mapred RunningJob getID

List of usage examples for org.apache.hadoop.mapred RunningJob getID

Introduction

On this page you can find example usage for org.apache.hadoop.mapred RunningJob getID.

Prototype

public JobID getID();

Document

Get the job identifier.
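
Before the individual source examples, here is a minimal, hypothetical sketch of the typical pattern: submit (or look up) a job through JobClient, then read its cluster-assigned identifier from the RunningJob handle. The job name and configuration below are placeholders; a runnable JobConf would also set input/output paths and mapper/reducer classes.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public class RunningJobGetIdExample {
    public static void main(String[] args) throws Exception {
        // Placeholder configuration: a real job would also set input/output
        // paths, input/output formats, and mapper/reducer classes.
        JobConf conf = new JobConf();
        conf.setJobName("running-job-get-id-example");

        JobClient client = new JobClient(conf);
        RunningJob job = client.submitJob(conf);

        // getID() returns the JobID assigned by the cluster; toString() yields
        // the familiar "job_..." form used in logs and web UIs.
        JobID id = job.getID();
        System.out.println("Submitted job: " + id.toString());
    }
}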

Usage

From source file:dataload.LogFetchJobTracker.java

License:Apache License

/**
 * This is the method to get all of the jobParameters for a given job
 * @param id
 * @throws IOException
 * @throws SQLException
 */
public void getJobParameters(JobID id) throws IOException, SQLException {
    RunningJob job = client.getJob(id);
    Counters c = job.getCounters();
    Iterator<Counters.Group> itrG = c.iterator();

    PreparedStatement prepStatement = connection.prepareStatement(
            "INSERT INTO jobParameters VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
    for (int i = 1; i < 31; i++) {
        prepStatement.setLong(i, 0);
    }

    prepStatement.setString(31, job.getID().toString());
    prepStatement.setString(32, job.getJobName());
    prepStatement.setLong(33, totalTime);

    while (itrG.hasNext()) {
        Iterator<Counters.Counter> itrC = itrG.next().iterator();

        while (itrC.hasNext()) {
            Counters.Counter counter = itrC.next();
            if (mapJob.get(counter.getName()) != null) {
                prepStatement.setLong(mapJob.get(counter.getName()), counter.getCounter());
            }
        }
    }
    prepStatement.executeUpdate();
}

From source file:edu.stolaf.cs.wmrserver.HadoopEngine.java

License:Apache License

public void submit(JobRequest request, long submissionID, File mapperFile, File reducerFile, File packageDir,
        Path inputPath) throws ValidationException, NotFoundException, CompilationException, InternalException {
    // Generate job output path
    Path outputDir = new Path(_homeDir, "out");
    Path outputPath;
    try {
        FileSystem fs = outputDir.getFileSystem(new Configuration());
        outputPath = JobServiceHandler.getNonexistantPath(outputDir, request.getName(), fs);
    } catch (IOException ex) {
        throw JobServiceHandler.wrapException("Could not construct output path.", ex);
    }

    JobConf conf = new JobConf();
    conf.setJobName(request.getName());

    // Set mapper and number of tasks if specified
    StreamJob.setStreamMapper(conf, mapperFile.toString());
    if (request.isSetMapTasks())
        conf.setNumMapTasks(request.getMapTasks());

    // Set reducer and number of tasks if specified
    StreamJob.setStreamReducer(conf, reducerFile.toString());
    if (request.isSetReduceTasks())
        conf.setNumReduceTasks(request.getReduceTasks());

    // Create and set job JAR, including necessary files
    ArrayList<String> jarFiles = new ArrayList<String>();
    jarFiles.add(packageDir.toString());
    String jarPath;
    try {
        jarPath = StreamJob.createJobJar(conf, jarFiles, _tempDir);
    } catch (IOException ex) {
        throw JobServiceHandler.wrapException("Could not create job jar.", ex);
    }
    if (jarPath != null)
        conf.setJar(jarPath);

    // TODO: This is a hack. Rewrite streaming to use DistributedCache.
    //conf.setPattern("mapreduce.job.jar.unpack.pattern",
    //              Pattern.compile(".*"));

    // Set I/O formats and paths
    conf.setInputFormat(KeyValueTextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);
    FileInputFormat.addInputPath(conf, inputPath);
    FileOutputFormat.setOutputPath(conf, outputPath);

    // Use numeric sort if appropriate
    conf.setBoolean(CONF_NUMERIC, request.isNumericSort());
    if (request.isNumericSort()) {
        conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
        conf.setPartitionerClass(KeyFieldBasedPartitioner.class);
        conf.setKeyFieldComparatorOptions("-n");
        conf.setKeyFieldPartitionerOptions("-n");
    }

    // Set other job information
    conf.set(CONF_USER, request.getUser());
    conf.set(CONF_LANGUAGE, request.getLanguage());
    conf.set(CONF_MAPPER, request.getMapper());
    conf.set(CONF_REDUCER, request.getReducer());

    // Attempt to submit the job

    RunningJob job;
    try {
        JobClient client = new JobClient(new JobConf());
        job = client.submitJob(conf);
    } catch (IOException ex) {
        throw JobServiceHandler.wrapException("There was a serious error while attempting to submit the job.",
                ex);
    }

    try {
        SubmissionDatabase.setSubmitted(submissionID);
        SubmissionDatabase.setHadoopID(submissionID, job.getID().toString());
    } catch (SQLException ex) {
        throw JobServiceHandler.wrapException("Could not update submission in database.", ex);
    }
}

From source file:edu.stolaf.cs.wmrserver.HadoopEngine.java

License:Apache License

public JobStatus getStatus(Submission submission) throws NotFoundException, InternalException {
    RunningJob job = getJob(submission);
    JobConf conf = loadJobConfiguration(job);

    JobStatus status = new JobStatus();
    status.setInfo(getInfo(submission, job, conf));

    try {
        JobClient client = new JobClient(new JobConf());

        // Get job state
        // getJobState() returns an int constant, so map it to our State
        // enum with a chain of ifs.
        int jobState = job.getJobState();
        if (jobState == org.apache.hadoop.mapred.JobStatus.FAILED)
            status.setState(State.FAILED);
        else if (jobState == org.apache.hadoop.mapred.JobStatus.SUCCEEDED)
            status.setState(State.SUCCESSFUL);
        else if (jobState == org.apache.hadoop.mapred.JobStatus.KILLED)
            status.setState(State.KILLED);
        else if (jobState == org.apache.hadoop.mapred.JobStatus.RUNNING)
            status.setState(State.RUNNING);
        else
            status.setState(State.PREP);

        // Get task counts
        TaskReport[] mapTaskReports = client.getMapTaskReports(job.getID());
        TaskReport[] reduceTaskReports = client.getReduceTaskReports(job.getID());

        // Get failed task logs
        TaskCompletionEvent[] events = job.getTaskCompletionEvents(0);
        Pair<ArrayList<TaskLog>, ArrayList<TaskLog>> failures;
        if (events != null)
            failures = getLogsFromCompletionEvents(events);
        else
            failures = getLogsFromHistory(job, new Configuration());
        ArrayList<TaskLog> mapFailures = failures.first;
        ArrayList<TaskLog> reduceFailures = failures.second;

        // Get other mapper info
        PhaseStatus mapStatus = new PhaseStatus();
        mapStatus.setProgress(job.mapProgress() * 100);
        if (!mapFailures.isEmpty())
            mapStatus.setErrors(getMeaningfulTaskLog(mapFailures));
        if (mapTaskReports != null)
            mapStatus.setTotalTasks(mapTaskReports.length);
        // TODO: Handle the state in a sane way
        mapStatus.setState(status.getState());
        status.setMapStatus(mapStatus);

        // Get other reducer info
        PhaseStatus reduceStatus = new PhaseStatus();
        reduceStatus.setProgress(job.reduceProgress() * 100);
        if (!reduceFailures.isEmpty())
            reduceStatus.setErrors(getMeaningfulTaskLog(reduceFailures));
        reduceStatus.setState(status.getState());
        if (reduceTaskReports != null)
            reduceStatus.setTotalTasks(reduceTaskReports.length);
        if (conf != null)
            reduceStatus.setOutputPath(FileOutputFormat.getOutputPath(conf).toString());
        status.setReduceStatus(reduceStatus);
    } catch (Exception ex) {
        throw JobServiceHandler.wrapException("Could not get job info.", ex);
    }

    return status;
}

From source file:edu.stolaf.cs.wmrserver.HadoopEngine.java

License:Apache License

private JobConf loadJobConfiguration(RunningJob job) throws InternalException {
    // Try normal job file
    try {
        JobConf conf = new JobConf();
        Path jobFile = new Path(job.getJobFile());
        FileSystem fs = jobFile.getFileSystem(new Configuration());
        conf.addResource(fs.open(jobFile));

        return conf;
    } catch (IOException ex) {
    } catch (IllegalArgumentException ex) {
    }

    // Hadoop 0.20 only
    return new JobConf(org.apache.hadoop.mapred.JobTracker.getLocalJobFilePath(job.getID()));

    /*
    // Try to retrieve configuration from history
    // Hadoop 0.21 only!
    try
    {
       Method m = JobTracker.class.getMethod("getLocalJobFilePath", JobID.class);
       String jobFile = m.invoke(null, job.getID());
       return new JobConf(jobFile);
    }
    catch (NoSuchMethodException ex)
    {
    }
    catch (SecurityException ex)
    {
    }
            
    // Try to retrieve configuration from history (0.21 only)
    try
    {
       Method getHistoryUrl = job.getClass().getMethod("getHistoryUrl");
               
       Path historyPath = new Path(getHistoryUrl.invoke(job));
       Path historyDir = historyPath.getParent();
               
       Class jobHistoryClass = Class.forName(
       "org.apache.hadoop.mapreduce.jobhistory.JobHistory");
       Method getConfFile = jobHistoryClass.getMethod(
       "getConfFile", Path.class, JobID.class);
               
       Path jobFile = getConfFile.invoke(null, historyDir, job.getID());
               
       return new JobConf(jobFile);
    }
    catch (IOException ex)
    {
    }
    catch (IllegalArgumentException ex)
    {
       // Thrown for empty string in Path
       // This should only be temporary
    }
            
    return null;
    */
}

From source file:hydrograph.engine.cascading.flow.CustomFlowStepListener.java

License:Apache License

@Override
public boolean onStepThrowable(FlowStep flowstep, Throwable arg1) {
    for (Flow flow : flows) {
        List<FlowStepStats> flows = flow.getFlowStats().getFlowStepStats();
        for (FlowStepStats flowStat : flows) {
            HadoopStepStats stats = (HadoopStepStats) flowStat;
            try {
                RunningJob runningJob = stats.getJobStatusClient();
                if (runningJob != null) {
                    JobID jobID = runningJob.getID();
                    LOG.error("Killing Job " + jobID.getId());
                    runningJob.killJob();
                    LOG.info("Job: '" + jobID.getId() + "' started at: " + stats.getStartTime()
                            + " killed successfully!");
                }
            } catch (Exception e) {
                LOG.error("", e);
                throw new RuntimeException(e);
            }
        }
    }
    return true;
}

From source file:org.apache.accumulo.server.master.CoordinateRecoveryTask.java

License:Apache License

void cleanupOldJobs() {
    try {
        Configuration conf = CachedConfiguration.getInstance();
        @SuppressWarnings("deprecation")
        JobClient jc = new JobClient(new org.apache.hadoop.mapred.JobConf(conf));
        for (JobStatus status : jc.getAllJobs()) {
            if (!status.isJobComplete()) {
                RunningJob job = jc.getJob(status.getJobID());
                if (job.getJobName().equals(LogSort.getJobName())) {
                    log.info("found a running " + job.getJobName());
                    Configuration jobConfig = new Configuration(false);
                    log.info("fetching configuration from " + job.getJobFile());
                    jobConfig.addResource(TraceFileSystem
                            .wrap(FileUtil.getFileSystem(conf, ServerConfiguration.getSiteConfiguration()))
                            .open(new Path(job.getJobFile())));
                    if (HdfsZooInstance.getInstance().getInstanceID()
                            .equals(jobConfig.get(LogSort.INSTANCE_ID_PROPERTY))) {
                        log.info("Killing job " + job.getID().toString());
                    }
                }
            }
        }
        FileStatus[] children = fs.listStatus(new Path(ServerConstants.getRecoveryDir()));
        if (children != null) {
            for (FileStatus child : children) {
                log.info("Deleting recovery directory " + child);
                fs.delete(child.getPath(), true);
            }
        }
    } catch (IOException e) {
        log.error("Error cleaning up old Log Sort jobs" + e);
    } catch (Exception e) {
        log.error("Unknown error cleaning up old jobs", e);
    }
}

From source file:org.apache.oozie.action.hadoop.JavaActionExecutor.java

License:Apache License

public void submitLauncher(FileSystem actionFs, Context context, WorkflowAction action)
        throws ActionExecutorException {
    JobClient jobClient = null;
    boolean exception = false;
    try {
        Path appPathRoot = new Path(context.getWorkflow().getAppPath());

        // app path could be a file
        if (actionFs.isFile(appPathRoot)) {
            appPathRoot = appPathRoot.getParent();
        }

        Element actionXml = XmlUtils.parseXml(action.getConf());

        // action job configuration
        Configuration actionConf = loadHadoopDefaultResources(context, actionXml);
        setupActionConf(actionConf, context, actionXml, appPathRoot);
        LOG.debug("Setting LibFilesArchives ");
        setLibFilesArchives(context, actionXml, appPathRoot, actionConf);

        String jobName = actionConf.get(HADOOP_JOB_NAME);
        if (jobName == null || jobName.isEmpty()) {
            jobName = XLog.format("oozie:action:T={0}:W={1}:A={2}:ID={3}", getType(),
                    context.getWorkflow().getAppName(), action.getName(), context.getWorkflow().getId());
            actionConf.set(HADOOP_JOB_NAME, jobName);
        }

        injectActionCallback(context, actionConf);

        if (actionConf.get(ACL_MODIFY_JOB) == null || actionConf.get(ACL_MODIFY_JOB).trim().equals("")) {
            // ONLY in the case where user has not given the
            // modify-job ACL specifically
            if (context.getWorkflow().getAcl() != null) {
                // setting the group owning the Oozie job to allow anybody in that
                // group to modify the jobs.
                actionConf.set(ACL_MODIFY_JOB, context.getWorkflow().getAcl());
            }
        }

        // Setting the credential properties in launcher conf
        JobConf credentialsConf = null;
        HashMap<String, CredentialsProperties> credentialsProperties = setCredentialPropertyToActionConf(
                context, action, actionConf);
        if (credentialsProperties != null) {

            // Adding if action need to set more credential tokens
            credentialsConf = new JobConf(false);
            XConfiguration.copy(actionConf, credentialsConf);
            setCredentialTokens(credentialsConf, context, action, credentialsProperties);

            // insert conf to action conf from credentialsConf
            for (Entry<String, String> entry : credentialsConf) {
                if (actionConf.get(entry.getKey()) == null) {
                    actionConf.set(entry.getKey(), entry.getValue());
                }
            }
        }

        JobConf launcherJobConf = createLauncherConf(actionFs, context, action, actionXml, actionConf);

        LOG.debug("Creating Job Client for action " + action.getId());
        jobClient = createJobClient(context, launcherJobConf);
        String launcherId = LauncherMapperHelper.getRecoveryId(launcherJobConf, context.getActionDir(),
                context.getRecoveryId());
        boolean alreadyRunning = launcherId != null;
        RunningJob runningJob;

        // if user-retry is on, always submit new launcher
        boolean isUserRetry = ((WorkflowActionBean) action).isUserRetry();

        if (alreadyRunning && !isUserRetry) {
            runningJob = jobClient.getJob(JobID.forName(launcherId));
            if (runningJob == null) {
                String jobTracker = launcherJobConf.get(HADOOP_JOB_TRACKER);
                throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "JA017",
                        "unknown job [{0}@{1}], cannot recover", launcherId, jobTracker);
            }
        } else {
            LOG.debug("Submitting the job through Job Client for action " + action.getId());

            // setting up propagation of the delegation token.
            HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
            Token<DelegationTokenIdentifier> mrdt = jobClient
                    .getDelegationToken(has.getMRDelegationTokenRenewer(launcherJobConf));
            launcherJobConf.getCredentials().addToken(HadoopAccessorService.MR_TOKEN_ALIAS, mrdt);

            // insert credentials tokens to launcher job conf if needed
            if (needInjectCredentials() && credentialsConf != null) {
                for (Token<? extends TokenIdentifier> tk : credentialsConf.getCredentials().getAllTokens()) {
                    Text fauxAlias = new Text(tk.getKind() + "_" + tk.getService());
                    LOG.debug("ADDING TOKEN: " + fauxAlias);
                    launcherJobConf.getCredentials().addToken(fauxAlias, tk);
                }
            } else {
                LOG.info("No need to inject credentials.");
            }
            runningJob = jobClient.submitJob(launcherJobConf);
            if (runningJob == null) {
                throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "JA017",
                        "Error submitting launcher for action [{0}]", action.getId());
            }
            launcherId = runningJob.getID().toString();
            LOG.debug("After submission get the launcherId " + launcherId);
        }

        String jobTracker = launcherJobConf.get(HADOOP_JOB_TRACKER);
        String consoleUrl = runningJob.getTrackingURL();
        context.setStartData(launcherId, jobTracker, consoleUrl);
    } catch (Exception ex) {
        exception = true;
        throw convertException(ex);
    } finally {
        if (jobClient != null) {
            try {
                jobClient.close();
            } catch (Exception e) {
                if (exception) {
                    LOG.error("JobClient error: ", e);
                } else {
                    throw convertException(e);
                }
            }
        }
    }
}

From source file:org.apache.oozie.action.hadoop.JavaActionExecutor.java

License:Apache License

@Override
public void check(Context context, WorkflowAction action) throws ActionExecutorException {
    JobClient jobClient = null;
    boolean exception = false;
    LogUtils.setLogInfo(action);
    try {
        Element actionXml = XmlUtils.parseXml(action.getConf());
        FileSystem actionFs = context.getAppFileSystem();
        JobConf jobConf = createBaseHadoopConf(context, actionXml);
        jobClient = createJobClient(context, jobConf);
        RunningJob runningJob = getRunningJob(context, action, jobClient);
        if (runningJob == null) {
            context.setExecutionData(FAILED, null);
            throw new ActionExecutorException(ActionExecutorException.ErrorType.FAILED, "JA017",
                    "Could not lookup launched hadoop Job ID [{0}] which was associated with "
                            + " action [{1}].  Failing this action!",
                    getActualExternalId(action), action.getId());
        }
        if (runningJob.isComplete()) {
            Path actionDir = context.getActionDir();
            String newId = null;
            // load sequence file into object
            Map<String, String> actionData = LauncherMapperHelper.getActionData(actionFs, actionDir, jobConf);
            if (actionData.containsKey(LauncherMapper.ACTION_DATA_NEW_ID)) {
                newId = actionData.get(LauncherMapper.ACTION_DATA_NEW_ID);
                String launcherId = action.getExternalId();
                runningJob = jobClient.getJob(JobID.forName(newId));
                if (runningJob == null) {
                    context.setExternalStatus(FAILED);
                    throw new ActionExecutorException(ActionExecutorException.ErrorType.FAILED, "JA017",
                            "Unknown hadoop job [{0}] associated with action [{1}].  Failing this action!",
                            newId, action.getId());
                }
                context.setExternalChildIDs(newId);
                LOG.info(XLog.STD, "External ID swap, old ID [{0}] new ID [{1}]", launcherId, newId);
            } else {
                String externalIDs = actionData.get(LauncherMapper.ACTION_DATA_EXTERNAL_CHILD_IDS);
                if (externalIDs != null) {
                    context.setExternalChildIDs(externalIDs);
                    LOG.info(XLog.STD, "Hadoop Jobs launched : [{0}]", externalIDs);
                }
            }
            if (runningJob.isComplete()) {
                // fetching action output and stats for the Map-Reduce action.
                if (newId != null) {
                    actionData = LauncherMapperHelper.getActionData(actionFs, context.getActionDir(), jobConf);
                }
                LOG.info(XLog.STD, "action completed, external ID [{0}]", action.getExternalId());
                if (LauncherMapperHelper.isMainSuccessful(runningJob)) {
                    if (getCaptureOutput(action) && LauncherMapperHelper.hasOutputData(actionData)) {
                        context.setExecutionData(SUCCEEDED, PropertiesUtils
                                .stringToProperties(actionData.get(LauncherMapper.ACTION_DATA_OUTPUT_PROPS)));
                        LOG.info(XLog.STD, "action produced output");
                    } else {
                        context.setExecutionData(SUCCEEDED, null);
                    }
                    if (LauncherMapperHelper.hasStatsData(actionData)) {
                        context.setExecutionStats(actionData.get(LauncherMapper.ACTION_DATA_STATS));
                        LOG.info(XLog.STD, "action produced stats");
                    }
                    getActionData(actionFs, runningJob, action, context);
                } else {
                    String errorReason;
                    if (actionData.containsKey(LauncherMapper.ACTION_DATA_ERROR_PROPS)) {
                        Properties props = PropertiesUtils
                                .stringToProperties(actionData.get(LauncherMapper.ACTION_DATA_ERROR_PROPS));
                        String errorCode = props.getProperty("error.code");
                        if ("0".equals(errorCode)) {
                            errorCode = "JA018";
                        }
                        if ("-1".equals(errorCode)) {
                            errorCode = "JA019";
                        }
                        errorReason = props.getProperty("error.reason");
                        LOG.warn("Launcher ERROR, reason: {0}", errorReason);
                        String exMsg = props.getProperty("exception.message");
                        String errorInfo = (exMsg != null) ? exMsg : errorReason;
                        context.setErrorInfo(errorCode, errorInfo);
                        String exStackTrace = props.getProperty("exception.stacktrace");
                        if (exMsg != null) {
                            LOG.warn("Launcher exception: {0}{E}{1}", exMsg, exStackTrace);
                        }
                    } else {
                        errorReason = XLog.format("LauncherMapper died, check Hadoop LOG for job [{0}:{1}]",
                                action.getTrackerUri(), action.getExternalId());
                        LOG.warn(errorReason);
                    }
                    context.setExecutionData(FAILED_KILLED, null);
                }
            } else {
                context.setExternalStatus("RUNNING");
                LOG.info(XLog.STD, "checking action, hadoop job ID [{0}] status [RUNNING]", runningJob.getID());
            }
        } else {
            context.setExternalStatus("RUNNING");
            LOG.info(XLog.STD, "checking action, hadoop job ID [{0}] status [RUNNING]", runningJob.getID());
        }
    } catch (Exception ex) {
        LOG.warn("Exception in check(). Message[{0}]", ex.getMessage(), ex);
        exception = true;
        throw convertException(ex);
    } finally {
        if (jobClient != null) {
            try {
                jobClient.close();
            } catch (Exception e) {
                if (exception) {
                    LOG.error("JobClient error: ", e);
                } else {
                    throw convertException(e);
                }
            }
        }
    }
}

From source file:org.apache.oozie.action.hadoop.LauncherMainTester.java

License:Apache License

private static void executeJavaMapReduce(String[] args) throws IOException, InterruptedException {
    JobConf jConf = createSleepMapperReducerJobConf();
    final Path input = new Path(args[1]);
    FileInputFormat.setInputPaths(jConf, input);
    FileOutputFormat.setOutputPath(jConf, new Path(args[2]));
    writeToFile(input, jConf, "dummy\n", "data.txt");
    JobClient jc = new JobClient(jConf);
    System.out.println("Submitting MR job");
    RunningJob job = jc.submitJob(jConf);
    System.out.println("Submitted job " + job.getID().toString());
    writeToFile(input, jConf, job.getID().toString(), JOB_ID_FILE_NAME);
    job.waitForCompletion();
    jc.monitorAndPrintJob(jConf, job);
    if (job.getJobState() != JobStatus.SUCCEEDED) {
        System.err.println(job.getJobState() + " job state instead of " + JobStatus.SUCCEEDED);
        System.exit(-1);
    }
}

From source file:org.apache.oozie.action.hadoop.MapReduceMain.java

License:Apache License

protected void run(String[] args) throws Exception {
    System.out.println();
    System.out.println("Oozie Map-Reduce action configuration");
    System.out.println("=======================");

    // loading action conf prepared by Oozie
    Configuration actionConf = new Configuration(false);
    actionConf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));
    setYarnTag(actionConf);

    JobConf jobConf = new JobConf();
    addActionConf(jobConf, actionConf);
    LauncherMainHadoopUtils.killChildYarnJobs(jobConf);

    // Run a config class if given to update the job conf
    runConfigClass(jobConf);

    logMasking("Map-Reduce job configuration:", new HashSet<String>(), jobConf);

    File idFile = new File(
            System.getProperty(LauncherMapper.ACTION_PREFIX + LauncherMapper.ACTION_DATA_NEW_ID));
    System.out.println("Submitting Oozie action Map-Reduce job");
    System.out.println();
    // submitting job
    RunningJob runningJob = submitJob(jobConf);

    String jobId = runningJob.getID().toString();
    writeJobIdFile(idFile, jobId);

    System.out.println("=======================");
    System.out.println();
}