Example usage for org.apache.hadoop.mapreduce Job killJob

List of usage examples for org.apache.hadoop.mapreduce Job killJob

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce Job killJob.

Prototype

public void killJob() throws IOException 

Document

Kill the running job.
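
Before the project-specific examples below, here is a minimal, self-contained sketch of the typical pattern (the job ID string, configuration, and class name are placeholders, not code from the source files listed later): look the job up through a Cluster by its JobID, check that it is still running, and call killJob(), handling the IOException if the kill fails.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class KillJobExample {
    public static void main(String[] args) throws Exception {
        // Placeholder job ID; in practice it comes from job.getJobID() or stored job metadata.
        String jobIdString = "job_1700000000000_0001";

        Configuration conf = new Configuration();
        Cluster cluster = new Cluster(conf);
        Job job = cluster.getJob(JobID.forName(jobIdString));

        if (job != null && !job.isComplete()) {
            try {
                // Ask the cluster to kill the running job and release its resources.
                job.killJob();
            } catch (IOException e) {
                System.err.println("Failed to kill job " + jobIdString + ": " + e.getMessage());
            }
        }
    }
}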

Usage

From source file:org.apache.kylin.engine.mr.common.MapReduceExecutable.java

License:Apache License

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final String mapReduceJobClass = getMapReduceJobClass();
    String params = getMapReduceParams();
    Preconditions.checkNotNull(mapReduceJobClass);
    Preconditions.checkNotNull(params);
    try {
        Job job;
        ExecutableManager mgr = getManager();
        final Map<String, String> extra = mgr.getOutput(getId()).getExtra();
        if (extra.containsKey(ExecutableConstants.MR_JOB_ID)) {
            Configuration conf = HadoopUtil.getCurrentConfiguration();
            job = new Cluster(conf).getJob(JobID.forName(extra.get(ExecutableConstants.MR_JOB_ID)));
            logger.info("mr_job_id:" + extra.get(ExecutableConstants.MR_JOB_ID) + " resumed");
        } else {
            final Constructor<? extends AbstractHadoopJob> constructor = ClassUtil
                    .forName(mapReduceJobClass, AbstractHadoopJob.class).getConstructor();
            final AbstractHadoopJob hadoopJob = constructor.newInstance();
            hadoopJob.setConf(HadoopUtil.getCurrentConfiguration());
            hadoopJob.setAsync(true); // so the ToolRunner.run() returns right away
            logger.info("parameters of the MapReduceExecutable:");
            logger.info(params);
            String[] args = params.trim().split("\\s+");
            try {
                // for an async MR job, ToolRunner just returns 0

                // use this method instead of ToolRunner.run() because ToolRunner.run() is not thread-safe
                // Refer to: http://stackoverflow.com/questions/22462665/is-hadoops-toorunner-thread-safe
                MRUtil.runMRJob(hadoopJob, args);

                if (hadoopJob.isSkipped()) {
                    return new ExecuteResult(ExecuteResult.State.SUCCEED, "skipped");
                }
            } catch (Exception ex) {
                StringBuilder log = new StringBuilder();
                logger.error("error execute " + this.toString(), ex);
                StringWriter stringWriter = new StringWriter();
                ex.printStackTrace(new PrintWriter(stringWriter));
                log.append(stringWriter.toString()).append("\n");
                log.append("result code:").append(2);
                return new ExecuteResult(ExecuteResult.State.ERROR, log.toString());
            }
            job = hadoopJob.getJob();
        }
        final StringBuilder output = new StringBuilder();
        final HadoopCmdOutput hadoopCmdOutput = new HadoopCmdOutput(job, output);

        //            final String restStatusCheckUrl = getRestStatusCheckUrl(job, context.getConfig());
        //            if (restStatusCheckUrl == null) {
        //                logger.error("restStatusCheckUrl is null");
        //                return new ExecuteResult(ExecuteResult.State.ERROR, "restStatusCheckUrl is null");
        //            }
        //            String mrJobId = hadoopCmdOutput.getMrJobId();
        //            boolean useKerberosAuth = context.getConfig().isGetJobStatusWithKerberos();
        //            HadoopStatusChecker statusChecker = new HadoopStatusChecker(restStatusCheckUrl, mrJobId, output, useKerberosAuth);
        JobStepStatusEnum status = JobStepStatusEnum.NEW;
        while (!isDiscarded() && !isPaused()) {

            JobStepStatusEnum newStatus = HadoopJobStatusChecker.checkStatus(job, output);
            if (status == JobStepStatusEnum.KILLED) {
                mgr.updateJobOutput(getId(), ExecutableState.ERROR, hadoopCmdOutput.getInfo(),
                        "killed by admin");
                return new ExecuteResult(ExecuteResult.State.FAILED, "killed by admin");
            }
            if (status == JobStepStatusEnum.WAITING && (newStatus == JobStepStatusEnum.FINISHED
                    || newStatus == JobStepStatusEnum.ERROR || newStatus == JobStepStatusEnum.RUNNING)) {
                final long waitTime = System.currentTimeMillis() - getStartTime();
                setMapReduceWaitTime(waitTime);
            }
            mgr.addJobInfo(getId(), hadoopCmdOutput.getInfo());
            status = newStatus;
            if (status.isComplete()) {
                final Map<String, String> info = hadoopCmdOutput.getInfo();
                readCounters(hadoopCmdOutput, info);
                mgr.addJobInfo(getId(), info);

                if (status == JobStepStatusEnum.FINISHED) {
                    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
                } else {
                    return new ExecuteResult(ExecuteResult.State.FAILED, output.toString());
                }
            }
            Thread.sleep(context.getConfig().getYarnStatusCheckIntervalSeconds() * 1000L);
        }

        // try to kill running map-reduce job to release resources.
        if (job != null) {
            try {
                job.killJob();
            } catch (Exception e) {
                logger.warn("failed to kill hadoop job: " + job.getJobID(), e);
            }
        }

        if (isDiscarded()) {
            return new ExecuteResult(ExecuteResult.State.DISCARDED, output.toString());
        } else {
            return new ExecuteResult(ExecuteResult.State.STOPPED, output.toString());
        }

    } catch (ReflectiveOperationException e) {
        logger.error("error getMapReduceJobClass, class name:" + getParam(KEY_MR_JOB), e);
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
    } catch (Exception e) {
        logger.error("error execute " + this.toString(), e);
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
    }
}

From source file:org.huahinframework.manager.rest.service.JobService.java

License:Apache License

/**
 * Kills the job with the given job ID.
 *
 * @return {@link JSONObject} with the kill status
 */
@Path("/kill/id/{" + JOBID + "}")
@DELETE
@Produces(MediaType.APPLICATION_JSON)
public JSONObject killJobId(@PathParam(JOBID) String jobId) {
    Map<String, String> status = new HashMap<String, String>();
    try {
        Cluster cluster = new Cluster(getJobConf());
        for (JobStatus jobStatus : cluster.getAllJobStatuses()) {
            if (jobStatus.getJobID().toString().equals(jobId)) {
                Job job = cluster.getJob(jobStatus.getJobID());
                if (job == null) {
                    break;
                }

                job.killJob();
                status.put(Response.STATUS, "Killed job " + jobId);
                break;
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        log.error(e);
        status.put(Response.STATUS, e.getMessage());
    }

    if (status.isEmpty()) {
        status.put(Response.STATUS, "Could not find job " + jobId);
    }

    return new JSONObject(status);
}

From source file:org.huahinframework.manager.rest.service.JobService.java

License:Apache License

/**
 * Kills the first job whose name matches the given job name.
 *
 * @return {@link JSONObject} with the kill status
 */
@Path("/kill/name/{" + JOBNAME + "}")
@DELETE
@Produces(MediaType.APPLICATION_JSON)
public JSONObject killJobName(@PathParam(JOBNAME) String jobName) {
    Map<String, String> status = new HashMap<String, String>();
    try {
        Cluster cluster = new Cluster(getJobConf());
        for (JobStatus jobStatus : cluster.getAllJobStatuses()) {
            Job job = cluster.getJob(jobStatus.getJobID());
            if (job == null) {
                break;
            }

            if (job.getJobName().equals(jobName)) {
                job.killJob();
                status.put(Response.STATUS, "Killed job " + jobName);
                break;
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        log.error(e);
        status.put(Response.STATUS, e.getMessage());
    }

    if (status.isEmpty()) {
        status.put(Response.STATUS, "Could not find job " + jobName);
    }

    return new JSONObject(status);
}

From source file:org.mrgeo.mapreduce.job.JobListener.java

License:Apache License

public boolean cancelAll() throws JobCancelFailedException {
    boolean success = true;
    setCancelled();
    synchronized (jobsListLock) {
        for (Job job : jobsList) {
            _log.info("User requested cancellation - killing job " + job.getJobName());
            //this is a hadoop job, so kill it.
            try {
                job.killJob();
            } catch (IOException e) {
                //log it and record that the cancel failed
                //so the failure can be propagated after the loop
                _log.error("Kill job failed for " + job.getJobID());
                success = false;
            }
        }
        if (!success) {
            throw new JobCancelFailedException(
                    "Cancel failed for some of the hadoop jobs, see log for details.");
        }
    }
    return success;
}

From source file:org.springframework.data.hadoop.mapreduce.JobExecutor.java

License:Apache License

/**
 * Stops running jobs.
 *
 * @param listener job listener
 * @return list of stopped jobs.
 * @throws Exception
 */
protected Collection<Job> stopJobs(final JobListener listener) {
    shuttingDown = true;

    final Collection<Job> jbs = findJobs();
    final List<Job> killedJobs = new ArrayList<Job>();

    taskExecutor.execute(new Runnable() {
        @Override
        public void run() {

            Object listenerInit = null;
            if (listener != null) {
                listenerInit = listener.beforeAction();
            }

            try {
                for (final Job job : jbs) {
                    try {
                        if (JobUtils.getStatus(job).isRunning()) {
                            synchronized (killedJobs) {
                                killedJobs.add(job);
                            }
                            log.info("Killing job [" + job.getJobName() + "]");
                            job.killJob();
                            if (listener != null) {
                                listener.jobKilled(job);
                            }
                        }
                    } catch (Exception ex) {
                        log.warn("Cannot kill job [" + job.getJobName() + "]", ex);
                        if (RuntimeException.class.isAssignableFrom(ex.getClass())) {
                            throw (RuntimeException) ex;
                        } else {
                            throw new IllegalStateException(ex);
                        }
                    }
                }
            } finally {
                if (listener != null) {
                    listener.afterAction(listenerInit);
                }
            }
        }
    });

    return jbs;
}

From source file:water.hadoop.h2odriver.java

public static void killJobAndWait(Job job) {
    boolean killed = false;

    try {
        System.out.println("Attempting to clean up hadoop job...");
        job.killJob();
        for (int i = 0; i < 5; i++) {
            if (job.isComplete()) {
                System.out.println("Killed.");
                killed = true;
                break;
            }

            Thread.sleep(1000);
        }
    } catch (Exception ignore) {
    } finally {
        if (!killed) {
            System.out.println("Kill attempt failed, please clean up job manually.");
        }
    }
}

From source file:weka.distributed.hadoop.HadoopJob.java

License:Open Source License

/**
 * Runs the supplied job
 * 
 * @param job the job to run
 * @return true if the job was successful
 * @throws DistributedWekaException if a problem occurs
 */
protected boolean runJob(Job job) throws DistributedWekaException {
    try {
        m_stopRunningJob = false;
        if (DistributedJobConfig.isEmpty(getLoggingInterval())) {
            m_loggingInterval = "10";
        }
        int logInterval = Integer.parseInt(m_loggingInterval);
        System.out.println("Setting logging interval to " + logInterval);
        job.submit();

        try {
            int taskCompletionEventIndex = 0;
            while (!m_stopRunningJob && !job.isComplete()) {
                if (logInterval >= 1) {
                    printJobStatus(job);
                    taskCompletionEventIndex += logTaskMessages(job, taskCompletionEventIndex);

                    Thread.sleep(logInterval * 1000);
                } else {
                    Thread.sleep(60000);
                }
            }
        } catch (InterruptedException ie) {
            logMessage(ie.getMessage());
            m_stopRunningJob = true;
        }

        if (m_stopRunningJob && !job.isComplete()) {
            job.killJob();
        }
        m_stopRunningJob = false;

        return job.isSuccessful();
    } catch (Exception ex) {
        throw new DistributedWekaException(ex);
    }
}