Example usage for org.apache.hadoop.mapred RunningJob killJob

Introduction

This page collects example usages of org.apache.hadoop.mapred RunningJob killJob from open-source projects.

Prototype

public void killJob() throws IOException;

Document

Kill the running job.
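
A minimal usage sketch (the class and method names below are illustrative, not taken from any of the projects listed under Usage): look up the RunningJob through a JobClient and, if the job is still active, call killJob().

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public class KillJobSketch {
    /**
     * Kills the job with the given ID if it is still running.
     *
     * @return true if a kill was issued, false if the job was not found
     *         or had already completed
     */
    public static boolean killIfRunning(JobConf conf, String jobIdString) throws IOException {
        JobClient jobClient = new JobClient(conf);
        try {
            RunningJob job = jobClient.getJob(JobID.forName(jobIdString));
            if (job == null || job.isComplete()) {
                return false; // nothing to kill
            }
            job.killJob();
            return true;
        } finally {
            jobClient.close();
        }
    }
}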

Usage

From source file:DataJoinJob.java

License:Apache License

/**
 * Submit a map/reduce job and wait for it to complete.
 *
 * @param job the job configuration to submit
 * @return true if the job completed successfully
 * @throws IOException if the job cannot be submitted or monitored
 */
public static boolean runJob(JobConf job) throws IOException {
    JobClient jc = new JobClient(job);
    boolean success = true;
    RunningJob running = null;
    try {
        running = jc.submitJob(job);
        JobID jobId = running.getID();
        System.out.println("Job " + jobId + " is submitted");
        while (!running.isComplete()) {
            System.out.println("Job " + jobId + " is still running.");
            try {
                Thread.sleep(60000);
            } catch (InterruptedException e) {
                // ignore the interruption and poll again
            }
            running = jc.getJob(jobId);
        }
        success = running.isSuccessful();
    } finally {
        if (!success && (running != null)) {
            running.killJob();
        }
        jc.close();
    }
    return success;
}

From source file:com.ikanow.infinit.e.processing.custom.CustomProcessingController.java

License:Open Source License

public boolean killRunningJob(CustomMapReduceJobPojo jobToKillInfo) {
    try {
        Configuration conf = new Configuration();
        JobClient jc = new JobClient(InfiniteHadoopUtils.getJobClientConnection(prop_custom), conf);
        jc.setConf(conf); // (doesn't seem to be set by the above call)

        RunningJob jobToKill = jc.getJob(new JobID(jobToKillInfo.jobidS, jobToKillInfo.jobidN));
        if (null == jobToKill) {
            _logger.error("Couldn't find this job: " + jobToKillInfo.jobidS + "_" + jobToKillInfo.jobidN + " / "
                    + new JobID(jobToKillInfo.jobidS, jobToKillInfo.jobidN).toString());
            return false;
        }
        jobToKill.killJob();

        int nRuns = 0;
        while (!checkRunningJobs(jobToKillInfo)) {
            try {
                Thread.sleep(5000);
            } catch (Exception e) {
                // ignore and keep polling
            }
            if (++nRuns > 24) { // bail out after 2 minutes 
                _logger.error("Killed job: " + jobToKillInfo.jobidS + "_" + jobToKillInfo.jobidN
                        + ", but job failed to stop within time allowed");
                return false;
            }
        }
        if (null != jobToKillInfo.derivedFromSourceKey) { // Update the derived source, if one exists
            BasicDBObject query = new BasicDBObject(SourcePojo.key_, jobToKillInfo.derivedFromSourceKey);
            BasicDBObject setUpdate = new BasicDBObject(SourceHarvestStatusPojo.sourceQuery_harvest_status_,
                    HarvestEnum.error.toString());
            setUpdate.put(SourceHarvestStatusPojo.sourceQuery_harvest_message_, "Manually stopped");
            BasicDBObject srcUpdate = new BasicDBObject(DbManager.set_, setUpdate);
            DbManager.getIngest().getSource().update(query, srcUpdate, false, false);
        } //TESTED (usually a bit pointless because it is then overwritten by the source publish)
        return true;
    } catch (Exception e) {
        _logger.error("Failed to kill job: " + jobToKillInfo.jobidS + "_" + jobToKillInfo.jobidN + " / "
                + e.getMessage(), e);
        return false;
    }
}

From source file:com.ngdata.hbaseindexer.master.IndexerMaster.java

License:Apache License

private void prepareDeleteIndex(String indexerName) {
    // We do not have to take a lock on the indexer, since once in delete state the indexer cannot
    // be modified anymore by ordinary users.
    boolean canBeDeleted = false;
    try {
        // Read current situation of record and assure it is still actual
        IndexerDefinition indexer = indexerModel.getFreshIndexer(indexerName);
        if (indexer.getLifecycleState() == IndexerDefinition.LifecycleState.DELETE_REQUESTED) {
            canBeDeleted = true;

            String queueSubscriptionId = indexer.getSubscriptionId();
            if (queueSubscriptionId != null) {
                sepModel.removeSubscription(indexer.getSubscriptionId());
                // We leave the subscription ID in the indexer definition FYI
            }

            if (indexer.getActiveBatchBuildInfo() != null) {
                JobClient jobClient = getJobClient();
                Set<String> jobs = indexer.getActiveBatchBuildInfo().getMapReduceJobTrackingUrls().keySet();
                for (String jobId : jobs) {
                    RunningJob job = jobClient.getJob(JobID.forName(jobId));
                    if (job != null) {
                        job.killJob();
                        log.info("Kill indexer build job for indexer " + indexerName + ", job ID =  " + jobId);
                    }
                    canBeDeleted = false;
                }
            }

            if (!canBeDeleted) {
                indexer = new IndexerDefinitionBuilder().startFrom(indexer)
                        .lifecycleState(IndexerDefinition.LifecycleState.DELETING).build();
                indexerModel.updateIndexerInternal(indexer);
            }
        } else if (indexer.getLifecycleState() == IndexerDefinition.LifecycleState.DELETING) {
            // Check if the build job is already finished, if so, allow delete
            if (indexer.getActiveBatchBuildInfo() == null) {
                canBeDeleted = true;
            }
        }
    } catch (Throwable t) {
        log.error("Error preparing deletion of indexer " + indexerName, t);
    }

    if (canBeDeleted) {
        deleteIndexer(indexerName);
    }
}

From source file:edu.stolaf.cs.wmrserver.HadoopEngine.java

License:Apache License

public void kill(Submission submission) throws NotFoundException, IllegalJobStateException, InternalException {
    RunningJob job = getJob(submission);

    // Attempt to kill job
    try {
        if (job.isComplete())
            throw new IllegalJobStateException("Job was already complete before it could be killed.");

        job.killJob();
    } catch (IOException ex) {
        throw JobServiceHandler.wrapException("Job could not be killed.", ex);
    }
}

From source file:hydrograph.engine.cascading.flow.CustomFlowStepListener.java

License:Apache License

@Override
public boolean onStepThrowable(FlowStep flowstep, Throwable arg1) {
    for (Flow flow : flows) {
        // Use a distinct name so the list does not shadow the 'flows' field being iterated
        List<FlowStepStats> stepStats = flow.getFlowStats().getFlowStepStats();
        for (FlowStepStats flowStat : stepStats) {
            HadoopStepStats stats = (HadoopStepStats) flowStat;
            try {
                RunningJob runningJob = stats.getJobStatusClient();
                if (runningJob != null) {
                    JobID jobID = runningJob.getID();
                    LOG.error("Killing Job " + jobID.getId());
                    runningJob.killJob();
                    LOG.info("Job: '" + jobID.getId() + "' started at: " + stats.getStartTime()
                            + " killed successfully!");
                }
            } catch (Exception e) {
                LOG.error("", e);
                throw new RuntimeException(e);
            }
        }
    }
    return true;
}

From source file:org.apache.giraph.graph.BspServiceMaster.java

License:Apache License

/**
 * When there is no salvaging this job, fail it.
 */
private void failJob() {
    LOG.fatal("failJob: Killing job " + getJobId());
    try {
        @SuppressWarnings("deprecation")
        org.apache.hadoop.mapred.JobClient jobClient = new org.apache.hadoop.mapred.JobClient(
                (org.apache.hadoop.mapred.JobConf) getConfiguration());
        @SuppressWarnings("deprecation")
        org.apache.hadoop.mapred.JobID jobId = org.apache.hadoop.mapred.JobID.forName(getJobId());
        RunningJob job = jobClient.getJob(jobId);
        if (job != null) {
            job.killJob();
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.giraph.master.BspServiceMaster.java

License:Apache License

/**
 * When there is no salvaging this job, fail it.
 *
 * @param e exception to log to observers
 */
private void failJob(Exception e) {
    LOG.fatal("failJob: Killing job " + getJobId());
    LOG.fatal("failJob: exception " + e.toString());
    try {
        if (getConfiguration().isPureYarnJob()) {
            throw new RuntimeException("BspServiceMaster (YARN profile) is "
                    + "FAILING this task, throwing exception to end job run.", e);
        } else {
            @SuppressWarnings("deprecation")
            org.apache.hadoop.mapred.JobClient jobClient = new org.apache.hadoop.mapred.JobClient(
                    (org.apache.hadoop.mapred.JobConf) getContext().getConfiguration());
            @SuppressWarnings("deprecation")
            JobID jobId = JobID.forName(getJobId());
            RunningJob job = jobClient.getJob(jobId);
            if (job != null) {
                job.killJob();
            } else {
                LOG.error("Jon not found for jobId=" + getJobId());
            }
        }
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    } finally {
        failureCleanup(e);
    }
}

From source file:org.apache.oozie.action.hadoop.JavaActionExecutor.java

License:Apache License

@Override
public void kill(Context context, WorkflowAction action) throws ActionExecutorException {
    JobClient jobClient = null;
    boolean exception = false;
    try {
        Element actionXml = XmlUtils.parseXml(action.getConf());
        JobConf jobConf = createBaseHadoopConf(context, actionXml);
        jobClient = createJobClient(context, jobConf);
        RunningJob runningJob = getRunningJob(context, action, jobClient);
        if (runningJob != null) {
            runningJob.killJob();
        }
        context.setExternalStatus(KILLED);
        context.setExecutionData(KILLED, null);
    } catch (Exception ex) {
        exception = true;
        throw convertException(ex);
    } finally {
        try {
            FileSystem actionFs = context.getAppFileSystem();
            cleanUpActionDir(actionFs, context);
            if (jobClient != null) {
                jobClient.close();
            }
        } catch (Exception ex) {
            if (exception) {
                LOG.error("Error: ", ex);
            } else {
                throw convertException(ex);
            }
        }
    }
}

From source file:org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher.java

License:Apache License

@Override
public void killJob(String jobID, Configuration conf) throws BackendException {
    try {
        if (conf != null) {
            JobConf jobConf = new JobConf(conf);
            JobClient jc = new JobClient(jobConf);
            JobID id = JobID.forName(jobID);
            RunningJob job = jc.getJob(id);
            if (job == null)
                System.out.println("Job with id " + jobID + " is not active");
            else {
                job.killJob();
                log.info("Kill " + id + " submitted.");
            }
        }
    } catch (IOException e) {
        throw new BackendException(e);
    }
}

From source file:org.apache.sqoop.submission.mapreduce.MapreduceSubmissionEngine.java

License:Apache License

/**
 * {@inheritDoc}
 */
@Override
public void stop(String submissionId) {
    try {
        RunningJob runningJob = jobClient.getJob(JobID.forName(submissionId));
        if (runningJob == null) {
            return;
        }

        runningJob.killJob();
    } catch (IOException e) {
        throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0003, e);
    }
}