Example usage for org.apache.hadoop.mapreduce Job killJob


Introduction

This page collects example usages of org.apache.hadoop.mapreduce.Job#killJob().

Prototype

public void killJob() throws IOException 

Document

Kill the running job.
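
Before the real-world examples below, here is a minimal sketch of how killJob() is typically used: submit a job, poll its status, and kill it if it runs past a deadline. The job name, driver class, and the 60-second budget are illustrative assumptions, and the usual mapper/input/output configuration is omitted for brevity.

// Minimal sketch: kill a submitted job that exceeds an assumed deadline.
// Mapper, reducer, and input/output settings are omitted for brevity.
private void runWithDeadline(Configuration conf)
        throws IOException, InterruptedException, ClassNotFoundException {
    Job job = Job.getInstance(conf, "example-job"); // illustrative job name
    job.setJarByClass(getClass());
    job.submit();
    long deadline = System.currentTimeMillis() + 60_000L; // assumed 60-second budget
    while (!job.isComplete()) {
        if (System.currentTimeMillis() > deadline) {
            job.killJob(); // ask the cluster to terminate the still-running job
            break;
        }
        Thread.sleep(1000L);
    }
}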

Usage

From source file:edu.iu.sgd.SGDLauncher.java

License:Apache License

private void runSGD(Path inputDir, int r, double lambda, double epsilon, int numIterations, int trainRatio,
        int numMapTasks, int numThreadsPerWorker, double scheduleRatio, int mem, Path modelDir, Path outputDir,
        String testFilePath, Configuration configuration)
        throws IOException, URISyntaxException, InterruptedException, ClassNotFoundException {
    System.out.println("Starting Job");
    int jobID = 0;
    long perJobSubmitTime = System.currentTimeMillis();
    System.out.println("Start Job#" + jobID + " "
            + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    Job sgdJob = configureSGDJob(inputDir, r, lambda, epsilon, numIterations, trainRatio, numMapTasks,
            numThreadsPerWorker, scheduleRatio, mem, modelDir, outputDir, testFilePath, configuration, jobID);
    boolean jobSuccess = sgdJob.waitForCompletion(true);
    System.out.println("End Jod#" + jobID + " "
            + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    System.out.println("| Job#" + jobID + " Finished in " + (System.currentTimeMillis() - perJobSubmitTime)
            + " miliseconds |");
    // ----------------------------------------
    if (!jobSuccess) {
        sgdJob.killJob();
        System.out.println("SGD Job failed. Job ID:" + jobID);
    }
}

From source file:gobblin.compaction.mapreduce.MRCompactor.java

License:Apache License

@Override
public void cancel() throws IOException {
    try {
        for (Map.Entry<Dataset, Job> entry : MRCompactor.RUNNING_MR_JOBS.entrySet()) {
            Job hadoopJob = entry.getValue();
            if (!hadoopJob.isComplete()) {
                LOG.info(String.format("Killing hadoop job %s for dataset %s", hadoopJob.getJobID(),
                        entry.getKey()));
                hadoopJob.killJob();
            }
        }
    } finally {
        try {
            ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG), 0, TimeUnit.NANOSECONDS);
        } finally {
            if (this.verifier.isPresent()) {
                this.verifier.get().closeNow();
            }
        }
    }
}

From source file:gobblin.compaction.mapreduce.MRCompactorJobRunner.java

License:Apache License

private void submitAndWait(Job job) throws ClassNotFoundException, IOException, InterruptedException {
    job.submit();
    MRCompactor.addRunningHadoopJob(this.dataset, job);
    LOG.info(String.format("MR job submitted for dataset %s, input %s, url: %s", this.dataset, getInputPaths(),
            job.getTrackingURL()));
    while (!job.isComplete()) {
        if (this.policy == Policy.ABORT_ASAP) {
            LOG.info(String.format("MR job for dataset %s, input %s killed due to input data incompleteness."
                    + " Will try again later", this.dataset, getInputPaths()));
            job.killJob();
            return;
        }
        Thread.sleep(MR_JOB_CHECK_COMPLETE_INTERVAL_MS);
    }
    if (!job.isSuccessful()) {
        throw new RuntimeException(String.format("MR job failed for topic %s, input %s, url: %s", this.dataset,
                getInputPaths(), job.getTrackingURL()));
    }
}

From source file:io.hops.erasure_coding.MapReduceBlockRepairManager.java

License:Apache License

@Override
public List<Report> computeReports() {
    List<Report> reports = new ArrayList<Report>();

    for (Map.Entry<String, Job> entry : currentRepairs.entrySet()) {
        String fileName = entry.getKey();
        Job job = entry.getValue();
        try {
            if (job.isComplete() && job.isSuccessful()) {
                LOG.info("REPAIR COMPLETE");
                reports.add(new Report(fileName, Report.Status.FINISHED));
                cleanup(job);
            } else if (job.isComplete() && !job.isSuccessful()) {
                LOG.info("REPAIR FAILED");
                reports.add(new Report(fileName, Report.Status.FAILED));
                cleanup(job);
            } /* TODO FIX timeout
              else if (System.currentTimeMillis() - job.getStartTime() > getMaxFixTimeForFile()) {
              LOG.info("Timeout: " + (System.currentTimeMillis() - job.getStartTime()) + " " + job.getStartTime());
              job.killJob();
              reports.add(new Report(fileName, Report.Status.CANCELED));
              cleanup(job);
              }*/ else {
                LOG.info("REPAIR RUNNING");
                reports.add(new Report(fileName, Report.Status.ACTIVE));
            }
        } catch (Exception e) {
            LOG.info("Exception during completeness check", e);
            try {
                job.killJob();
            } catch (Exception e1) {
            }
            reports.add(new Report(fileName, Report.Status.FAILED));
            cleanup(job);
        }
    }

    for (Report report : reports) {
        Report.Status status = report.getStatus();
        if (status == Report.Status.FINISHED || status == Report.Status.FAILED
                || status == Report.Status.CANCELED) {
            currentRepairs.remove(report.getFilePath());
        }
    }

    return reports;
}

From source file:io.hops.erasure_coding.MapReduceBlockRepairManager.java

License:Apache License

@Override
public void cancelAll() {
    for (Job job : currentRepairs.values()) {
        try {
            job.killJob();
        } catch (Exception e) {
            LOG.error("Exception", e);
        }
        cleanup(job);
    }
    currentRepairs.clear();
}

From source file:io.hops.erasure_coding.MapReduceBlockRepairManager.java

License:Apache License

@Override
public void cancel(String toCancel) {
    Job job = currentRepairs.get(toCancel);
    try {
        job.killJob();
    } catch (Exception e) {
        LOG.error("Exception", e);
    }
    currentRepairs.remove(toCancel);
    cleanup(job);
}

From source file:net.thecubic.mockbi.MockBIMapReduce.java

License:Apache License

public void run() throws Exception {
    Configuration conf = new Configuration();
    Job job = new Job(conf, "MockBIMapReduce");

    Scan scan = new Scan();
    job.setJarByClass(this.getClass());
    TableMapReduceUtil.initTableMapperJob(MockBI.logTableName, scan, MockBILogMapper.class,
            MockBISummaryKey.class, DoubleWritable.class, job, true);
    TableMapReduceUtil.initTableReducerJob(MockBI.summaryTableName, MockBILogReducer.class, job);
    job.setInputFormatClass(TableInputFormat.class);
    job.setOutputFormatClass(TableOutputFormat.class);

    conf.setInt("initialsummary_interval", Calendar.MINUTE);

    conf.set(TableOutputFormat.OUTPUT_TABLE, MockBI.summaryTableName);

    //job.setOutputKeyClass();
    //job.setOutputValueClass();

    TableMapReduceUtil.addDependencyJars(job);

    boolean jobr;

    try {
        jobr = job.waitForCompletion(true);
    } catch (InterruptedException e) {
        System.err.println("killing job");
        job.killJob();
        jobr = false;
    }

}

From source file:org.acacia.csr.java.ZeroVertexSearcher.java

License:Apache License

public static void main(String[] args) throws Exception {
    /*
    String dir1 = "/user/miyuru/wcout";
     //We first delete the temporary directories if they exist on the HDFS
      FileSystem fs1 = FileSystem.get(new JobConf());
              
     if(fs1.exists(new Path(dir1))){
        fs1.delete(new Path(dir1), true);
     }
            
    JobConf conf = new JobConf();
    conf.setNumMapTasks(96);
    Job job = new Job(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(LongWritable.class);
            
    job.setSortComparatorClass(SortComparator.class);
    FileInputFormat.addInputPath(job, new Path("/user/miyuru/input"));
    FileOutputFormat.setOutputPath(job, new Path(dir1));
    job.waitForCompletion(true); 
    */

    String dir3 = "/user/miyuru/zout";
    String dir5 = "/user/miyuru/input";
    //We first delete the temporary directories if they exist on the HDFS
    FileSystem fs3 = FileSystem.get(new JobConf());

    if (fs3.exists(new Path(dir3))) {
        fs3.delete(new Path(dir3), true);
    }

    JobConf conf3 = new JobConf();
    conf3.setNumMapTasks(96);
    FileInputFormat.addInputPath(conf3, new Path(dir5));
    FileOutputFormat.setOutputPath(conf3, new Path(dir3));
    conf3.set("mapred.map.max.attempts", "0");//If the job fails we assume that it happens because we found zero. Therfore we do not attempt again.
    Job job3 = new Job(conf3, "zero_vertex_search");
    job3.setJarByClass(ZeroVertexSearcher.class);
    job3.setMapperClass(TokenizerMapper.class);
    job3.setCombinerClass(IntSumReducer.class);
    job3.setReducerClass(IntSumReducer.class);
    job3.setOutputKeyClass(LongWritable.class);
    job3.setOutputValueClass(LongWritable.class);
    job3.setNumReduceTasks(0);

    job3.setSortComparatorClass(SortComparator.class);
    try {
        job3.waitForCompletion(true);
    } catch (org.acacia.csr.java.ZeroFoundException ex) {
        System.out.println("Found Zero vertex");
        job3.killJob();
    }
    System.out.println("------Done Zero Vertex search---------------");

}

From source file:org.apache.blur.mapreduce.lib.BlurOutputFormatTest.java

License:Apache License

public void testBlurOutputFormatCleanupDuringJobKillTest()
        throws IOException, InterruptedException, ClassNotFoundException {
    Path input = getInDir();
    Path output = getOutDir();
    _fileSystem.delete(input, true);
    _fileSystem.delete(output, true);
    // 1500 * 50 = 75,000
    writeRecordsFile(new Path(input, "part1"), 1, 50, 1, 1500, "cf1");
    // 100 * 5000 = 500,000
    writeRecordsFile(new Path(input, "part2"), 1, 5000, 2000, 100, "cf1");

    Job job = Job.getInstance(_conf, "blur index");
    job.setJarByClass(BlurOutputFormatTest.class);
    job.setMapperClass(CsvBlurMapper.class);
    job.setInputFormatClass(TextInputFormat.class);

    FileInputFormat.addInputPath(job, input);
    CsvBlurMapper.addColumns(job, "cf1", "col");

    Path tablePath = new Path(new Path(_root, "table"), "test");

    TableDescriptor tableDescriptor = new TableDescriptor();
    tableDescriptor.setShardCount(2);
    tableDescriptor.setTableUri(tablePath.toString());
    tableDescriptor.setName("test");

    createShardDirectories(getOutDir(), 2);

    BlurOutputFormat.setupJob(job, tableDescriptor);
    BlurOutputFormat.setOutputPath(job, output);
    BlurOutputFormat.setIndexLocally(job, false);

    job.submit();
    boolean killCalled = false;
    while (!job.isComplete()) {
        Thread.sleep(1000);
        System.out.printf("Killed [" + killCalled + "] Map [%f] Reduce [%f]%n", job.mapProgress() * 100,
                job.reduceProgress() * 100);
        if (job.reduceProgress() > 0.7 && !killCalled) {
            job.killJob();
            killCalled = true;
        }
    }

    assertFalse(job.isSuccessful());

    for (int i = 0; i < tableDescriptor.getShardCount(); i++) {
        Path path = new Path(output, ShardUtil.getShardName(i));
        FileSystem fileSystem = path.getFileSystem(job.getConfiguration());
        FileStatus[] listStatus = fileSystem.listStatus(path);
        assertEquals(toString(listStatus), 0, listStatus.length);
    }
}

From source file:org.apache.crunch.impl.mr.exec.MRExecutorIT.java

License:Apache License

/**
 * Tests that the pipeline is stopped immediately when one of its jobs
 * fails, and that the remaining running jobs are killed.
 */
@Test
public void testStopPipelineImmediatelyOnJobFailure() throws Exception {
    String inPath = tmpDir.copyResourceFileName("shakes.txt");
    MRPipeline pipeline = new MRPipeline(MRExecutorIT.class);

    // Issue two jobs that sleep forever.
    PCollection<String> in = pipeline.read(From.textFile(inPath));
    for (int i = 0; i < 2; i++) {
        in.count().values().parallelDo(new SleepForeverFn(), longs())
                .write(To.textFile(tmpDir.getPath("out_" + i)));
    }
    MRPipelineExecution exec = pipeline.runAsync();

    // Wait until both of the two jobs are submitted.
    List<MRJob> jobs = exec.getJobs();
    assertEquals(2, jobs.size());
    StopWatch watch = new StopWatch();
    watch.start();
    int numOfJobsSubmitted = 0;
    while (numOfJobsSubmitted < 2 && watch.getTime() < 10000) {
        numOfJobsSubmitted = 0;
        for (MRJob job : jobs) {
            if (job.getJobState() == MRJob.State.RUNNING) {
                numOfJobsSubmitted++;
            }
        }
        Thread.sleep(100);
    }
    assertEquals(2, numOfJobsSubmitted);

    // Kill one of them.
    Job job0 = jobs.get(0).getJob();
    job0.killJob();

    // Expect the pipeline exits and the other job is killed.
    StopWatch watch2 = new StopWatch();
    watch2.start();
    Job job1 = jobs.get(1).getJob();
    while (!job1.isComplete() && watch2.getTime() < 10000) {
        Thread.sleep(100);
    }
    assertTrue(job1.isComplete());
    assertEquals(PipelineExecution.Status.FAILED, exec.getStatus());
}