Example usage for org.apache.hadoop.mapreduce.lib.jobcontrol JobControl getSuccessfulJobList

List of usage examples for org.apache.hadoop.mapreduce.lib.jobcontrol JobControl getSuccessfulJobList

Introduction

On this page you can find example usage for org.apache.hadoop.mapreduce.lib.jobcontrol JobControl getSuccessfulJobList.

Prototype

synchronized public List<ControlledJob> getSuccessfulJobList() 

Source Link

Usage

From source file:com.laizuozuoba.WordCount.java

License:Apache License

/**
 * Chains two MapReduce jobs with {@link JobControl}: a word-count job whose
 * output directory feeds a dependent "uv" aggregation job, then polls until
 * both reach a terminal state and prints the successful/failed job lists.
 *
 * @param args generic Hadoop options followed by &lt;in&gt; and &lt;out&gt; paths
 * @throws Exception if job setup fails or the polling sleep is interrupted
 */
public static void main(String[] args) throws Exception {
    // System.setProperty("hadoop.home.dir", "D:\\hadoop-2.2.0");
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
        System.err.println("Usage: wordcount <in> <out>");
        System.exit(2);
    }
    Job job = new Job(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

    // Second job consumes the first job's output directory.
    Job job2 = new Job(conf, "uv");
    job2.setJarByClass(WordCount.class);
    job2.setMapperClass(UVMapper.class);
    job2.setCombinerClass(UVReducer.class);
    job2.setReducerClass(UVReducer.class);
    job2.setOutputKeyClass(Text.class);
    job2.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job2, new Path(otherArgs[1]));
    FileOutputFormat.setOutputPath(job2, new Path("hdfs://10.18.106.67:9100/result2"));

    // job2 must not start until job completes successfully.
    ControlledJob controlledJob = new ControlledJob(job.getConfiguration());
    ControlledJob controlledJob2 = new ControlledJob(job2.getConfiguration());
    controlledJob2.addDependingJob(controlledJob);
    JobControl jc = new JobControl("123");
    jc.addJob(controlledJob);
    jc.addJob(controlledJob2);

    // JobControl.run() loops until stop() is called, so it must run on its
    // own thread while we poll for completion here.
    Thread jcThread = new Thread(jc);
    jcThread.start();
    while (true) {
        if (jc.allFinished()) {
            System.out.println(jc.getSuccessfulJobList());
            // BUG FIX: the original only reported failures while jobs were
            // still mid-flight; once every job reached a terminal state,
            // failed jobs were silently ignored. Report them here as well.
            if (!jc.getFailedJobList().isEmpty()) {
                System.out.println(jc.getFailedJobList());
            }
            jc.stop();
            break;
        }
        if (jc.getFailedJobList().size() > 0) {
            System.out.println(jc.getFailedJobList());
            jc.stop();
            break;
        }
        Thread.sleep(1000);
    }
    System.out.println("Finished!!!!!!!!!!!!!!!!!!!!!!!");
}

From source file:com.niuwa.hadoop.jobs.sample.JobControlTest.java

License:Apache License

/**
 * Builds a two-stage dependent job chain (user-id count, then a date-tagging
 * job over its output) and runs it under a {@link JobControl}, polling until
 * all jobs reach a terminal state.
 *
 * @param args optional: args[0] overrides the HDFS base path
 * @throws Exception if job setup fails or the polling sleep is interrupted
 */
public static void main(String[] args) throws Exception {
    HadoopUtil.isWinOrLiux();
    Configuration conf = new Configuration();
    String path = "hdfs://ns1:9000/user/root";
    if (args.length != 0) {
        path = args[0];
    }
    String[] args_1 = new String[] { path + "/chubao/input/contact",
            path + "/chubao/temp/" + DateUtil.format(new Date()) + "/contact_total",
            path + "/chubao/temp/" + DateUtil.format(new Date()) + "/contact_total_next" };
    String[] otherArgs = new GenericOptionsParser(conf, args_1).getRemainingArgs();
    // First job: count user ids from the contact input.
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(JobControlTest.class);
    job.setMapperClass(UserIdMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    // Clear any stale output from a previous run before submitting.
    deleteOutputFile(otherArgs[1], otherArgs[0]);
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

    // Second job: consumes the first job's output.
    Job job2 = Job.getInstance(conf, "job2");
    job2.setJarByClass(JobControlTest.class);
    job2.setMapperClass(AddDateMapper.class);
    job2.setReducerClass(Job2Reducer.class);
    job2.setOutputKeyClass(IntWritable.class);
    job2.setOutputValueClass(Text.class);

    FileInputFormat.addInputPath(job2, new Path(otherArgs[1]));
    deleteOutputFile(otherArgs[2], otherArgs[1]);
    FileOutputFormat.setOutputPath(job2, new Path(otherArgs[2]));

    // Wrap both jobs and declare the dependency: job2 runs only after job1.
    ControlledJob controlledJob1 = new ControlledJob(job.getConfiguration());
    ControlledJob controlledJob2 = new ControlledJob(job2.getConfiguration());
    controlledJob2.addDependingJob(controlledJob1);

    JobControl jobControl = new JobControl("JobControlDemoGroup");
    jobControl.addJob(controlledJob1);
    jobControl.addJob(controlledJob2);

    // JobControl.run() loops until stop() is called, so run it on its own
    // thread and poll for completion here.
    Thread jobControlThread = new Thread(jobControl);
    jobControlThread.start();
    while (true) {
        if (jobControl.allFinished()) {
            System.out.println(jobControl.getSuccessfulJobList());
            // BUG FIX: failed jobs were never reported; print them so a
            // partial failure is visible to the operator.
            if (!jobControl.getFailedJobList().isEmpty()) {
                System.out.println(jobControl.getFailedJobList());
            }
            jobControl.stop();
            break;
        }
        // BUG FIX: the original loop had no sleep and spun a CPU core at
        // 100% for the entire lifetime of the job chain.
        Thread.sleep(1000);
    }
}

From source file:ml.shifu.guagua.mapreduce.GuaguaMapReduceClient.java

License:Apache License

/**
 * Compute the progress of the current job submitted through the JobControl object jc to the JobClient jobClient
 * /*from  w w  w.j av  a  2  s.com*/
 * @param jc
 *            The JobControl object that has been submitted
 * @param jobClient
 *            The JobClient to which it has been submitted
 * @return The progress as a percentage in double format
 * @throws IOException
 *             In case any IOException connecting to JobTracker.
 */
protected double calculateProgress(JobControl jc, JobClient jobClient) throws IOException {
    double prog = 0.0;
    prog += jc.getSuccessfulJobList().size();

    List<ControlledJob> runnJobs = jc.getRunningJobList();
    for (ControlledJob cjob : runnJobs) {
        prog += progressOfRunningJob(cjob, jobClient);
    }
    return prog;
}

From source file:tv.icntv.grade.film.dbcollect.TableInitJob.java

License:Apache License

/**
 * For each HBase table named in {@code strings}, builds a two-stage job
 * chain — a table-scan/export job followed by a dependent DB-collect job —
 * and runs all chains under a single {@link JobControl}, blocking until
 * every job reaches a terminal state.
 *
 * @param strings the HBase table names to process
 * @return 0 when every job succeeded, 1 if any job failed
 * @throws Exception if job setup fails or the polling sleep is interrupted
 */
@Override
public int run(String[] strings) throws Exception {

    Configuration configuration = getConf();
    JobControl jobControl = new JobControl("init data");
    for (String table : strings) {
        // Per-table output locations, derived from configured path templates.
        String dbPath = String.format(configuration.get("hdfs.directory.base.db"), new Date(), table);
        String hbasePath = String.format(configuration.get("hdfs.directory.from.hbase"), new Date(), table);

        // Stage 1: scan the HBase table and write intermediate data to HDFS.
        Job tableJob = new Job(configuration, "icntv grade init");
        Scan scan = new Scan();

        // Remove stale outputs so the jobs do not fail on existing paths.
        HadoopUtils.deleteIfExist(hbasePath);
        HadoopUtils.deleteIfExist(dbPath);
        TableMapReduceUtil.initTableMapperJob(table, scan, TableInitMapper.class, Text.class, Text.class,
                tableJob);
        MapReduceUtils.initReducerJob(new Path(hbasePath), TableInitReducer.class, tableJob);
        ControlledJob firstControll = new ControlledJob(configuration);
        firstControll.setJob(tableJob);

        // Stage 2: map-only collect job over stage 1's output.
        Job db = new Job(configuration, "icntv db collect");
        configuration.setLong("mapred.min.split.size", 512 * 2014 * 1024L);
        MapReduceUtils.initMapperJob(DefaultHbaseMapper.class, Text.class, Text.class, this.getClass(), db,
                new Path(hbasePath));
        FileOutputFormat.setOutputPath(db, new Path(dbPath));
        db.setNumReduceTasks(0);
        ControlledJob secondaryController = new ControlledJob(configuration);
        secondaryController.setJob(db);
        secondaryController.addDependingJob(firstControll);
        jobControl.addJob(firstControll);
        jobControl.addJob(secondaryController);
    }
    new Thread(jobControl).start();
    while (!jobControl.allFinished()) {
        Thread.sleep(5000);
    }
    // BUG FIX: JobControl.run() keeps looping until stop() is called, so the
    // monitor thread leaked in the original version; stop it explicitly.
    jobControl.stop();
    logger.info("job controller successed job size=" + jobControl.getSuccessfulJobList().size());
    // BUG FIX: the original always returned 0; surface job failures through
    // the tool's exit code so callers (e.g. ToolRunner) can detect them.
    return jobControl.getFailedJobList().isEmpty() ? 0 : 1;
}