List of usage examples for the org.apache.hadoop.mapreduce.Job constructor
Job(Configuration conf) throws IOException
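Before the collected examples, a minimal self-contained sketch of this constructor in use, as a map-only identity pass over text input. The class name, job name, and the "in"/"out" paths are hypothetical. Note that this constructor is deprecated as of Hadoop 2 in favor of the Job.getInstance(Configuration) factory, shown in the comment near the end.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class JobConstructorExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The constructor copies the passed-in Configuration; changes made to
        // conf after this point are not seen by the job.
        Job job = new Job(conf);
        job.setJobName("example job");                         // hypothetical name
        job.setJarByClass(JobConstructorExample.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        // No mapper/reducer set: the framework's identity classes are used,
        // so output types match TextInputFormat's LongWritable/Text records.
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path("in"));    // hypothetical path
        FileOutputFormat.setOutputPath(job, new Path("out"));  // hypothetical path
        // On Hadoop 2+ prefer the factory over the deprecated constructor:
        // Job job = Job.getInstance(conf);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}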
From source file:co.nubetech.hiho.merge.MergeJob.java
License:Apache License
@Override
public int run(String[] args) throws Exception {
    populateConfiguration(args);
    try {
        checkMandatoryConfs();
    } catch (HIHOException e1) {
        e1.printStackTrace();
        throw new Exception(e1);
    }
    // formats and output key/value types are resolved reflectively by name
    Class inputFormatClass = Class.forName(inputFormat);
    Class outputFormatClass = Class.forName(outputFormat);
    Class inputKeyClass = Class.forName(inputKeyClassName);
    Class inputValueClass = Class.forName(inputValueClassName);

    Configuration conf = getConf();
    conf.set(HIHOConf.MERGE_OLD_PATH, oldPath);
    conf.set(HIHOConf.MERGE_NEW_PATH, newPath);

    Job job = new Job(conf);
    job.setJobName("Merge job");
    job.setJarByClass(MergeJob.class);

    // pick the mapper/reducer pair according to the merge strategy
    if (mergeBy.equals("key")) {
        job.setMapperClass(MergeKeyMapper.class);
        job.setReducerClass(MergeKeyReducer.class);
    } else if (mergeBy.equals("value")) {
        job.setMapperClass(MergeValueMapper.class);
        job.setReducerClass(MergeValueReducer.class);
    }

    job.setInputFormatClass(inputFormatClass);
    DelimitedTextInputFormat.setProperties(job, delimiter, column);
    job.setMapOutputKeyClass(HihoTuple.class);
    job.setMapOutputValueClass(HihoValue.class);
    job.setOutputKeyClass(inputKeyClass);
    job.setOutputValueClass(inputValueClass);

    // both the old and the new dataset are read, as a comma-separated path list
    FileInputFormat.setInputPaths(job, oldPath + "," + newPath);
    job.setOutputFormatClass(outputFormatClass);
    FileOutputFormat.setOutputPath(job, new Path(outputPath));

    try {
        logger.debug("Output format class is " + job.getOutputFormatClass());
        logger.debug("Class is " + ReflectionUtils
                .newInstance(job.getOutputFormatClass(), job.getConfiguration()).getClass().getName());
        job.waitForCompletion(false);
        if (job.isComplete()) {
            // report merge statistics from the job counters
            Counters counters = job.getCounters();
            totalRecordsOld = counters.findCounter(MergeRecordCounter.TOTAL_RECORDS_OLD).getValue();
            totalRecordsNew = counters.findCounter(MergeRecordCounter.TOTAL_RECORDS_NEW).getValue();
            badRecords = counters.findCounter(MergeRecordCounter.BAD_RECORD).getValue();
            output = counters.findCounter(MergeRecordCounter.OUTPUT).getValue();
            logger.info("Total old records read are: " + totalRecordsOld);
            logger.info("Total new records read are: " + totalRecordsNew);
            logger.info("Bad Records are: " + badRecords);
            logger.info("Output records are: " + output);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return 0;
}
From source file:co.nubetech.hiho.similarity.ngram.NGramJob.java
License:Apache License
@Override
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    populateConfiguration(args);
    try {
        checkMandatoryConfs();
    } catch (HIHOException e1) {
        e1.printStackTrace();
        throw new Exception(e1);
    }
    Job job = new Job(conf);
    job.setJobName("NGram job");
    job.setJarByClass(NGramJob.class);

    // formats and key/value types are resolved reflectively by class name
    Class inputFormatClass = Class.forName("org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat");
    Class outputFormatClass = Class.forName("org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat");
    // org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
    // org.apache.hadoop.mapreduce.lib.output.TextOutputFormat
    Class inputKeyClass = Class.forName("org.apache.hadoop.io.Text");
    Class inputValueClass = Class.forName("org.apache.hadoop.io.Text");
    Class outputKeyClass = Class.forName("co.nubetech.hiho.similarity.ngram.ValuePair");
    Class outputValueClass = Class.forName("org.apache.hadoop.io.IntWritable");

    job.setMapperClass(NGramMapper.class);
    job.setReducerClass(NGramReducer.class);
    job.setInputFormatClass(inputFormatClass);
    job.setMapOutputKeyClass(inputKeyClass);
    job.setMapOutputValueClass(inputValueClass);
    job.setOutputKeyClass(outputKeyClass);
    job.setOutputValueClass(outputValueClass);
    job.setOutputFormatClass(outputFormatClass);

    FileInputFormat.setInputPaths(job, inputPath);
    // the fixed output directory is consumed downstream by ScoreJob
    FileOutputFormat.setOutputPath(job, new Path("outputOfNGramJob"));

    int ret = 0;
    try {
        ret = job.waitForCompletion(true) ? 0 : 1;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return ret;
}
From source file:co.nubetech.hiho.similarity.ngram.ScoreJob.java
License:Apache License
@Override
public int run(String[] arg0) throws Exception {
    Configuration conf = getConf();
    Job job = new Job(conf);
    job.setJobName("Score job");
    job.setJarByClass(ScoreJob.class);

    Class inputFormatClass = Class.forName("org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat");
    Class outputFormatClass = Class.forName("org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat");
    // org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
    // org.apache.hadoop.mapreduce.lib.output.TextOutputFormat
    Class inputKeyClass = Class.forName("co.nubetech.hiho.similarity.ngram.ValuePair");
    Class inputValueClass = Class.forName("org.apache.hadoop.io.IntWritable");
    Class outputKeyClass = Class.forName("co.nubetech.hiho.similarity.ngram.ValuePair");
    Class outputValueClass = Class.forName("org.apache.hadoop.io.LongWritable");

    job.setMapperClass(ScoreMapper.class);
    job.setReducerClass(ScoreReducer.class);
    job.setInputFormatClass(inputFormatClass);
    job.setMapOutputKeyClass(inputKeyClass);
    job.setMapOutputValueClass(inputValueClass);
    job.setOutputKeyClass(outputKeyClass);
    job.setOutputValueClass(outputValueClass);
    job.setOutputFormatClass(outputFormatClass);

    // reads the directory written by NGramJob, writes its own fixed directory
    FileInputFormat.setInputPaths(job, "outputOfNGramJob");
    FileOutputFormat.setOutputPath(job, new Path("outputOfScoreJob"));

    int ret = 0;
    try {
        ret = job.waitForCompletion(true) ? 0 : 1;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return ret;
}
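Since ScoreJob reads the fixed "outputOfNGramJob" directory that NGramJob writes, the two jobs are meant to run back to back. A minimal driver sketch, assuming both classes follow the usual Configured/Tool pattern their run(String[]) methods suggest (SimilarityDriver is a hypothetical name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class SimilarityDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NGramJob writes "outputOfNGramJob", which ScoreJob then consumes.
        int ret = ToolRunner.run(conf, new NGramJob(), args);
        if (ret == 0) {
            ret = ToolRunner.run(conf, new ScoreJob(), new String[0]);
        }
        System.exit(ret);
    }
}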
From source file:com.alexholmes.hadooputils.combine.seqfile.mapreduce.CombineSequenceFileJob.java
License:Apache License
/**
 * The driver for the MapReduce job.
 *
 * @param conf              configuration
 * @param inputDirAsString  input directory in CSV-form
 * @param outputDirAsString output directory
 * @return true if the job completed successfully
 * @throws java.io.IOException         if something went wrong
 * @throws java.net.URISyntaxException if a URI wasn't correctly formed
 */
public boolean runJob(final Configuration conf, final String inputDirAsString, final String outputDirAsString)
        throws IOException, URISyntaxException, ClassNotFoundException, InterruptedException {
    Job job = new Job(conf);
    job.setJarByClass(CombineSequenceFileJob.class);
    job.setJobName("seqfilecombiner");
    job.setNumReduceTasks(0);
    // job.setMapperClass(IdentityMapper.class);
    job.setInputFormatClass(CombineSequenceFileInputFormat.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    FileInputFormat.setInputPaths(job, inputDirAsString);
    FileOutputFormat.setOutputPath(job, new Path(outputDirAsString));

    Date startTime = new Date();
    System.out.println("Job started: " + startTime);

    boolean jobResult = job.waitForCompletion(true);

    Date endTime = new Date();
    System.out.println("Job ended: " + endTime);
    System.out.println("The job took "
            + TimeUnit.MILLISECONDS.toSeconds(endTime.getTime() - startTime.getTime()) + " seconds.");

    return jobResult;
}
From source file:com.alexholmes.hadooputils.combine.seqfile.mapreduce.CombineSequenceFileTest.java
License:Apache License
@Test
public void testOneFile() throws IOException, InterruptedException {
    Path dir = new Path(tempFolder.getRoot().getAbsolutePath());

    CombineSequenceFileInputFormat<Text, Text> inputFormat = new CombineSequenceFileInputFormat<Text, Text>();
    Path inputFile = new Path(dir, "file1.txt");
    writeSequenceFile(inputFile);

    Job job = new Job(new JobConf());
    FileInputFormat.addInputPath(job, inputFile);

    List<InputSplit> splits = inputFormat.getSplits(job);
    assertEquals(1, splits.size());

    TaskAttemptID taskId = new TaskAttemptID("jt", 0, true, 0, 0);
    Configuration conf1 = new Configuration();
    TaskAttemptContext context1 = new TaskAttemptContext(conf1, taskId);

    RecordReader<Text, Text> rr = inputFormat.createRecordReader(splits.get(0), context1);
    rr.initialize(splits.get(0), context1);
    assertTrue(rr.nextKeyValue());
    assertEquals(key, rr.getCurrentKey());
    assertEquals(value, rr.getCurrentValue());
    assertFalse(rr.nextKeyValue());
    assertEquals(1.0f, rr.getProgress(), 0.1);
}
From source file:com.alexholmes.hadooputils.combine.seqfile.mapreduce.CombineSequenceFileTest.java
License:Apache License
@Test
public void testTwoFiles() throws IOException, InterruptedException {
    Path dir = new Path(tempFolder.getRoot().getAbsolutePath());

    CombineSequenceFileInputFormat<Text, Text> inputFormat = new CombineSequenceFileInputFormat<Text, Text>();
    Path inputFile1 = new Path(dir, "file1.txt");
    Path inputFile2 = new Path(dir, "file2.txt");
    writeSequenceFile(inputFile1);
    writeSequenceFile(inputFile2);

    Job job = new Job(new JobConf());
    FileInputFormat.addInputPath(job, inputFile1);
    FileInputFormat.addInputPath(job, inputFile2);

    List<InputSplit> splits = inputFormat.getSplits(job);
    assertEquals(1, splits.size());

    TaskAttemptID taskId = new TaskAttemptID("jt", 0, true, 0, 0);
    Configuration conf1 = new Configuration();
    TaskAttemptContext context1 = new TaskAttemptContext(conf1, taskId);

    RecordReader<Text, Text> rr = inputFormat.createRecordReader(splits.get(0), context1);
    rr.initialize(splits.get(0), context1);
    assertTrue(rr.nextKeyValue());
    assertEquals(key, rr.getCurrentKey());
    assertEquals(value, rr.getCurrentValue());
    assertEquals(0.5f, rr.getProgress(), 0.1);
    assertTrue(rr.nextKeyValue());
    assertEquals(key, rr.getCurrentKey());
    assertEquals(value, rr.getCurrentValue());
    assertFalse(rr.nextKeyValue());
    assertEquals(1.0f, rr.getProgress(), 0.1);
}
From source file:com.alexholmes.json.mapreduce.ExampleJob.java
License:Apache License
/**
 * The MapReduce driver - setup and launch the job.
 *
 * @param args the command-line arguments
 * @return the process exit code
 * @throws Exception if something goes wrong
 */
public int run(final String[] args) throws Exception {
    String input = args[0];
    String output = args[1];

    Configuration conf = super.getConf();

    writeInput(conf, new Path(input));

    Job job = new Job(conf);
    job.setJarByClass(ExampleJob.class);
    job.setMapperClass(Map.class);

    job.setNumReduceTasks(0);

    Path outputPath = new Path(output);

    FileInputFormat.setInputPaths(job, input);
    FileOutputFormat.setOutputPath(job, outputPath);

    // use the JSON input format
    job.setInputFormatClass(MultiLineJsonInputFormat.class);

    // specify the JSON attribute name which is used to determine which
    // JSON elements are supplied to the mapper
    MultiLineJsonInputFormat.setInputJsonMember(job, "colorName");

    if (job.waitForCompletion(true)) {
        return 0;
    }
    return 1;
}
From source file:com.aliyun.openservices.tablestore.hive.TableStoreInputFormat.java
License:Apache License
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    Configuration dest = translateConfig(job);
    SyncClientInterface ots = null;
    // the column mapping may come from the TableStore-specific property,
    // falling back to the generic serde column list
    String columns = job.get(TableStoreConsts.COLUMNS_MAPPING);
    if (columns == null) {
        columns = job.get(serdeConstants.LIST_COLUMNS);
    }
    logger.debug("columns to get: {}", columns);
    List<org.apache.hadoop.mapreduce.InputSplit> splits;
    try {
        ots = TableStore.newOtsClient(dest);
        TableMeta meta = fetchTableMeta(ots, job.get(TableStoreConsts.TABLE_NAME));
        RangeRowQueryCriteria criteria = fetchCriteria(meta, columns);
        com.aliyun.openservices.tablestore.hadoop.TableStoreInputFormat.addCriteria(dest, criteria);
        splits = com.aliyun.openservices.tablestore.hadoop.TableStoreInputFormat.getSplits(dest, ots);
    } finally {
        if (ots != null) {
            ots.shutdown();
            ots = null;
        }
    }
    // wrap each mapreduce-API split in a mapred-API TableStoreInputSplit
    InputSplit[] res = new InputSplit[splits.size()];
    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(new Job(job));
    Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
    int i = 0;
    for (org.apache.hadoop.mapreduce.InputSplit split : splits) {
        res[i] = new TableStoreInputSplit(
                (com.aliyun.openservices.tablestore.hadoop.TableStoreInputSplit) split, tablePaths[0]);
        ++i;
    }
    return res;
}
From source file:com.ambiata.ivory.operation.hadoop.DelegatingInputFormat.java
License:Apache License
@SuppressWarnings("unchecked")
public List<InputSplit> getSplits(JobContext job) throws IOException, InterruptedException {
    Configuration conf = job.getConfiguration();
    Job jobCopy = new Job(conf);
    List<InputSplit> splits = new ArrayList<InputSplit>();
    Map<Path, InputFormat> formatMap = MultipleInputs.getInputFormatMap(job);
    Map<Path, Class<? extends Mapper>> mapperMap = MultipleInputs.getMapperTypeMap(job);
    Map<Class<? extends InputFormat>, List<Path>> formatPaths = new HashMap<Class<? extends InputFormat>, List<Path>>();

    // First, build a map of InputFormats to Paths
    for (Entry<Path, InputFormat> entry : formatMap.entrySet()) {
        if (!formatPaths.containsKey(entry.getValue().getClass())) {
            formatPaths.put(entry.getValue().getClass(), new LinkedList<Path>());
        }
        formatPaths.get(entry.getValue().getClass()).add(entry.getKey());
    }

    for (Entry<Class<? extends InputFormat>, List<Path>> formatEntry : formatPaths.entrySet()) {
        Class<? extends InputFormat> formatClass = formatEntry.getKey();
        InputFormat format = (InputFormat) ReflectionUtils.newInstance(formatClass, conf);
        List<Path> paths = formatEntry.getValue();

        Map<Class<? extends Mapper>, List<Path>> mapperPaths = new HashMap<Class<? extends Mapper>, List<Path>>();

        // Now, for each set of paths that have a common InputFormat, build
        // a map of Mappers to the paths they're used for
        for (Path path : paths) {
            Class<? extends Mapper> mapperClass = mapperMap.get(path);
            if (!mapperPaths.containsKey(mapperClass)) {
                mapperPaths.put(mapperClass, new LinkedList<Path>());
            }
            mapperPaths.get(mapperClass).add(path);
        }

        // Now each set of paths that has a common InputFormat and Mapper can
        // be added to the same job, and split together.
        for (Entry<Class<? extends Mapper>, List<Path>> mapEntry : mapperPaths.entrySet()) {
            paths = mapEntry.getValue();
            Class<? extends Mapper> mapperClass = mapEntry.getKey();

            if (mapperClass == null) {
                try {
                    mapperClass = job.getMapperClass();
                } catch (ClassNotFoundException e) {
                    throw new IOException("Mapper class is not found", e);
                }
            }

            FileInputFormat.setInputPaths(jobCopy, paths.toArray(new Path[paths.size()]));

            // Get splits for each input path and tag with InputFormat
            // and Mapper types by wrapping in a TaggedInputSplit.
            List<InputSplit> pathSplits = format.getSplits(jobCopy);
            for (InputSplit pathSplit : pathSplits) {
                splits.add(new TaggedInputSplit(pathSplit, conf, format.getClass(), mapperClass));
            }
        }
    }
    return splits;
}
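The formatMap and mapperMap consumed above are typically populated through MultipleInputs.addInputPath. A caller-side sketch using the standard Hadoop MultipleInputs API (Ivory bundles its own copies of these classes, so details may differ; the paths and mapper classes here are hypothetical placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class MultipleInputsExample {
    // placeholder mapper classes, only to have distinct types to register
    public static class LogMapper extends Mapper<LongWritable, Text, Text, Text> { }
    public static class FactMapper extends Mapper<Text, Text, Text, Text> { }

    public static void main(String[] args) throws Exception {
        Job job = new Job(new Configuration());
        // Each path gets its own InputFormat and Mapper; a delegating input
        // format then groups paths by (format, mapper) when computing splits.
        MultipleInputs.addInputPath(job, new Path("logs"), TextInputFormat.class, LogMapper.class);
        MultipleInputs.addInputPath(job, new Path("facts"), KeyValueTextInputFormat.class, FactMapper.class);
    }
}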
From source file:com.asakusafw.runtime.compatibility.hadoop1.JobCompatibilityHadoop1.java
License:Apache License
@Override
public Job newJob(Configuration conf) throws IOException {
    if (conf == null) {
        throw new IllegalArgumentException("conf must not be null"); //$NON-NLS-1$
    }
    return new Job(conf);
}
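This compatibility shim wraps the deprecated constructor for Hadoop 1. For comparison, a hypothetical Hadoop 2 counterpart would keep the same null check but delegate to the non-deprecated Job.getInstance(Configuration) factory; a sketch:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

// Hypothetical Hadoop 2 variant of the factory above.
public class JobCompatibilityHadoop2Sketch {
    public Job newJob(Configuration conf) throws IOException {
        if (conf == null) {
            throw new IllegalArgumentException("conf must not be null");
        }
        // Job.getInstance copies the Configuration, as the constructor did.
        return Job.getInstance(conf);
    }
}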