List of usage examples for org.apache.hadoop.mapreduce Job setMapOutputKeyClass
public void setMapOutputKeyClass(Class<?> theClass) throws IllegalStateException
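For orientation before the project examples below, a minimal sketch of the call in context. WordCountMapper, the job name, and the paths are hypothetical placeholders; the key class passed here must match what the mapper actually emits, and the call throws IllegalStateException if the job has already been submitted.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class SetMapOutputKeyClassExample {
        public static void main(String[] args) throws Exception {
            Job job = Job.getInstance(new Configuration(), "example");
            job.setJarByClass(SetMapOutputKeyClassExample.class);
            job.setMapperClass(WordCountMapper.class); // hypothetical Mapper<_, _, Text, IntWritable>
            // Declare the intermediate key type; needed whenever the map output
            // key class differs from the job's final output key class.
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            FileInputFormat.addInputPath(job, new Path(args[0]));
            FileOutputFormat.setOutputPath(job, new Path(args[1]));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }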
From source file:com.shopzilla.hadoop.mapreduce.MiniMRClusterContextMRTest.java
License:Apache License
@Test
public void testWordCount() throws Exception {
    Path input = new Path("/user/test/keywords_data");
    Path output = new Path("/user/test/word_count");

    Job job = new Job(configuration);
    job.setJobName("Word Count Test");
    job.setMapperClass(WordCountMapper.class);
    job.setReducerClass(SumReducer.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(LongWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    job.setNumReduceTasks(1);
    FileInputFormat.setInputPaths(job, input);
    FileOutputFormat.setOutputPath(job, output);

    assertTrue("All files from /data classpath directory should have been copied into HDFS",
            miniMRClusterContext.getFileSystem().exists(input));

    job.waitForCompletion(true);
    assertTrue("Output file should have been created",
            miniMRClusterContext.getFileSystem().exists(output));

    final LinkedList<String> expectedLines = new LinkedList<String>();
    expectedLines.add("goodbye\t1");
    expectedLines.add("hello\t1");
    expectedLines.add("world\t2");

    miniMRClusterContext.processData(output, new Function<String, Void>() {
        @Override
        public Void apply(String line) {
            assertEquals(expectedLines.pop(), line);
            return null;
        }
    });
    assertEquals(0, expectedLines.size());
}
From source file:com.sirius.hadoop.job.onlinetime.OnlineTimeJob.java
License:Apache License
public Job build() throws Exception {
    // init
    Job job = Job.getInstance(getConf(), "onlinetime");
    job.setJarByClass(OnlineTimeJob.class);

    // map
    job.setMapperClass(StatusMapper.class);
    job.setMapOutputKeyClass(StatusKey.class);
    job.setMapOutputValueClass(OnlineRecord.class);

    // custom partition
    job.setPartitionerClass(StatusKeyPartitioner.class);

    // reduce
    job.setGroupingComparatorClass(StatusKeyGroupComparator.class);
    job.setReducerClass(StatusReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);

    // input
    FileInputFormat.setInputPaths(job, new Path("/subscriber_status/subscriber_status.json"));

    // output
    FileOutputFormat.setOutputPath(job, out);
    FileOutputFormat.setCompressOutput(job, true);
    FileOutputFormat.setOutputCompressorClass(job, Lz4Codec.class);
    return job;
}
From source file:com.soteradefense.dga.louvain.mapreduce.CommunityCompression.java
License:Apache License
public int run(String[] args) throws Exception {
    Configuration mrConf = this.getConf();
    for (java.util.Map.Entry<String, String> entry : dgaConfiguration.getSystemProperties().entrySet()) {
        mrConf.set(entry.getKey(), entry.getValue());
    }

    Job job = Job.getInstance(mrConf);
    job.setJarByClass(CommunityCompression.class);
    Path in = new Path(inputPath);
    Path out = new Path(outputPath);
    FileInputFormat.setInputPaths(job, in);
    FileOutputFormat.setOutputPath(job, out);

    job.setJobName("CommunityCompression");
    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(LouvainVertexWritable.class);
    job.setMapperClass(CommunityCompression.Map.class);
    job.setReducerClass(CommunityCompression.Reduce.class);

    logger.debug("Running Mapreduce step with job configuration: {}", job);
    return job.waitForCompletion(false) ? 0 : 1;
}
From source file:com.soteradefense.dga.louvain.mapreduce.LouvainTableSynthesizer.java
License:Apache License
@Override
public int run(String[] args) throws Exception {
    Job job = null;
    try {
        int iteration = 0;
        if (!basePath.endsWith("/"))
            basePath = basePath + "/";
        String inputPath = basePath + GIRAPH_FOLDER_BASE_NAME + FILE_NAME_SEPARATOR + iteration;
        String joinPath = basePath + GIRAPH_FOLDER_BASE_NAME + FILE_NAME_SEPARATOR + (iteration + 1);
        String outputPath = basePath + TABLE_BASE_NAME + FILE_NAME_SEPARATOR + iteration;

        Configuration mrConf = this.getConf();
        job = Job.getInstance(mrConf);
        for (Map.Entry<String, String> entry : dgaConfiguration.getSystemProperties().entrySet()) {
            mrConf.set(entry.getKey(), entry.getValue());
        }

        FileSystem fs = FileSystem.get(job.getConfiguration());
        boolean nextFileExists = fs.exists(new Path(joinPath));
        while (nextFileExists) {
            System.out.println("Processing " + inputPath + " and " + joinPath);
            job = Job.getInstance(mrConf);
            job.setJobName("Louvain Table Synthesizer " + iteration);
            job.setJarByClass(LouvainTableSynthesizer.class);
            job.setMapperClass(LouvainTableSynthesizerMapper.class);
            job.setReducerClass(LouvainTableSynthesizerReducer.class);
            job.setInputFormatClass(TextInputFormat.class);
            job.setOutputFormatClass(TextOutputFormat.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            // Reducer output
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(NullWritable.class);
            // Add both input folders
            Path in = new Path(inputPath);
            Path joinIn = new Path(joinPath);
            Path out = new Path(outputPath);
            FileInputFormat.addInputPath(job, in);
            FileInputFormat.addInputPath(job, joinIn);
            FileOutputFormat.setOutputPath(job, out);
            job.waitForCompletion(true);
            // Set the new temp input path
            inputPath = outputPath;
            iteration++;
            outputPath = basePath + TABLE_BASE_NAME + FILE_NAME_SEPARATOR + iteration;
            joinPath = basePath + GIRAPH_FOLDER_BASE_NAME + FILE_NAME_SEPARATOR + (iteration + 1);
            nextFileExists = fs.exists(new Path(joinPath));
        }
    } catch (IOException | InterruptedException | ClassNotFoundException e) {
        e.printStackTrace();
        return -1;
    }
    return 0;
}
From source file:com.soteradefense.dga.LouvainRunner.java
License:Apache License
private int runMapreduceJob(String inputPath, String outputPath, DGAConfiguration conf) throws Exception {
    Configuration mrConf = new Configuration();
    for (Map.Entry<String, String> entry : conf.getSystemProperties().entrySet()) {
        mrConf.set(entry.getKey(), entry.getValue());
    }

    // Build the job from the configuration populated above; otherwise the
    // system properties copied into mrConf would never take effect.
    Job job = Job.getInstance(mrConf);
    job.setJarByClass(LouvainRunner.class);
    Path in = new Path(inputPath);
    Path out = new Path(outputPath);
    FileInputFormat.setInputPaths(job, in);
    FileOutputFormat.setOutputPath(job, out);

    job.setJobName("CommunityCompression");
    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(LouvainVertexWritable.class);
    job.setMapperClass(CommunityCompression.Map.class);
    job.setReducerClass(CommunityCompression.Reduce.class);

    logger.debug("Running Mapreduce step with job configuration: {}", job);
    return job.waitForCompletion(false) ? 0 : 1;
}
From source file:com.splicemachine.mrio.api.SpliceTableMapReduceUtil.java
License:Apache License
/**
 * Use this before submitting a TableMap job. It will appropriately set up the job.
 *
 * @param table             The Splice table name to read from.
 * @param scan              The scan instance with the columns, time range etc.
 * @param mapper            The mapper class to use.
 * @param outputKeyClass    The class of the output key.
 * @param outputValueClass  The class of the output value.
 * @param job               The current job to adjust. Make sure the passed job is
 *                          carrying all necessary HBase configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *                          job classes via the distributed cache (tmpjars).
 * @throws IOException When setting up the details fails.
 */
public static void initTableMapperJob(String table, Scan scan, Class<? extends Mapper> mapper,
        Class<? extends WritableComparable> outputKeyClass, Class<? extends Object> outputValueClass,
        Job job, boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass)
        throws IOException {
    job.setInputFormatClass(inputFormatClass);
    if (outputValueClass != null)
        job.setMapOutputValueClass(outputValueClass);
    if (outputKeyClass != null)
        job.setMapOutputKeyClass(outputKeyClass);
    if (mapper != null)
        job.setMapperClass(mapper);
    job.getConfiguration().set(MRConstants.SPLICE_INPUT_TABLE_NAME, table);
    job.getConfiguration().set(TableInputFormat.SCAN, convertScanToString(scan));
    if (addDependencyJars) {
        addDependencyJars(job);
    }
}
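A hedged sketch of how a caller might invoke this helper. The table name, mapper class, and key/value classes below are hypothetical placeholders chosen to satisfy the signature shown above; only the parameter order and types are taken from the source.

    Job job = Job.getInstance(config, "splice-scan");
    Scan scan = new Scan();                   // full-table scan; narrow with addColumn/setTimeRange as needed
    SpliceTableMapReduceUtil.initTableMapperJob(
            "MY_TABLE",                       // hypothetical Splice table name
            scan,
            MyTableMapper.class,              // hypothetical Mapper subclass
            ImmutableBytesWritable.class,     // map output key (a WritableComparable)
            Result.class,                     // map output value (placeholder)
            job,
            true,                             // ship dependency jars via the distributed cache
            TableInputFormat.class);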
From source file:com.splunk.shuttl.integration.hadoop.hbase.CSVJobFactory.java
License:Apache License
/**
 * @return the configured Hadoop job
 * @throws IOException
 */
public static Job getConfiguredJob(String[] arguments) throws IOException {
    Configuration jobConfiguration = new Configuration(true);
    // Load hbase-site.xml
    HBaseConfiguration.addHbaseResources(jobConfiguration);

    jobConfiguration.set("fs.default.name", arguments[0]);
    jobConfiguration.set("mapred.job.tracker", arguments[1]);
    jobConfiguration.set(JobConfigurationConstants.FILENAME, arguments[2]);
    jobConfiguration.set(JobConfigurationConstants.OUTPUT_PATH, arguments[3]);
    jobConfiguration.set(JobConfigurationConstants.TABLE_NAME, arguments[4]);
    jobConfiguration.set(JobConfigurationConstants.COLUMN_FAMILY, "d");

    Job job = new Job(jobConfiguration, "BucketToHbase");
    job.setJarByClass(CSVMapper.class);
    job.setMapperClass(CSVMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(KeyValue.class);
    job.setInputFormatClass(TextInputFormat.class);
    return job;
}
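Because the factory reads its settings positionally, a caller must pass them in the order the indices are read above. The concrete values in this sketch are hypothetical, and the meaning of arguments[2] is inferred from the FILENAME constant:

    // Hypothetical values; order must match the indices read above.
    Job job = CSVJobFactory.getConfiguredJob(new String[] {
            "hdfs://namenode:8020",  // arguments[0]: fs.default.name
            "jobtracker:8021",       // arguments[1]: mapred.job.tracker
            "/input/bucket.csv",     // arguments[2]: input file name (assumed)
            "/output/hfiles",        // arguments[3]: output path
            "events"                 // arguments[4]: HBase table name
    });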
From source file:com.synerzip.analytics.commoncrawl.googleads.counter.GoogleAdsCounterJob.java
License:Apache License
/**
 * Configures and submits the Map Reduce Job to Hadoop
 */
public int run(String[] args) throws Exception {
    String inputPath = null;
    String outputPath = null;
    boolean overwrite = false;
    String s3AccessKey = null;
    String s3SecretKey = null;

    // Read the command line arguments. We're not using GenericOptionsParser
    // to prevent having to include commons.cli as a dependency.
    for (int index = 0; index < args.length; index++) {
        try {
            if (ARGNAME_INPATH.equals(args[index])) {
                inputPath = args[++index];
            } else if (ARGNAME_OUTPATH.equals(args[index])) {
                outputPath = args[++index];
            } else if (ARGNAME_S3ACCESSKEY.equals(args[index])) {
                s3AccessKey = args[++index];
            } else if (ARGNAME_S3SECRETKEY.equals(args[index])) {
                s3SecretKey = args[++index];
            } else if (ARGNAME_MAXFILES.equals(args[index])) {
                // FIXME - No use of static methods
                WarcFileFilter.setMax(Long.parseLong(args[++index]));
            } else if (ARGNAME_OVERWRITE.equals(args[index])) {
                overwrite = true;
            } else {
                LOG.warn("Unsupported argument: " + args[index]);
            }
        } catch (ArrayIndexOutOfBoundsException e) {
            usage();
            throw new IllegalArgumentException();
        }
    }
    if (inputPath == null || outputPath == null) {
        usage();
        throw new IllegalArgumentException();
    }
    if (inputPath.contains("s3n") && (s3AccessKey == null || s3SecretKey == null)) {
        usage();
        LOG.info("Please specify Access Key and Secret Key to access data on AWS S3 storage");
        throw new IllegalArgumentException();
    }

    // Create the Hadoop job.
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf);
    job.setJarByClass(GoogleAdsCounterJob.class);
    if (inputPath.contains("s3n") && (s3AccessKey != null && s3SecretKey != null)) {
        conf.set("AWS_ACCESS_KEY_ID", s3AccessKey);
        conf.set("AWS_SECRET_ACCESS_KEY", s3SecretKey);
    }

    // Scan the provided input path for WARC files.
    LOG.info("setting input path to '" + inputPath + "'");
    WarcFileFilter.setFilter(FILEFILTER);
    FileInputFormat.addInputPath(job, new Path(inputPath));
    // FIXME - I see the problem that you want to give a dynamic number to a
    // static class. My question is, is this really required? If we just
    // point to a file in S3, that should solve our problem.
    FileInputFormat.setInputPathFilter(job, WarcFileFilter.class);

    // Delete the output path directory if it already exists and the user
    // wants to overwrite it.
    if (overwrite) {
        LOG.info("clearing the output path at '" + outputPath + "'");
        FileSystem fs = FileSystem.get(new URI(outputPath), conf);
        if (fs.exists(new Path(outputPath))) {
            fs.delete(new Path(outputPath), true);
        }
    }

    // Set the path where final output 'part' files will be saved.
    LOG.info("setting output path to '" + outputPath + "'");
    FileOutputFormat.setOutputPath(job, new Path(outputPath));

    /*
     * // Defines additional single text based output 'GoogleAdClient' for the job
     * MultipleOutputs.addNamedOutput(job, "GoogleAdClient", TextOutputFormat.class,
     *         Text.class, LongWritable.class);
     *
     * // Defines additional text based output 'GoogleAdType' for the job
     * MultipleOutputs.addNamedOutput(job, "GoogleAdType", TextOutputFormat.class,
     *         Text.class, LongWritable.class);
     */

    // Set which InputFormat class to use.
    job.setInputFormatClass(WARCInputFormat.class);

    // Set which OutputFormat class to use.
    job.setOutputFormatClass(TextOutputFormat.class);

    /*
     * Using MultipleOutputs creates a zero-sized default output, e.g. part-r-00000.
     * To prevent this, use LazyOutputFormat instead of
     * job.setOutputFormatClass(TextOutputFormat.class) in the Hadoop job configuration.
     */
    // LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);

    // job.setPartitionerClass(GoogleAdsCounterPartitioner.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    // job.setNumReduceTasks(4);

    // Set the output data types.
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    // Set which Mapper and Reducer classes to use.
    job.setMapperClass(GoogleAdsCounterMapper.class);
    // job.setMapperClass(CrawlMapper_AdStatsDetails.class);
    job.setReducerClass(GoogleAdsCounterReducer.class);

    // set combiner
    // job.setCombinerClass(GoogleAdsCounterReducer.class);

    // set job name
    job.setJobName("CommonCrawl Data Processing : Counting Google Ads");

    long startTime = System.currentTimeMillis();
    // Submit once and wait; calling waitForCompletion a second time on the
    // same job would attempt to resubmit it.
    boolean succeeded = job.waitForCompletion(true);
    if (succeeded) {
        LOG.info("Job completion status : " + succeeded);
        long difference = System.currentTimeMillis() - startTime;
        LOG.info("Elapsed milliseconds: " + difference);
        Counter totalResponsePagesCounter = job.getCounters().findCounter(TestCounters.TOTALRESPONSEPAGES);
        LOG.info("totalResponsePagesCounter = " + totalResponsePagesCounter.getValue());
        Counter totalGoogleAdPagesCounter = job.getCounters().findCounter(TestCounters.TOTALGOOGLEADSPAGES);
        LOG.info("totalGoogleAdPagesCounter = " + totalGoogleAdPagesCounter.getValue());
        return 0;
    } else {
        return 1;
    }
}
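The commented-out LazyOutputFormat note in the source is worth spelling out. A minimal sketch of that wiring, assuming the same job object and the named output declared in the commented block above; with LazyOutputFormat the default part file is only created if something is actually written to it:

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat;
    import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

    // Instead of job.setOutputFormatClass(TextOutputFormat.class):
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);

    // Named outputs are declared as usual; reducers then write through
    // a MultipleOutputs instance rather than the default context.
    MultipleOutputs.addNamedOutput(job, "GoogleAdClient", TextOutputFormat.class,
            Text.class, LongWritable.class);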
From source file:com.talis.hadoop.rdf.collation.QuadsCollater.java
License:Apache License
@Override
public int run(String[] args) throws Exception {
    Configuration configuration = getConf();
    boolean useCompression = configuration.getBoolean(Constants.OPTION_USE_COMPRESSION,
            Constants.OPTION_USE_COMPRESSION_DEFAULT);
    if (useCompression) {
        configuration.setBoolean("mapred.compress.map.output", true);
        configuration.set("mapred.output.compression.type", "BLOCK");
        configuration.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.GzipCodec");
    }

    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);
    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Job job = new Job(configuration);
    job.setJobName(JOB_NAME);
    job.setJarByClass(getClass());

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    FileOutputFormat.setCompressOutput(job, true);

    job.setInputFormatClass(NQuadsInputFormat.class);
    job.setMapperClass(CollationMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(QuadWritable.class);

    job.setReducerClass(CollationReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(QuadArrayWritable.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    if (LOG.isDebugEnabled())
        Utils.log(job, LOG);

    return job.waitForCompletion(true) ? 0 : 1;
}
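The mapred.* keys above are the old Hadoop 1 property names; they still work through Hadoop's deprecation mappings, but on Hadoop 2+ the equivalent map-output compression setup would look like this (a sketch preserving the same Gzip block-compression intent):

    // Hadoop 2+ equivalents of the deprecated mapred.* keys used above.
    configuration.setBoolean("mapreduce.map.output.compress", true);
    configuration.set("mapreduce.map.output.compress.codec",
            "org.apache.hadoop.io.compress.GzipCodec");
    configuration.set("mapreduce.output.fileoutputformat.compress.type", "BLOCK");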
From source file:com.talis.labs.pagerank.mapreduce.CheckConvergence.java
License:Apache License
@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("Usage: CheckConvergence <input path> <output path>");
        return -1;
    }

    FileSystem.get(getConf()).delete(new Path(args[1]), true);

    Job job = new Job(getConf(), "CheckConvergence");
    job.setJarByClass(getClass());

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setMapperClass(CheckConvergenceMapper.class);
    job.setCombinerClass(CheckConvergenceReducer.class);
    job.setReducerClass(CheckConvergenceReducer.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(DoubleWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(DoubleWritable.class);
    job.setNumReduceTasks(1);

    return job.waitForCompletion(true) ? 0 : 1;
}