List of usage examples for org.apache.hadoop.mapred.JobConf.setCombinerClass
public void setCombinerClass(Class<? extends Reducer> theClass)
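Before the full examples below, a minimal, self-contained sketch of how setCombinerClass is typically wired into an old-API (org.apache.hadoop.mapred) driver. It uses the stock library mapper and reducer from org.apache.hadoop.mapred.lib so nothing project-specific is required; the class name CombinerSketch and the command-line paths are placeholders, not taken from any example on this page.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.mapred.lib.TokenCountMapper;

public class CombinerSketch {
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(CombinerSketch.class);
        conf.setJobName("token-count-with-combiner");

        // Final (and map) output types: token -> count.
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);

        conf.setMapperClass(TokenCountMapper.class);   // emits (token, 1) per token
        // The sum reducer is commutative and associative and keeps the same
        // key/value types, so it can also run map-side as the combiner.
        conf.setCombinerClass(LongSumReducer.class);
        conf.setReducerClass(LongSumReducer.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));   // placeholder input path
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));  // placeholder output path
        JobClient.runJob(conf);
    }
}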
From source file:org.apache.sysml.runtime.matrix.DataGenMR.java
License:Apache License
/**
 * <p>Starts a Rand MapReduce job which will produce one or more random objects.</p>
 *
 * @param inst MR job instruction
 * @param dataGenInstructions array of data gen instructions
 * @param instructionsInMapper instructions in mapper
 * @param aggInstructionsInReducer aggregate instructions in reducer
 * @param otherInstructionsInReducer other instructions in reducer
 * @param numReducers number of reducers
 * @param replication file replication
 * @param resultIndexes result indexes for each random object
 * @param dimsUnknownFilePrefix file path prefix when dimensions unknown
 * @param outputs output file for each random object
 * @param outputInfos output information for each random object
 * @return matrix characteristics for each random object
 * @throws Exception if Exception occurs
 */
public static JobReturn runJob(MRJobInstruction inst, String[] dataGenInstructions, String instructionsInMapper,
        String aggInstructionsInReducer, String otherInstructionsInReducer, int numReducers, int replication,
        byte[] resultIndexes, String dimsUnknownFilePrefix, String[] outputs, OutputInfo[] outputInfos)
        throws Exception {
    JobConf job = new JobConf(DataGenMR.class);
    job.setJobName("DataGen-MR");

    //whether use block representation or cell representation
    MRJobConfiguration.setMatrixValueClass(job, true);

    byte[] realIndexes = new byte[dataGenInstructions.length];
    for (byte b = 0; b < realIndexes.length; b++)
        realIndexes[b] = b;

    String[] inputs = new String[dataGenInstructions.length];
    InputInfo[] inputInfos = new InputInfo[dataGenInstructions.length];
    long[] rlens = new long[dataGenInstructions.length];
    long[] clens = new long[dataGenInstructions.length];
    int[] brlens = new int[dataGenInstructions.length];
    int[] bclens = new int[dataGenInstructions.length];

    FileSystem fs = FileSystem.get(job);
    String dataGenInsStr = "";
    int numblocks = 0;
    int maxbrlen = -1, maxbclen = -1;
    double maxsparsity = -1;

    for (int i = 0; i < dataGenInstructions.length; i++) {
        dataGenInsStr = dataGenInsStr + Lop.INSTRUCTION_DELIMITOR + dataGenInstructions[i];

        MRInstruction mrins = MRInstructionParser.parseSingleInstruction(dataGenInstructions[i]);
        MRINSTRUCTION_TYPE mrtype = mrins.getMRInstructionType();
        DataGenMRInstruction genInst = (DataGenMRInstruction) mrins;

        rlens[i] = genInst.getRows();
        clens[i] = genInst.getCols();
        brlens[i] = genInst.getRowsInBlock();
        bclens[i] = genInst.getColsInBlock();

        maxbrlen = Math.max(maxbrlen, brlens[i]);
        maxbclen = Math.max(maxbclen, bclens[i]);

        if (mrtype == MRINSTRUCTION_TYPE.Rand) {
            RandInstruction randInst = (RandInstruction) mrins;
            inputs[i] = LibMatrixDatagen.generateUniqueSeedPath(genInst.getBaseDir());
            maxsparsity = Math.max(maxsparsity, randInst.getSparsity());

            PrintWriter pw = null;
            try {
                pw = new PrintWriter(fs.create(new Path(inputs[i])));

                //for obj reuse and preventing repeated buffer re-allocations
                StringBuilder sb = new StringBuilder();

                //seed generation
                Well1024a bigrand = LibMatrixDatagen.setupSeedsForRand(randInst.getSeed());
                LongStream nnz = LibMatrixDatagen.computeNNZperBlock(rlens[i], clens[i], brlens[i], bclens[i],
                        randInst.getSparsity());
                PrimitiveIterator.OfLong nnzIter = nnz.iterator();
                for (long r = 0; r < rlens[i]; r += brlens[i]) {
                    long curBlockRowSize = Math.min(brlens[i], (rlens[i] - r));
                    for (long c = 0; c < clens[i]; c += bclens[i]) {
                        long curBlockColSize = Math.min(bclens[i], (clens[i] - c));
                        sb.append((r / brlens[i]) + 1);
                        sb.append(',');
                        sb.append((c / bclens[i]) + 1);
                        sb.append(',');
                        sb.append(curBlockRowSize);
                        sb.append(',');
                        sb.append(curBlockColSize);
                        sb.append(',');
                        sb.append(nnzIter.nextLong());
                        sb.append(',');
                        sb.append(bigrand.nextLong());
                        pw.println(sb.toString());
                        sb.setLength(0);
                        numblocks++;
                    }
                }
            } finally {
                IOUtilFunctions.closeSilently(pw);
            }
            inputInfos[i] = InputInfo.TextCellInputInfo;
        } else if (mrtype == MRINSTRUCTION_TYPE.Seq) {
            SeqInstruction seqInst = (SeqInstruction) mrins;
            inputs[i] = genInst.getBaseDir() + System.currentTimeMillis() + ".seqinput";
            maxsparsity = 1.0; //always dense

            double from = seqInst.fromValue;
            double to = seqInst.toValue;
            double incr = seqInst.incrValue;

            //handle default 1 to -1 for special case of from>to
            incr = LibMatrixDatagen.updateSeqIncr(from, to, incr);

            // Correctness checks on (from, to, incr)
            boolean neg = (from > to);
            if (incr == 0)
                throw new DMLRuntimeException("Invalid value for \"increment\" in seq().");
            if (neg != (incr < 0))
                throw new DMLRuntimeException("Wrong sign for the increment in a call to seq()");

            // Compute the number of rows in the sequence
            long numrows = UtilFunctions.getSeqLength(from, to, incr);
            if (rlens[i] > 0) {
                if (numrows != rlens[i])
                    throw new DMLRuntimeException(
                            "Unexpected error while processing sequence instruction. Expected number of rows does not match given number: "
                                    + rlens[i] + " != " + numrows);
            } else {
                rlens[i] = numrows;
            }
            if (clens[i] > 0 && clens[i] != 1)
                throw new DMLRuntimeException(
                        "Unexpected error while processing sequence instruction. Number of columns (" + clens[i]
                                + ") must be equal to 1.");
            else
                clens[i] = 1;

            PrintWriter pw = null;
            try {
                pw = new PrintWriter(fs.create(new Path(inputs[i])));
                StringBuilder sb = new StringBuilder();

                double temp = from;
                double block_from, block_to;
                for (long r = 0; r < rlens[i]; r += brlens[i]) {
                    long curBlockRowSize = Math.min(brlens[i], (rlens[i] - r));

                    // block (bid_i,bid_j) generates a sequence from the interval [block_from, block_to] (inclusive of both end points of the interval)
                    long bid_i = ((r / brlens[i]) + 1);
                    long bid_j = 1;
                    block_from = temp;
                    block_to = temp + (curBlockRowSize - 1) * incr;
                    temp = block_to + incr; // next block starts from here

                    sb.append(bid_i);
                    sb.append(',');
                    sb.append(bid_j);
                    sb.append(',');
                    sb.append(block_from);
                    sb.append(',');
                    sb.append(block_to);
                    sb.append(',');
                    sb.append(incr);
                    pw.println(sb.toString());
                    sb.setLength(0);
                    numblocks++;
                }
            } finally {
                IOUtilFunctions.closeSilently(pw);
            }
            inputInfos[i] = InputInfo.TextCellInputInfo;
        } else {
            throw new DMLRuntimeException("Unexpected Data Generation Instruction Type: " + mrtype);
        }
    }
    dataGenInsStr = dataGenInsStr.substring(1); //remove the first ","

    RunningJob runjob;
    MatrixCharacteristics[] stats;
    try {
        //set up the block size
        MRJobConfiguration.setBlocksSizes(job, realIndexes, brlens, bclens);

        //set up the input files and their format information
        MRJobConfiguration.setUpMultipleInputs(job, realIndexes, inputs, inputInfos, brlens, bclens, false,
                ConvertTarget.BLOCK);

        //set up the dimensions of input matrices
        MRJobConfiguration.setMatricesDimensions(job, realIndexes, rlens, clens);
        MRJobConfiguration.setDimsUnknownFilePrefix(job, dimsUnknownFilePrefix);

        //set up the block size
        MRJobConfiguration.setBlocksSizes(job, realIndexes, brlens, bclens);

        //set up the rand Instructions
        MRJobConfiguration.setRandInstructions(job, dataGenInsStr);

        //set up unary instructions that will perform in the mapper
        MRJobConfiguration.setInstructionsInMapper(job, instructionsInMapper);

        //set up the aggregate instructions that will happen in the combiner and reducer
        MRJobConfiguration.setAggregateInstructions(job, aggInstructionsInReducer);

        //set up the instructions that will happen in the reducer, after the aggregation instructions
        MRJobConfiguration.setInstructionsInReducer(job, otherInstructionsInReducer);

        //set up the replication factor for the results
        job.setInt(MRConfigurationNames.DFS_REPLICATION, replication);

        //set up map/reduce memory configurations (if in AM context)
        DMLConfig config = ConfigurationManager.getDMLConfig();
        DMLAppMasterUtils.setupMRJobRemoteMaxMemory(job, config);

        //set up custom map/reduce configurations
        MRJobConfiguration.setupCustomMRConfigurations(job, config);

        //determine degree of parallelism (nmappers: 1<=n<=capacity)
        //TODO use maxsparsity whenever we have a way of generating sparse rand data
        int capacity = InfrastructureAnalyzer.getRemoteParallelMapTasks();
        long dfsblocksize = InfrastructureAnalyzer.getHDFSBlockSize();

        //correction max number of mappers on yarn clusters
        if (InfrastructureAnalyzer.isYarnEnabled())
            capacity = (int) Math.max(capacity, YarnClusterAnalyzer.getNumCores());
        int nmapers = Math
                .max(Math.min((int) (8 * maxbrlen * maxbclen * (long) numblocks / dfsblocksize), capacity), 1);
        job.setNumMapTasks(nmapers);

        //set up what matrices are needed to pass from the mapper to reducer
        HashSet<Byte> mapoutputIndexes = MRJobConfiguration.setUpOutputIndexesForMapper(job, realIndexes,
                dataGenInsStr, instructionsInMapper, null, aggInstructionsInReducer, otherInstructionsInReducer,
                resultIndexes);

        MatrixChar_N_ReducerGroups ret = MRJobConfiguration.computeMatrixCharacteristics(job, realIndexes,
                dataGenInsStr, instructionsInMapper, null, aggInstructionsInReducer, null,
                otherInstructionsInReducer, resultIndexes, mapoutputIndexes, false);
        stats = ret.stats;

        //set up the number of reducers
        MRJobConfiguration.setNumReducers(job, ret.numReducerGroups, numReducers);

        // print the complete MRJob instruction
        if (LOG.isTraceEnabled())
            inst.printCompleteMRJobInstruction(stats);

        // Update resultDimsUnknown based on computed "stats"
        byte[] resultDimsUnknown = new byte[resultIndexes.length];
        for (int i = 0; i < resultIndexes.length; i++) {
            if (stats[i].getRows() == -1 || stats[i].getCols() == -1) {
                resultDimsUnknown[i] = (byte) 1;
            } else {
                resultDimsUnknown[i] = (byte) 0;
            }
        }

        boolean mayContainCtable = instructionsInMapper.contains("ctabletransform")
                || instructionsInMapper.contains("groupedagg");

        //set up the multiple output files, and their format information
        MRJobConfiguration.setUpMultipleOutputs(job, resultIndexes, resultDimsUnknown, outputs, outputInfos,
                true, mayContainCtable);

        // configure mapper and the mapper output key value pairs
        job.setMapperClass(DataGenMapper.class);
        if (numReducers == 0) {
            job.setMapOutputKeyClass(Writable.class);
            job.setMapOutputValueClass(Writable.class);
        } else {
            job.setMapOutputKeyClass(MatrixIndexes.class);
            job.setMapOutputValueClass(TaggedMatrixBlock.class);
        }

        //set up combiner
        if (numReducers != 0 && aggInstructionsInReducer != null && !aggInstructionsInReducer.isEmpty())
            job.setCombinerClass(GMRCombiner.class);

        //configure reducer
        job.setReducerClass(GMRReducer.class);
        //job.setReducerClass(PassThroughReducer.class);

        // By default, the job executes in "cluster" mode.
        // Determine if we can optimize and run it in "local" mode.
        MatrixCharacteristics[] inputStats = new MatrixCharacteristics[inputs.length];
        for (int i = 0; i < inputs.length; i++) {
            inputStats[i] = new MatrixCharacteristics(rlens[i], clens[i], brlens[i], bclens[i]);
        }

        //set unique working dir
        MRJobConfiguration.setUniqueWorkingDir(job);

        runjob = JobClient.runJob(job);

        /* Process different counters */
        Group group = runjob.getCounters().getGroup(MRJobConfiguration.NUM_NONZERO_CELLS);
        for (int i = 0; i < resultIndexes.length; i++) {
            // number of non-zeros
            stats[i].setNonZeros(group.getCounter(Integer.toString(i)));
        }

        String dir = dimsUnknownFilePrefix + "/" + runjob.getID().toString() + "_dimsFile";
        stats = MapReduceTool.processDimsFiles(dir, stats);
        MapReduceTool.deleteFileIfExistOnHDFS(dir);
    } finally {
        for (String input : inputs)
            MapReduceTool.deleteFileIfExistOnHDFS(new Path(input), job);
    }

    return new JobReturn(stats, outputInfos, runjob.isSuccessful());
}
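The combiner wiring inside that long method reduces to a small guard: the map output types are only switched to the matrix-block pair when the job has reducers, and the combiner is only registered when there are aggregate instructions for it to pre-apply. A distilled fragment of that logic, slightly restructured but using the same names as the example above:

// Distilled from DataGenMR.runJob: the combiner consumes the map output types
// and is only registered when there is reduce-side aggregation to pre-compute.
if (numReducers != 0) {
    job.setMapOutputKeyClass(MatrixIndexes.class);
    job.setMapOutputValueClass(TaggedMatrixBlock.class);
    if (aggInstructionsInReducer != null && !aggInstructionsInReducer.isEmpty())
        job.setCombinerClass(GMRCombiner.class); // same partial aggregation as the reducer
}
job.setReducerClass(GMRReducer.class);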
From source file:org.apache.sysml.runtime.matrix.GMR.java
License:Apache License
/**
 * Execute job.
 *
 * @param inst MR job instruction
 * @param inputs input matrices, the inputs are indexed by 0, 1, 2, .. based on the position in this string
 * @param inputInfos the input format information for the input matrices
 * @param rlens array of number of rows
 * @param clens array of number of columns
 * @param brlens array of number of rows in block
 * @param bclens array of number of columns in block
 * @param partitioned boolean array of partitioned status
 * @param pformats array of data partition formats
 * @param psizes does nothing
 * @param recordReaderInstruction record reader instruction
 * @param instructionsInMapper in Mapper, the set of unary operations that need to be performed on each input matrix
 * @param aggInstructionsInReducer in Reducer, right after sorting, the set of aggregate operations
 *        that need to be performed on each input matrix
 * @param otherInstructionsInReducer the mixed operations that need to be performed on matrices after the aggregate operations
 * @param numReducers the number of reducers
 * @param replication the replication factor for the output
 * @param jvmReuse if true, reuse JVM
 * @param resultIndexes the indexes of the result matrices that need to be output
 * @param dimsUnknownFilePrefix file path prefix when dimensions unknown
 * @param outputs the names for the output directories, one for each result index
 * @param outputInfos output format information for the output matrices
 * @return job return object
 * @throws Exception if Exception occurs
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public static JobReturn runJob(MRJobInstruction inst, String[] inputs, InputInfo[] inputInfos, long[] rlens,
        long[] clens, int[] brlens, int[] bclens, boolean[] partitioned, PDataPartitionFormat[] pformats,
        int[] psizes, String recordReaderInstruction, String instructionsInMapper,
        String aggInstructionsInReducer, String otherInstructionsInReducer, int numReducers, int replication,
        boolean jvmReuse, byte[] resultIndexes, String dimsUnknownFilePrefix, String[] outputs,
        OutputInfo[] outputInfos) throws Exception {
    JobConf job = new JobConf(GMR.class);
    job.setJobName("G-MR");

    boolean inBlockRepresentation = MRJobConfiguration.deriveRepresentation(inputInfos);

    //whether use block representation or cell representation
    MRJobConfiguration.setMatrixValueClass(job, inBlockRepresentation);

    //added for handling recordreader instruction
    String[] realinputs = inputs;
    InputInfo[] realinputInfos = inputInfos;
    long[] realrlens = rlens;
    long[] realclens = clens;
    int[] realbrlens = brlens;
    int[] realbclens = bclens;
    byte[] realIndexes = new byte[inputs.length];
    for (byte b = 0; b < realIndexes.length; b++)
        realIndexes[b] = b;

    if (recordReaderInstruction != null && !recordReaderInstruction.isEmpty()) {
        assert (inputs.length <= 2);
        PickByCountInstruction ins = (PickByCountInstruction) PickByCountInstruction
                .parseInstruction(recordReaderInstruction);
        PickFromCompactInputFormat.setKeyValueClasses(job,
                (Class<? extends WritableComparable>) inputInfos[ins.input1].inputKeyClass,
                inputInfos[ins.input1].inputValueClass);
        job.setInputFormat(PickFromCompactInputFormat.class);
        PickFromCompactInputFormat.setZeroValues(job,
                (MetaDataNumItemsByEachReducer) inputInfos[ins.input1].metadata);

        if (ins.isValuePick) {
            double[] probs = MapReduceTool.readColumnVectorFromHDFS(inputs[ins.input2], inputInfos[ins.input2],
                    rlens[ins.input2], clens[ins.input2], brlens[ins.input2], bclens[ins.input2]);
            PickFromCompactInputFormat.setPickRecordsInEachPartFile(job,
                    (MetaDataNumItemsByEachReducer) inputInfos[ins.input1].metadata, probs);

            realinputs = new String[inputs.length - 1];
            realinputInfos = new InputInfo[inputs.length - 1];
            realrlens = new long[inputs.length - 1];
            realclens = new long[inputs.length - 1];
            realbrlens = new int[inputs.length - 1];
            realbclens = new int[inputs.length - 1];
            realIndexes = new byte[inputs.length - 1];
            byte realIndex = 0;
            for (byte i = 0; i < inputs.length; i++) {
                if (i == ins.input2)
                    continue;
                realinputs[realIndex] = inputs[i];
                realinputInfos[realIndex] = inputInfos[i];
                if (i == ins.input1) {
                    realrlens[realIndex] = rlens[ins.input2];
                    realclens[realIndex] = clens[ins.input2];
                    realbrlens[realIndex] = 1;
                    realbclens[realIndex] = 1;
                    realIndexes[realIndex] = ins.output;
                } else {
                    realrlens[realIndex] = rlens[i];
                    realclens[realIndex] = clens[i];
                    realbrlens[realIndex] = brlens[i];
                    realbclens[realIndex] = bclens[i];
                    realIndexes[realIndex] = i;
                }
                realIndex++;
            }
        } else {
            //PickFromCompactInputFormat.setPickRecordsInEachPartFile(job, (NumItemsByEachReducerMetaData) inputInfos[ins.input1].metadata, ins.cst, 1-ins.cst);
            PickFromCompactInputFormat.setRangePickPartFiles(job,
                    (MetaDataNumItemsByEachReducer) inputInfos[ins.input1].metadata, ins.cst, 1 - ins.cst);
            realrlens[ins.input1] = UtilFunctions.getLengthForInterQuantile(
                    (MetaDataNumItemsByEachReducer) inputInfos[ins.input1].metadata, ins.cst);
            realclens[ins.input1] = clens[ins.input1];
            realbrlens[ins.input1] = 1;
            realbclens[ins.input1] = 1;
            realIndexes[ins.input1] = ins.output;
        }
    }

    boolean resetDistCache = setupDistributedCache(job, instructionsInMapper, otherInstructionsInReducer,
            realinputs, realrlens, realclens);

    //set up the input files and their format information
    boolean[] distCacheOnly = getDistCacheOnlyInputs(realIndexes, recordReaderInstruction, instructionsInMapper,
            aggInstructionsInReducer, otherInstructionsInReducer);
    MRJobConfiguration.setUpMultipleInputs(job, realIndexes, realinputs, realinputInfos, realbrlens, realbclens,
            distCacheOnly, true, inBlockRepresentation ? ConvertTarget.BLOCK : ConvertTarget.CELL);
    MRJobConfiguration.setInputPartitioningInfo(job, pformats);

    //set up the dimensions of input matrices
    MRJobConfiguration.setMatricesDimensions(job, realIndexes, realrlens, realclens);
    MRJobConfiguration.setDimsUnknownFilePrefix(job, dimsUnknownFilePrefix);

    //set up the block size
    MRJobConfiguration.setBlocksSizes(job, realIndexes, realbrlens, realbclens);

    //set up unary instructions that will perform in the mapper
    MRJobConfiguration.setInstructionsInMapper(job, instructionsInMapper);

    //set up the aggregate instructions that will happen in the combiner and reducer
    MRJobConfiguration.setAggregateInstructions(job, aggInstructionsInReducer);

    //set up the instructions that will happen in the reducer, after the aggregation instructions
    MRJobConfiguration.setInstructionsInReducer(job, otherInstructionsInReducer);

    //set up the replication factor for the results
    job.setInt(MRConfigurationNames.DFS_REPLICATION, replication);

    //set up preferred custom serialization framework for binary block format
    if (MRJobConfiguration.USE_BINARYBLOCK_SERIALIZATION)
        MRJobConfiguration.addBinaryBlockSerializationFramework(job);

    //set up map/reduce memory configurations (if in AM context)
    DMLConfig config = ConfigurationManager.getDMLConfig();
    DMLAppMasterUtils.setupMRJobRemoteMaxMemory(job, config);

    //set up custom map/reduce configurations
    MRJobConfiguration.setupCustomMRConfigurations(job, config);

    //set up jvm reuse (incl. reuse of loaded dist cache matrices)
    if (jvmReuse)
        job.setNumTasksToExecutePerJvm(-1);

    //set up what matrices are needed to pass from the mapper to reducer
    HashSet<Byte> mapoutputIndexes = MRJobConfiguration.setUpOutputIndexesForMapper(job, realIndexes,
            instructionsInMapper, aggInstructionsInReducer, otherInstructionsInReducer, resultIndexes);

    MatrixChar_N_ReducerGroups ret = MRJobConfiguration.computeMatrixCharacteristics(job, realIndexes,
            instructionsInMapper, aggInstructionsInReducer, null, otherInstructionsInReducer, resultIndexes,
            mapoutputIndexes, false);

    MatrixCharacteristics[] stats = ret.stats;

    //set up the number of reducers
    MRJobConfiguration.setNumReducers(job, ret.numReducerGroups, numReducers);

    // Print the complete instruction
    if (LOG.isTraceEnabled())
        inst.printCompleteMRJobInstruction(stats);

    // Update resultDimsUnknown based on computed "stats"
    byte[] dimsUnknown = new byte[resultIndexes.length];
    for (int i = 0; i < resultIndexes.length; i++) {
        if (stats[i].getRows() == -1 || stats[i].getCols() == -1) {
            dimsUnknown[i] = (byte) 1;
        } else {
            dimsUnknown[i] = (byte) 0;
        }
    }
    //MRJobConfiguration.updateResultDimsUnknown(job,resultDimsUnknown);

    //set up the multiple output files, and their format information
    MRJobConfiguration.setUpMultipleOutputs(job, resultIndexes, dimsUnknown, outputs, outputInfos,
            inBlockRepresentation, true);

    // configure mapper and the mapper output key value pairs
    job.setMapperClass(GMRMapper.class);
    if (numReducers == 0) {
        job.setMapOutputKeyClass(Writable.class);
        job.setMapOutputValueClass(Writable.class);
    } else {
        job.setMapOutputKeyClass(MatrixIndexes.class);
        if (inBlockRepresentation)
            job.setMapOutputValueClass(TaggedMatrixBlock.class);
        else
            job.setMapOutputValueClass(TaggedMatrixPackedCell.class);
    }

    //set up combiner
    if (numReducers != 0 && aggInstructionsInReducer != null && !aggInstructionsInReducer.isEmpty()) {
        job.setCombinerClass(GMRCombiner.class);
    }

    //configure reducer
    job.setReducerClass(GMRReducer.class);
    //job.setReducerClass(PassThroughReducer.class);

    // By default, the job executes in "cluster" mode.
    // Determine if we can optimize and run it in "local" mode.
    MatrixCharacteristics[] inputStats = new MatrixCharacteristics[inputs.length];
    for (int i = 0; i < inputs.length; i++) {
        inputStats[i] = new MatrixCharacteristics(rlens[i], clens[i], brlens[i], bclens[i]);
    }

    //set unique working dir
    MRJobConfiguration.setUniqueWorkingDir(job);

    RunningJob runjob = JobClient.runJob(job);

    Group group = runjob.getCounters().getGroup(MRJobConfiguration.NUM_NONZERO_CELLS);
    for (int i = 0; i < resultIndexes.length; i++)
        stats[i].setNonZeros(group.getCounter(Integer.toString(i)));

    //cleanups
    String dir = dimsUnknownFilePrefix + "/" + runjob.getID().toString() + "_dimsFile";
    stats = MapReduceTool.processDimsFiles(dir, stats);
    MapReduceTool.deleteFileIfExistOnHDFS(dir);
    if (resetDistCache)
        MRBaseForCommonInstructions.resetDistCache();

    return new JobReturn(stats, outputInfos, runjob.isSuccessful());
}
From source file:org.apache.sysml.runtime.matrix.GroupedAggMR.java
License:Apache License
public static JobReturn runJob(MRJobInstruction inst, String[] inputs, InputInfo[] inputInfos, long[] rlens,
        long[] clens, int[] brlens, int[] bclens, String grpAggInstructions,
        String simpleReduceInstructions /*only scalar or reorg instructions allowed*/, int numReducers,
        int replication, byte[] resultIndexes, String dimsUnknownFilePrefix, String[] outputs,
        OutputInfo[] outputInfos) throws Exception {
    JobConf job = new JobConf(GroupedAggMR.class);
    job.setJobName("GroupedAgg-MR");

    //whether use block representation or cell representation
    //MRJobConfiguration.setMatrixValueClassForCM_N_COM(job, true);
    MRJobConfiguration.setMatrixValueClass(job, false);

    //added for handling recordreader instruction
    String[] realinputs = inputs;
    InputInfo[] realinputInfos = inputInfos;
    long[] realrlens = rlens;
    long[] realclens = clens;
    int[] realbrlens = brlens;
    int[] realbclens = bclens;
    byte[] realIndexes = new byte[inputs.length];
    for (byte b = 0; b < realIndexes.length; b++)
        realIndexes[b] = b;

    //set up the input files and their format information
    MRJobConfiguration.setUpMultipleInputs(job, realIndexes, realinputs, realinputInfos, realbrlens, realbclens,
            true, ConvertTarget.WEIGHTEDCELL);

    //set up the dimensions of input matrices
    MRJobConfiguration.setMatricesDimensions(job, realIndexes, realrlens, realclens);
    MRJobConfiguration.setDimsUnknownFilePrefix(job, dimsUnknownFilePrefix);

    //set up the block size
    MRJobConfiguration.setBlocksSizes(job, realIndexes, realbrlens, realbclens);

    //set up the grouped aggregate instructions that will happen in the combiner and reducer
    MRJobConfiguration.setGroupedAggInstructions(job, grpAggInstructions);

    //set up the instructions that will happen in the reducer, after the aggregation instructions
    MRJobConfiguration.setInstructionsInReducer(job, simpleReduceInstructions);

    //set up the number of reducers
    MRJobConfiguration.setNumReducers(job, numReducers, numReducers);

    //set up the replication factor for the results
    job.setInt(MRConfigurationNames.DFS_REPLICATION, replication);

    //set up custom map/reduce configurations
    DMLConfig config = ConfigurationManager.getDMLConfig();
    MRJobConfiguration.setupCustomMRConfigurations(job, config);

    //set up what matrices are needed to pass from the mapper to reducer
    MRJobConfiguration.setUpOutputIndexesForMapper(job, realIndexes, null, null, grpAggInstructions,
            resultIndexes);

    MatrixCharacteristics[] stats = new MatrixCharacteristics[resultIndexes.length];
    for (int i = 0; i < resultIndexes.length; i++)
        stats[i] = new MatrixCharacteristics();

    // Print the complete instruction
    if (LOG.isTraceEnabled())
        inst.printCompleteMRJobInstruction(stats);

    byte[] resultDimsUnknown = new byte[resultIndexes.length];
    // Update resultDimsUnknown based on computed "stats"
    for (int i = 0; i < resultIndexes.length; i++)
        resultDimsUnknown[i] = (byte) 2;

    //set up the multiple output files, and their format information
    MRJobConfiguration.setUpMultipleOutputs(job, resultIndexes, resultDimsUnknown, outputs, outputInfos, false);

    // configure mapper and the mapper output key value pairs
    job.setMapperClass(GroupedAggMRMapper.class);
    job.setCombinerClass(GroupedAggMRCombiner.class);
    job.setMapOutputKeyClass(TaggedMatrixIndexes.class);
    job.setMapOutputValueClass(WeightedCell.class);

    //configure reducer
    job.setReducerClass(GroupedAggMRReducer.class);

    //set unique working dir
    MRJobConfiguration.setUniqueWorkingDir(job);

    //execute job
    RunningJob runjob = JobClient.runJob(job);

    //get important output statistics
    Group group = runjob.getCounters().getGroup(MRJobConfiguration.NUM_NONZERO_CELLS);
    for (int i = 0; i < resultIndexes.length; i++) {
        // number of non-zeros
        stats[i] = new MatrixCharacteristics();
        stats[i].setNonZeros(group.getCounter(Integer.toString(i)));
    }

    String dir = dimsUnknownFilePrefix + "/" + runjob.getID().toString() + "_dimsFile";
    stats = MapReduceTool.processDimsFiles(dir, stats);
    MapReduceTool.deleteFileIfExistOnHDFS(dir);

    return new JobReturn(stats, outputInfos, runjob.isSuccessful());
}
From source file:org.apache.tez.mapreduce.examples.MapredWordCount.java
License:Apache License
/**
 * The main driver for the word count map/reduce program.
 * Invoke this method to submit the map/reduce job.
 * @throws IOException When there are communication problems with the
 *                     job tracker.
 */
public int run(String[] args) throws Exception {
    JobConf conf = new JobConf(getConf(), MapredWordCount.class);
    conf.setJobName("wordcount");
    LOG.info("Running WordCount job using mapred apis");

    // the keys are words (strings)
    conf.setOutputKeyClass(Text.class);
    // the values are counts (ints)
    conf.setOutputValueClass(IntWritable.class);

    conf.setMapperClass(MapClass.class);
    conf.setCombinerClass(Reduce.class);
    conf.setReducerClass(Reduce.class);

    List<String> other_args = new ArrayList<String>();
    for (int i = 0; i < args.length; ++i) {
        try {
            if ("-m".equals(args[i])) {
                conf.setNumMapTasks(Integer.parseInt(args[++i]));
            } else if ("-r".equals(args[i])) {
                conf.setNumReduceTasks(Integer.parseInt(args[++i]));
            } else {
                other_args.add(args[i]);
            }
        } catch (NumberFormatException except) {
            LOG.error("Integer expected instead of " + args[i]);
            return printUsage();
        } catch (ArrayIndexOutOfBoundsException except) {
            LOG.error("Required parameter missing from " + args[i - 1]);
            return printUsage();
        }
    }
    // Make sure there are exactly 2 parameters left.
    if (other_args.size() != 2) {
        LOG.error("Wrong number of parameters: " + other_args.size() + " instead of 2.");
        return printUsage();
    }
    FileInputFormat.setInputPaths(conf, other_args.get(0));
    FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1)));

    JobClient.runJob(conf);
    return 0;
}
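Here the same Reduce class is registered as both combiner and reducer. That is only safe because summing counts is commutative and associative and because the map output types (Text, IntWritable) match the final output types, so the reducer can be applied to partial map-side output and again to the merged stream. A minimal sketch of what such a dual-purpose reducer looks like in the old mapred API; this is an assumption about the general shape of the example's Reduce class, not its actual source:

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Sums per-word counts and emits the same (Text, IntWritable) pair it consumes,
// so the framework can run it as a combiner on map output and again as the reducer.
public class SumReduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
    public void reduce(Text key, Iterator<IntWritable> values,
            OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
        int sum = 0;
        while (values.hasNext()) {
            sum += values.next().get();
        }
        output.collect(key, new IntWritable(sum));
    }
}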
From source file:org.archive.jbs.Merge.java
License:Apache License
public int run(String[] args) throws Exception { if (args.length < 2) { System.err.println("jbs.Merge <output> <input>..."); return 1; }/*from w w w. j av a 2 s.c o m*/ JobConf conf = new JobConf(getConf(), Merge.class); conf.setOutputKeyClass(Text.class); conf.setOutputValueClass(Text.class); conf.setCombinerClass(Reduce.class); conf.setReducerClass(Reduce.class); // Choose the outputformat to either merge or index the records // // org.archive.jbs.lucene.LuceneOutputFormat // - builds local Lucene index // // org.archive.jbs.solr.SolrOutputFormat // - sends documents to remote Solr server // // org.apache.hadoop.mapred.MapFileOutputFormat // - writes merged documents to Hadoop MapFile conf.setOutputFormat((Class) Class .forName(conf.get("jbs.outputformat.class", "org.apache.hadoop.mapred.MapFileOutputFormat"))); // Set the Hadoop job name to incorporate the output format name. String formatName = conf.getOutputFormat().getClass().getName(); conf.setJobName("jbs.Merge " + formatName.substring(formatName.lastIndexOf('.') != -1 ? (formatName.lastIndexOf('.') + 1) : 0)); // Add the input paths as either NutchWAX segment directories or // text .dup files. for (int i = 1; i < args.length; i++) { Path p = new Path(args[i]); // Expand any file globs and then check each matching path FileStatus[] files = FileSystem.get(conf).globStatus(p); for (FileStatus file : files) { if (file.isDir()) { // If it's a directory, then check if it is a Nutch segment, otherwise treat as a SequenceFile. if (p.getFileSystem(conf).exists(new Path(file.getPath(), "parse_data"))) { LOG.info("Input NutchWax: " + file.getPath()); MultipleInputs.addInputPath(conf, new Path(file.getPath(), "parse_data"), SequenceFileInputFormat.class, NutchMapper.class); MultipleInputs.addInputPath(conf, new Path(file.getPath(), "parse_text"), SequenceFileInputFormat.class, NutchMapper.class); } else { // Assume it's a SequenceFile of JSON-encoded Documents. LOG.info("Input Document: " + file.getPath()); MultipleInputs.addInputPath(conf, file.getPath(), SequenceFileInputFormat.class, DocumentMapper.class); } } else { // Not a directory, assume it's a text file, either CDX or property specifications. LOG.info("Input TextFile: " + file.getPath()); MultipleInputs.addInputPath(conf, file.getPath(), TextInputFormat.class, TextMapper.class); } } } FileOutputFormat.setOutputPath(conf, new Path(args[0])); RunningJob rj = JobClient.runJob(conf); return rj.isSuccessful() ? 0 : 1; }
From source file:org.archive.jbs.misc.LanguageIdent.java
License:Apache License
public int run(String[] args) throws Exception { if (args.length < 2) { System.err.println("LanguageIdent <output> <input>..."); return 1; }/* w w w.ja va2 s. c o m*/ JobConf conf = new JobConf(getConf(), LanguageIdent.class); conf.setJobName("LanguageIdent"); conf.setOutputKeyClass(Text.class); conf.setOutputValueClass(Text.class); conf.setMapperClass(Map.class); conf.setCombinerClass(Reduce.class); conf.setReducerClass(Reduce.class); conf.setOutputFormat(TextOutputFormat.class); // Assume the inputs are NutchWAX segments. for (int i = 1; i < args.length; i++) { Path p = new Path(args[i]); if (p.getFileSystem(conf).isFile(p)) { // FIXME: Emit an error message. } else { // No need to process the metadata, just the page contents. // MultipleInputs.addInputPath( conf, new Path( p, "parse_data" ), SequenceFileInputFormat.class, Map.class ); MultipleInputs.addInputPath(conf, new Path(p, "parse_text"), SequenceFileInputFormat.class, Map.class); } } FileOutputFormat.setOutputPath(conf, new Path(args[0])); JobClient.runJob(conf); return 0; }
From source file:org.archive.jbs.misc.LinkCounts.java
License:Apache License
public int run(String[] args) throws Exception { if (args.length < 2) { System.err.println("LinkCounts <output> <input>..."); return 1; }// www. j a va2 s.c o m JobConf conf = new JobConf(getConf(), LinkCounts.class); conf.setJobName("LinkCounts"); conf.setOutputKeyClass(Text.class); conf.setOutputValueClass(LongWritable.class); conf.setMapperClass(Map.class); conf.setCombinerClass(Reduce.class); conf.setReducerClass(Reduce.class); conf.setOutputFormat(TextOutputFormat.class); // Assume the inputs are NutchWAX segments. for (int i = 1; i < args.length; i++) { Path p = new Path(args[i]); if (p.getFileSystem(conf).isFile(p)) { // FIXME: Emit an error message. } else { MultipleInputs.addInputPath(conf, new Path(p, "parse_data"), SequenceFileInputFormat.class, Map.class); MultipleInputs.addInputPath(conf, new Path(p, "parse_text"), SequenceFileInputFormat.class, Map.class); } } FileOutputFormat.setOutputPath(conf, new Path(args[0])); JobClient.runJob(conf); return 0; }
From source file:org.archive.jbs.misc.NGrams.java
License:Apache License
public int run(String[] args) throws Exception { if (args.length < 2) { System.err.println("NGrams <output> <input>..."); return 1; }/* www . j a v a 2 s. c o m*/ JobConf conf = new JobConf(getConf(), NGrams.class); conf.setJobName("NGrams"); conf.setOutputKeyClass(Text.class); conf.setOutputValueClass(LongWritable.class); conf.setMapperClass(Map.class); conf.setCombinerClass(Reduce.class); conf.setReducerClass(Reduce.class); // FIXME: Do we need this when using the MultipleInputs class below? // Looks like the answer is no. // conf.setInputFormat(SequenceFileInputFormat.class); conf.setOutputFormat(TextOutputFormat.class); // Assume the inputs are NutchWAX segments. for (int i = 1; i < args.length; i++) { Path p = new Path(args[i]); if (p.getFileSystem(conf).isFile(p)) { // FIXME: Emit an error message. } else { MultipleInputs.addInputPath(conf, new Path(p, "parse_data"), SequenceFileInputFormat.class, Map.class); MultipleInputs.addInputPath(conf, new Path(p, "parse_text"), SequenceFileInputFormat.class, Map.class); } } FileOutputFormat.setOutputPath(conf, new Path(args[0])); JobClient.runJob(conf); return 0; }
From source file:org.archive.jbs.misc.URLPathPartCounter.java
License:Apache License
public int run(String[] args) throws Exception { if (args.length < 2) { System.err.println("URLPathPartCounter <output> <input>..."); return 1; }//ww w .j a va 2 s . c o m JobConf conf = new JobConf(getConf(), URLPathPartCounter.class); conf.setJobName("URLPathPartCounter"); conf.setOutputKeyClass(Text.class); conf.setOutputValueClass(LongWritable.class); conf.setMapperClass(Map.class); conf.setCombinerClass(Reduce.class); conf.setReducerClass(Reduce.class); // FIXME: Do we need this when using the MultipleInputs class below? // Looks like the answer is no. // conf.setInputFormat(SequenceFileInputFormat.class); conf.setOutputFormat(TextOutputFormat.class); // for (int i = 1; i < args.length; i++) { Path p = new Path(args[i]); if (p.getFileSystem(conf).isFile(p)) { // FIXME: Emit an error message. } else { MultipleInputs.addInputPath(conf, new Path(p, "parse_data"), SequenceFileInputFormat.class, Map.class); // We don't need the parse_text, just the metadata in parse_data. // MultipleInputs.addInputPath( conf, new Path( p, "parse_text" ), SequenceFileInputFormat.class, Map.class ); } } FileOutputFormat.setOutputPath(conf, new Path(args[0])); JobClient.runJob(conf); return 0; }
From source file:org.archive.nutchwax.PageRankDb.java
License:Apache License
private static JobConf createJob(Configuration config, Path pageRankDb, boolean normalize, boolean filter) {
    Path newPageRankDb = new Path("pagerankdb-" + Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));

    JobConf job = new NutchJob(config);
    job.setJobName("pagerankdb " + pageRankDb);

    job.setInputFormat(SequenceFileInputFormat.class);

    job.setMapperClass(PageRankDb.class);
    job.setCombinerClass(PageRankDbMerger.class);
    // if we don't run the mergeJob, perform normalization/filtering now
    if (normalize || filter) {
        try {
            FileSystem fs = FileSystem.get(config);
            if (!fs.exists(pageRankDb)) {
                job.setBoolean(LinkDbFilter.URL_FILTERING, filter);
                job.setBoolean(LinkDbFilter.URL_NORMALIZING, normalize);
            }
        } catch (Exception e) {
            LOG.warn("PageRankDb createJob: " + e);
        }
    }
    job.setReducerClass(PageRankDbMerger.class);

    FileOutputFormat.setOutputPath(job, newPageRankDb);
    job.setOutputFormat(MapFileOutputFormat.class);
    job.setBoolean("mapred.output.compress", false);
    job.setOutputKeyClass(Text.class);
    // DIFF: Use IntWritable instead of Inlinks as the output value type.
    job.setOutputValueClass(IntWritable.class);

    return job;
}
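Note that createJob configures only the processing classes and the output side (format, path, key/value types); it adds no input paths, and it registers PageRankDbMerger as both combiner and reducer. A hypothetical caller fragment, assuming it lives inside PageRankDb itself (createJob is private static); the input layout and method context are placeholders, not taken from the class:

// Hypothetical fragment inside PageRankDb; segment paths and subdirectory names are placeholders.
JobConf job = createJob(getConf(), new Path("crawl/pagerankdb"), true, true);
for (Path segment : segments)                                     // e.g. crawl/segments/* directories
    FileInputFormat.addInputPath(job, new Path(segment, "parse_data")); // assumed input subdirectory
JobClient.runJob(job);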