Usage examples for org.apache.hadoop.mapreduce.Counter.getValue()
long getValue();
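getValue() returns the counter's current value as a long. A minimal sketch of reading a counter after a job finishes (the class name and the choice of TaskCounter.MAP_OUTPUT_RECORDS are illustrative, not taken from the examples below):

import java.io.IOException;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CounterValueExample {
    /** Returns the number of map output records reported by a finished job. */
    public static long mapOutputRecords(Job job) throws IOException {
        Counters counters = job.getCounters();
        Counter counter = counters.findCounter(TaskCounter.MAP_OUTPUT_RECORDS);
        return counter.getValue(); // snapshot of the counter's current value
    }
}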
From source file: main.okapi.utils.Counters.java
License: Apache License
/**
 * Replaces the value of a counter with a new one.
 *
 * @param context      the task context that owns the counter
 * @param counterGroup the counter group name
 * @param counterName  the counter name
 * @param newValue     the value the counter should hold afterwards
 */
public static void updateCounter(Context context, String counterGroup, String counterName, long newValue) {
    Counter counter = context.getCounter(counterGroup, counterName);
    long oldValue = counter.getValue();
    counter.increment(newValue - oldValue);
}
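Hadoop counters only expose increment(), so the helper above emulates a "set" by reading the current value with getValue() and incrementing by the difference. A minimal sketch of the same pattern applied inline in a mapper's cleanup() (the mapper class, group name, counter name, and target value are illustrative assumptions):

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Mapper;

public class RowCountMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // "Set" the counter to an absolute value by incrementing with the delta,
        // mirroring the updateCounter() helper above. Group and counter names are made up.
        Counter counter = context.getCounter("EXAMPLE", "FINAL_ROW_COUNT");
        counter.increment(1000L - counter.getValue());
    }
}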
From source file: mvm.rya.accumulo.pig.IndexWritingTool.java
License: Apache License
@Override
public int run(final String[] args) throws Exception {
    Preconditions.checkArgument(args.length == 7, "java " + IndexWritingTool.class.getCanonicalName()
            + " hdfsSaveLocation sparqlFile cbinstance cbzk cbuser cbpassword rdfTablePrefix.");

    final String inputDir = args[0];
    final String sparqlFile = args[1];
    final String instStr = args[2];
    final String zooStr = args[3];
    final String userStr = args[4];
    final String passStr = args[5];
    final String tablePrefix = args[6];

    String sparql = FileUtils.readFileToString(new File(sparqlFile));

    Job job = new Job(getConf(), "Write HDFS Index to Accumulo");
    job.setJarByClass(this.getClass());

    Configuration jobConf = job.getConfiguration();
    jobConf.setBoolean("mapred.map.tasks.speculative.execution", false);
    setVarOrders(sparql, jobConf);

    TextInputFormat.setInputPaths(job, inputDir);
    job.setInputFormatClass(TextInputFormat.class);

    job.setMapperClass(MyMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);

    job.setNumReduceTasks(0);

    String tableName;
    if (zooStr.equals("mock")) {
        tableName = tablePrefix;
    } else {
        tableName = tablePrefix + "INDEX_" + UUID.randomUUID().toString().replace("-", "").toUpperCase();
    }
    setAccumuloOutput(instStr, zooStr, userStr, passStr, job, tableName);

    jobConf.set(sparql_key, sparql);

    int complete = job.waitForCompletion(true) ? 0 : -1;

    if (complete == 0) {
        String[] varOrders = jobConf.getStrings("varOrders");
        String orders = Joiner.on("\u0000").join(varOrders);

        Instance inst;
        if (zooStr.equals("mock")) {
            inst = new MockInstance(instStr);
        } else {
            inst = new ZooKeeperInstance(instStr, zooStr);
        }
        Connector conn = inst.getConnector(userStr, passStr.getBytes());
        BatchWriter bw = conn.createBatchWriter(tableName, 10, 5000, 1);

        Counters counters = job.getCounters();
        Counter c1 = counters.findCounter(cardCounter, cardCounter);

        Mutation m = new Mutation("~SPARQL");
        Value v = new Value(sparql.getBytes());
        m.put(new Text("" + c1.getValue()), new Text(orders), v);
        bw.addMutation(m);

        bw.close();

        return complete;
    } else {
        return complete;
    }
}
From source file: org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.MapReduceJobHistoryUpdater.java
License: Apache License
private void processJobFinishedEvent(PreparedStatement entityPS, PreparedStatement workflowUpdateNumCompletedPS,
        LoggingEvent logEvent, JobFinishedEvent historyEvent) {

    Counters counters = historyEvent.getMapCounters();
    long inputBytes = 0;
    if (counters != null) {
        for (CounterGroup group : counters) {
            for (Counter counter : group) {
                if (counter.getName().equals("HDFS_BYTES_READ"))
                    inputBytes += counter.getValue();
            }
        }
    }

    if (historyEvent.getFinishedReduces() != 0)
        counters = historyEvent.getReduceCounters();
    long outputBytes = 0;
    if (counters != null) {
        for (CounterGroup group : counters) {
            for (Counter counter : group) {
                if (counter.getName().equals("HDFS_BYTES_WRITTEN"))
                    outputBytes += counter.getValue();
            }
        }
    }

    try {
        entityPS.setLong(1, historyEvent.getFinishTime());
        entityPS.setInt(2, historyEvent.getFinishedMaps());
        entityPS.setInt(3, historyEvent.getFinishedReduces());
        entityPS.setInt(4, historyEvent.getFailedMaps());
        entityPS.setInt(5, historyEvent.getFailedReduces());
        entityPS.setLong(6, inputBytes);
        entityPS.setLong(7, outputBytes);
        entityPS.setString(8, historyEvent.getJobid().toString());
        entityPS.executeUpdate();

        // job finished events always have success status
        workflowUpdateNumCompletedPS.setLong(1, historyEvent.getFinishTime());
        workflowUpdateNumCompletedPS.setLong(2, historyEvent.getFinishTime());
        workflowUpdateNumCompletedPS.setString(3, historyEvent.getJobid().toString());
        workflowUpdateNumCompletedPS.executeUpdate();
    } catch (SQLException sqle) {
        LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + historyEvent.getJobid()
                + " into " + JOB_TABLE, sqle);
    }

    updateJobStatsAtFinish(historyEvent.getJobid().toString());
}
From source file: org.apache.blur.mapreduce.lib.BlurInputFormatTest.java
License: Apache License
private void assertMapTask(int i, Counters counters) {
    for (CounterGroup counterGroup : counters) {
        String name = counterGroup.getName();
        boolean jobCounterGroup = false;
        if (name.equals("org.apache.hadoop.mapreduce.JobCounter")) {
            jobCounterGroup = true;
        } else if (name.equals("org.apache.hadoop.mapred.JobInProgress$Counter")) {
            jobCounterGroup = true;
        }
        if (jobCounterGroup) {
            for (Counter counter : counterGroup) {
                if (counter.getName().equals("TOTAL_LAUNCHED_MAPS")) {
                    assertEquals(1, counter.getValue());
                    return;
                }
            }
        }
    }
    fail();
}
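The test above iterates counter groups by name so that it works with both the old and new group names. When only the Hadoop 2 API is in play, the same value can be fetched through the JobCounter enum. A minimal sketch (the class and method names are illustrative, not from the test source):

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobCounter;

public class LaunchedMapsExample {
    /** Reads TOTAL_LAUNCHED_MAPS via the JobCounter enum instead of scanning groups by name. */
    public static long launchedMaps(Counters counters) {
        return counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue();
    }
}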
From source file: org.apache.crunch.impl.mem.CountersWrapper.java
License: Apache License
public synchronized void incrAllCounters(Counters other) {
    for (CounterGroup cg : other) {
        for (Counter c : cg) {
            findCounter(cg.getName(), c.getName()).increment(c.getValue());
        }
    }
}
From source file: org.apache.druid.indexer.HadoopDruidIndexerMapper.java
License: Apache License
private void handleParseException(ParseException pe, Context context) {
    context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER).increment(1);
    Counter unparseableCounter = context
            .getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_UNPARSEABLE_COUNTER);
    Counter processedWithErrorsCounter = context
            .getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER);

    if (pe.isFromPartiallyValidRow()) {
        processedWithErrorsCounter.increment(1);
    } else {
        unparseableCounter.increment(1);
    }

    if (config.isLogParseExceptions()) {
        log.error(pe, "Encountered parse exception: ");
    }

    long rowsUnparseable = unparseableCounter.getValue();
    long rowsProcessedWithError = processedWithErrorsCounter.getValue();
    if (rowsUnparseable + rowsProcessedWithError > config.getMaxParseExceptions()) {
        log.error("Max parse exceptions exceeded, terminating task...");
        throw new RuntimeException("Max parse exceptions exceeded, terminating task...", pe);
    }
}
From source file: org.apache.druid.indexer.IndexGeneratorJob.java
License: Apache License
@Override
public boolean run() {
    try {
        job = Job.getInstance(new Configuration(),
                StringUtils.format("%s-index-generator-%s", config.getDataSource(), config.getIntervals()));

        job.getConfiguration().set("io.sort.record.percent", "0.23");

        JobHelper.injectSystemProperties(job);
        config.addJobProperties(job);
        // inject druid properties like deep storage bindings
        JobHelper.injectDruidProperties(job.getConfiguration(), config.getAllowedHadoopPrefix());

        job.setMapperClass(IndexGeneratorMapper.class);
        job.setMapOutputValueClass(BytesWritable.class);

        SortableBytes.useSortableBytesAsMapOutputKey(job, IndexGeneratorPartitioner.class);

        int numReducers = Iterables.size(config.getAllBuckets().get());
        if (numReducers == 0) {
            throw new RuntimeException("No buckets?? seems there is no data to index.");
        }

        if (config.getSchema().getTuningConfig().getUseCombiner()) {
            job.setCombinerClass(IndexGeneratorCombiner.class);
            job.setCombinerKeyGroupingComparatorClass(BytesWritable.Comparator.class);
        }

        job.setNumReduceTasks(numReducers);

        setReducerClass(job);
        job.setOutputKeyClass(BytesWritable.class);
        job.setOutputValueClass(Text.class);
        job.setOutputFormatClass(IndexGeneratorOutputFormat.class);
        FileOutputFormat.setOutputPath(job, config.makeIntermediatePath());

        config.addInputPaths(job);

        config.intoConfiguration(job);

        JobHelper.setupClasspath(JobHelper.distributedClassPath(config.getWorkingPath()),
                JobHelper.distributedClassPath(config.makeIntermediatePath()), job);

        job.submit();
        log.info("Job %s submitted, status available at %s", job.getJobName(), job.getTrackingURL());

        // Store the jobId in the file
        if (job.getJobID() != null) {
            JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), job.getJobID().toString());
        }

        try {
            boolean success = job.waitForCompletion(true);

            Counters counters = job.getCounters();
            if (counters == null) {
                log.info("No counters found for job [%s]", job.getJobName());
            } else {
                Counter invalidRowCount = counters
                        .findCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER);
                if (invalidRowCount != null) {
                    jobStats.setInvalidRowCount(invalidRowCount.getValue());
                } else {
                    log.info("No invalid row counter found for job [%s]", job.getJobName());
                }
            }

            return success;
        } catch (IOException ioe) {
            if (!Utils.checkAppSuccessForJobIOException(ioe, job, config.isUseYarnRMJobStatusFallback())) {
                throw ioe;
            } else {
                return true;
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
From source file: org.apache.falcon.job.HiveReplicationCounters.java
License: Apache License
private void populateCustomCountersMap(Counters jobCounters) {
    for (ReplicationJobCountersList counterVal : ReplicationJobCountersList.values()) {
        if (counterVal == ReplicationJobCountersList.TIMETAKEN) {
            continue;
        }

        Counter counter = jobCounters.findCounter(counterVal);
        if (counter != null) {
            String counterName = counter.getName();
            long counterValue = counter.getValue();
            countersMap.put(counterName, counterValue);
        }
    }
}
From source file: org.apache.falcon.job.JobCounters.java
License: Apache License
protected void populateReplicationCountersMap(Counters jobCounters) {
    for (CopyMapper.Counter copyCounterVal : CopyMapper.Counter.values()) {
        if (ReplicationJobCountersList.getCountersKey(copyCounterVal.name()) != null) {
            Counter counter = jobCounters.findCounter(copyCounterVal);
            if (counter != null) {
                String counterName = counter.getName();
                long counterValue = counter.getValue();
                countersMap.put(counterName, counterValue);
            }
        }
    }
}
From source file: org.apache.gobblin.compaction.action.CompactionCompleteFileOperationAction.java
License: Apache License
/**
 * Replace or append the destination folder with new files from the map-reduce job.
 * Create a record count file containing the number of records that have been processed.
 */
public void onCompactionJobComplete(FileSystemDataset dataset) throws IOException {
    if (configurator != null && configurator.isJobCreated()) {
        CompactionPathParser.CompactionParserResult result = new CompactionPathParser(state).parse(dataset);
        Path tmpPath = configurator.getMrOutputPath();
        Path dstPath = new Path(result.getDstAbsoluteDir());

        // this is append delta mode due to the compaction rename source dir mode being enabled
        boolean appendDeltaOutput = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED,
                MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);

        Job job = this.configurator.getConfiguredJob();

        long newTotalRecords = 0;
        long oldTotalRecords = helper.readRecordCount(new Path(result.getDstAbsoluteDir()));
        long executeCount = helper.readExecutionCount(new Path(result.getDstAbsoluteDir()));

        List<Path> goodPaths = CompactionJobConfigurator.getGoodFiles(job, tmpPath, this.fs,
                ImmutableList.of(configurator.getFileExtension()));

        if (appendDeltaOutput) {
            FsPermission permission = HadoopUtils.deserializeFsPermission(this.state,
                    MRCompactorJobRunner.COMPACTION_JOB_OUTPUT_DIR_PERMISSION, FsPermission.getDefault());
            WriterUtils.mkdirsWithRecursivePermission(this.fs, dstPath, permission);

            // append files under mr output to destination
            for (Path filePath : goodPaths) {
                String fileName = filePath.getName();
                log.info(String.format("Adding %s to %s", filePath.toString(), dstPath));
                Path outPath = new Path(dstPath, fileName);

                if (!this.fs.rename(filePath, outPath)) {
                    throw new IOException(
                            String.format("Unable to move %s to %s", filePath.toString(), outPath.toString()));
                }
            }

            // Obtain record count from input file names.
            // We don't get the record count from a map-reduce counter because in the next run, the threshold
            // (delta record) calculation is based on the input file names. By pre-defining which input folders are
            // involved in the MR execution, it is easy to track how many files have been involved in MR so far,
            // which makes it possible to calculate the total number of records (all previous runs + current run).
            newTotalRecords = this.configurator.getFileNameRecordCount();
        } else {
            this.fs.delete(dstPath, true);
            FsPermission permission = HadoopUtils.deserializeFsPermission(this.state,
                    MRCompactorJobRunner.COMPACTION_JOB_OUTPUT_DIR_PERMISSION, FsPermission.getDefault());

            WriterUtils.mkdirsWithRecursivePermission(this.fs, dstPath.getParent(), permission);
            if (!this.fs.rename(tmpPath, dstPath)) {
                throw new IOException(String.format("Unable to move %s to %s", tmpPath, dstPath));
            }

            // Obtain record count from the map-reduce job counter.
            // We don't get the record count from file names because tracking which files are actually involved in
            // the MR execution can be hard. New minutely data is rolled up into hourly folders, so from the daily
            // compaction perspective we cannot tell which files are newly added (we simply pass all hourly folders
            // to the MR job instead of individual files).
            Counter counter = job.getCounters().findCounter(RecordKeyMapperBase.EVENT_COUNTER.RECORD_COUNT);
            newTotalRecords = counter.getValue();
        }

        State compactState = helper.loadState(new Path(result.getDstAbsoluteDir()));
        compactState.setProp(CompactionSlaEventHelper.RECORD_COUNT_TOTAL, Long.toString(newTotalRecords));
        compactState.setProp(CompactionSlaEventHelper.EXEC_COUNT_TOTAL, Long.toString(executeCount + 1));
        compactState.setProp(CompactionSlaEventHelper.MR_JOB_ID,
                this.configurator.getConfiguredJob().getJobID().toString());
        helper.saveState(new Path(result.getDstAbsoluteDir()), compactState);

        log.info("Updating record count from {} to {} in {} [{}]", oldTotalRecords, newTotalRecords, dstPath,
                executeCount + 1);

        // submit events for record count
        if (eventSubmitter != null) {
            Map<String, String> eventMetadataMap = ImmutableMap.of(CompactionSlaEventHelper.DATASET_URN,
                    dataset.datasetURN(), CompactionSlaEventHelper.RECORD_COUNT_TOTAL,
                    Long.toString(newTotalRecords), CompactionSlaEventHelper.PREV_RECORD_COUNT_TOTAL,
                    Long.toString(oldTotalRecords), CompactionSlaEventHelper.EXEC_COUNT_TOTAL,
                    Long.toString(executeCount + 1), CompactionSlaEventHelper.MR_JOB_ID,
                    this.configurator.getConfiguredJob().getJobID().toString());
            this.eventSubmitter.submit(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT, eventMetadataMap);
        }
    }
}