Usage examples for org.apache.hadoop.mapreduce.TaskAttemptID.forName
public static TaskAttemptID forName(String str) throws IllegalArgumentException
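forName parses an attempt id of the form attempt_<jtIdentifier>_<jobId>_<type>_<taskId>_<attemptId>, e.g. attempt_200707121733_0003_m_000005_0 (the sample from the Hadoop javadoc). It returns null when str is null and throws IllegalArgumentException when the string does not parse. A minimal standalone sketch (the class name is illustrative):

import org.apache.hadoop.mapreduce.TaskAttemptID;

public class ForNameExample {
    public static void main(String[] args) {
        // Sample id from the Hadoop javadoc: the zeroth attempt of the fifth
        // map task of the third job of the jobtracker started at 200707121733.
        TaskAttemptID id = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
        System.out.println(id.getJobID());          // job_200707121733_0003
        System.out.println(id.getTaskID().getId()); // 5
        System.out.println(id.getId());             // 0 (attempt number)
    }
}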
From source file:org.apache.cassandra.hadoop.cql3.CqlPagingInputFormat.java
License:Apache License
public RecordReader<Map<String, ByteBuffer>, Map<String, ByteBuffer>> getRecordReader(InputSplit split,
        JobConf jobConf, final Reporter reporter) throws IOException {
    TaskAttemptContext tac = new TaskAttemptContext(jobConf,
            TaskAttemptID.forName(jobConf.get(MAPRED_TASK_ID))) {
        @Override
        public void progress() {
            reporter.progress();
        }
    };

    CqlPagingRecordReader recordReader = new CqlPagingRecordReader();
    recordReader.initialize((org.apache.hadoop.mapreduce.InputSplit) split, tac);
    return recordReader;
}
From source file:org.apache.cassandra.hadoop2.ColumnFamilyInputFormat.java
License:Apache License
public org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>> getRecordReader(
        org.apache.hadoop.mapred.InputSplit split, JobConf jobConf, final Reporter reporter) throws IOException {
    TaskAttemptContext tac = new TaskAttemptContextImpl(jobConf,
            TaskAttemptID.forName(jobConf.get(MAPRED_TASK_ID))) {
        @Override
        public void progress() {
            reporter.progress();
        }
    };

    ColumnFamilyRecordReader recordReader = new ColumnFamilyRecordReader(
            jobConf.getInt(CASSANDRA_HADOOP_MAX_KEY_SIZE, CASSANDRA_HADOOP_MAX_KEY_SIZE_DEFAULT));
    recordReader.initialize((org.apache.hadoop.mapreduce.InputSplit) split, tac);
    return recordReader;
}
From source file:org.apache.cassandra.hadoop2.cql3.CqlPagingInputFormat.java
License:Apache License
public RecordReader<Map<String, ByteBuffer>, Map<String, ByteBuffer>> getRecordReader(InputSplit split,
        JobConf jobConf, final Reporter reporter) throws IOException {
    TaskAttemptContext tac = new TaskAttemptContextImpl(jobConf,
            TaskAttemptID.forName(jobConf.get(MAPRED_TASK_ID))) {
        @Override
        public void progress() {
            reporter.progress();
        }
    };

    CqlPagingRecordReader recordReader = new CqlPagingRecordReader();
    recordReader.initialize((org.apache.hadoop.mapreduce.InputSplit) split, tac);
    return recordReader;
}
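The three Cassandra examples above share one pattern: the old-API getRecordReader recovers the current attempt id from the JobConf via forName (MAPRED_TASK_ID is the "mapred.task.id" property) and wraps it in a new-API TaskAttemptContext whose progress() forwards to the mapred Reporter. A distilled sketch of that bridge, assuming Hadoop 2's TaskAttemptContextImpl (MapredBridge and contextFor are hypothetical names):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public final class MapredBridge {
    // Build a new-API context for the attempt recorded in the old-API JobConf,
    // keeping the old-API framework informed of progress.
    static TaskAttemptContext contextFor(JobConf jobConf, final Reporter reporter) {
        TaskAttemptID attemptId = TaskAttemptID.forName(jobConf.get("mapred.task.id"));
        return new TaskAttemptContextImpl(jobConf, attemptId) {
            @Override
            public void progress() {
                reporter.progress();
            }
        };
    }
}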
From source file:org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormatBase.java
License:Apache License
/**
 * Create the temporary output file for the Hadoop RecordWriter.
 *
 * @param taskNumber The number of the parallel instance.
 * @param numTasks   The number of parallel tasks.
 * @throws java.io.IOException
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    // enforce sequential open() calls
    synchronized (OPEN_MUTEX) {
        if (Integer.toString(taskNumber + 1).length() > 6) {
            throw new IOException("Task id too large.");
        }

        this.taskNumber = taskNumber + 1;

        // for hadoop 2.2
        this.configuration.set("mapreduce.output.basename", "tmp");

        TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
                + String.format("%" + (6 - Integer.toString(taskNumber + 1).length()) + "s", " ").replace(" ", "0")
                + Integer.toString(taskNumber + 1) + "_0");

        this.configuration.set("mapred.task.id", taskAttemptID.toString());
        this.configuration.setInt("mapred.task.partition", taskNumber + 1);
        // for hadoop 2.2
        this.configuration.set("mapreduce.task.attempt.id", taskAttemptID.toString());
        this.configuration.setInt("mapreduce.task.partition", taskNumber + 1);

        try {
            this.context = HadoopUtils.instantiateTaskAttemptContext(this.configuration, taskAttemptID);
            this.outputCommitter = this.mapreduceOutputFormat.getOutputCommitter(this.context);
            this.outputCommitter.setupJob(HadoopUtils.instantiateJobContext(this.configuration, new JobID()));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        this.context.getCredentials().addAll(this.credentials);
        Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
        if (currentUserCreds != null) {
            this.context.getCredentials().addAll(currentUserCreds);
        }

        // compatible with hadoop 2.2.0; the temporary output directory differs from hadoop 1.2.1
        if (outputCommitter instanceof FileOutputCommitter) {
            this.configuration.set("mapreduce.task.output.dir",
                    ((FileOutputCommitter) this.outputCommitter).getWorkPath().toString());
        }

        try {
            this.recordWriter = this.mapreduceOutputFormat.getRecordWriter(this.context);
        } catch (InterruptedException e) {
            throw new IOException("Could not create RecordWriter.", e);
        }
    }
}
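The attempt id handed to forName here is synthesized rather than read from configuration: an empty jtIdentifier, job 0000, reduce task taskNumber + 1, attempt 0. The space-pad-then-replace expression zero-pads the task number to six digits, which a single %06d format also achieves (and %06d sidesteps the corner case of an exactly six-digit task number, where the computed "%0s" format string is invalid). A minimal round-trip sketch (class name illustrative); finalizeGlobal and the hadoopcompatibility variant below build the same id:

import org.apache.hadoop.mapreduce.TaskAttemptID;

public class SyntheticAttemptId {
    public static void main(String[] args) {
        int taskNumber = 41; // zero-based index of the parallel instance
        // Zero-pad the one-based task number to six digits, as open() does.
        String name = String.format("attempt__0000_r_%06d_0", taskNumber + 1);
        TaskAttemptID id = TaskAttemptID.forName(name);
        System.out.println(id);                     // attempt__0000_r_000042_0
        System.out.println(id.getTaskID().getId()); // 42
    }
}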
From source file:org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormatBase.java
License:Apache License
@Override
public void finalizeGlobal(int parallelism) throws IOException {
    JobContext jobContext;
    TaskAttemptContext taskContext;
    try {
        TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
                + String.format("%" + (6 - Integer.toString(1).length()) + "s", " ").replace(" ", "0")
                + Integer.toString(1) + "_0");

        jobContext = HadoopUtils.instantiateJobContext(this.configuration, new JobID());
        taskContext = HadoopUtils.instantiateTaskAttemptContext(this.configuration, taskAttemptID);
        this.outputCommitter = this.mapreduceOutputFormat.getOutputCommitter(taskContext);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    jobContext.getCredentials().addAll(this.credentials);
    Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
    if (currentUserCreds != null) {
        jobContext.getCredentials().addAll(currentUserCreds);
    }

    // finalize HDFS output format
    if (this.outputCommitter != null) {
        this.outputCommitter.commitJob(jobContext);
    }
}
From source file:org.apache.flink.hadoopcompatibility.mapreduce.HadoopOutputFormat.java
License:Apache License
/**
 * Create the temporary output file for the Hadoop RecordWriter.
 *
 * @param taskNumber The number of the parallel instance.
 * @param numTasks   The number of parallel tasks.
 * @throws IOException
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    if (Integer.toString(taskNumber + 1).length() > 6) {
        throw new IOException("Task id too large.");
    }

    // for hadoop 2.2
    this.configuration.set("mapreduce.output.basename", "tmp");

    TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
            + String.format("%" + (6 - Integer.toString(taskNumber + 1).length()) + "s", " ").replace(" ", "0")
            + Integer.toString(taskNumber + 1) + "_0");

    this.configuration.set("mapred.task.id", taskAttemptID.toString());
    this.configuration.setInt("mapred.task.partition", taskNumber + 1);
    // for hadoop 2.2
    this.configuration.set("mapreduce.task.attempt.id", taskAttemptID.toString());
    this.configuration.setInt("mapreduce.task.partition", taskNumber + 1);

    try {
        this.context = HadoopUtils.instantiateTaskAttemptContext(this.configuration, taskAttemptID);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    this.fileOutputCommitter = new FileOutputCommitter(new Path(this.configuration.get("mapred.output.dir")),
            context);

    try {
        this.fileOutputCommitter.setupJob(HadoopUtils.instantiateJobContext(this.configuration, new JobID()));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    // compatible with hadoop 2.2.0; the temporary output directory differs from hadoop 1.2.1
    this.configuration.set("mapreduce.task.output.dir", this.fileOutputCommitter.getWorkPath().toString());

    try {
        this.recordWriter = this.mapreduceOutputFormat.getRecordWriter(this.context);
    } catch (InterruptedException e) {
        throw new IOException("Could not create RecordWriter.", e);
    }
}
From source file:org.apache.pig.backend.hadoop.streaming.HadoopExecutableManager.java
License:Apache License
/**
 * Should the stderr data of this task be persisted on HDFS?
 *
 * @param limit  maximum number of tasks whose stderr log-files are persisted
 * @param taskId id of the task
 * @return <code>true</code> if stderr data of the task should be persisted on
 *         HDFS, <code>false</code> otherwise
 */
private boolean writeErrorToHDFS(int limit, String taskId) {
    if (command.getPersistStderr() && taskId != null) {
        int tipId = TaskAttemptID.forName(taskId).getTaskID().getId();
        return tipId < command.getLogFilesLimit();
    }
    return false;
}
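Here forName serves only to recover the numeric task index: with the javadoc's sample id attempt_200707121733_0003_m_000005_0, getTaskID().getId() is 5, so stderr is persisted only while the configured log-files limit exceeds 5. Note that the body never reads the limit parameter; the comparison uses command.getLogFilesLimit(), presumably the same value. The taskId != null guard matters because forName returns null for null input, which would otherwise surface as a NullPointerException at getTaskID().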