List of usage examples for org.apache.hadoop.mapred.TaskAttemptContextImpl
public TaskAttemptContextImpl(Configuration conf, TaskAttemptID taskId)
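A minimal standalone sketch of calling this constructor (the attempt-id string and variable names below are illustrative): the old-API context is typically built from a JobConf, which extends Configuration, plus a TaskAttemptID either parsed from the framework-supplied id or synthesized by hand.

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TaskAttemptContextImpl;
    import org.apache.hadoop.mapred.TaskAttemptID;

    JobConf jobConf = new JobConf();
    // Parse an id of the form attempt_<jtIdentifier>_<jobId>_<m|r>_<taskId>_<attemptId>.
    TaskAttemptID attemptId = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
    TaskAttemptContextImpl context = new TaskAttemptContextImpl(jobConf, attemptId);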
From source file:com.zjy.mongo.mapred.output.MongoRecordWriter.java
License:Apache License
/**
 * Create a new MongoRecordWriter.
 * @param conf the job configuration
 */
public MongoRecordWriter(final JobConf conf) {
    super(Collections.<DBCollection>emptyList(),
            new TaskAttemptContextImpl(conf, TaskAttemptID.forName(conf.get("mapred.task.id"))));
    configuration = conf;
}
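This writer rebuilds its attempt context by reading back the "mapred.task.id" property that the MapReduce framework stores in the JobConf. TaskAttemptID.forName(null) returns null, so when the writer is created outside a real task (for example in a unit test) the property generally has to be set by hand first; a sketch, with an illustrative id:

    JobConf conf = new JobConf();
    conf.set("mapred.task.id", "attempt_200707121733_0003_m_000005_0"); // normally set by the framework
    MongoRecordWriter writer = new MongoRecordWriter(conf);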
From source file:org.apache.carbondata.presto.CarbondataRecordSetProvider.java
License:Apache License
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transactionHandle, ConnectorSession session,
        ConnectorSplit split, List<? extends ColumnHandle> columns) {
    CarbondataSplit carbondataSplit = checkType(split, CarbondataSplit.class, "split is not class CarbondataSplit");
    checkArgument(carbondataSplit.getConnectorId().equals(connectorId), "split is not for this connector");

    CarbonProjection carbonProjection = new CarbonProjection();
    // Convert all column handles
    ImmutableList.Builder<CarbondataColumnHandle> handles = ImmutableList.builder();
    for (ColumnHandle handle : columns) {
        handles.add(checkType(handle, CarbondataColumnHandle.class, "handle"));
        carbonProjection.addColumn(((CarbondataColumnHandle) handle).getColumnName());
    }

    CarbonTableCacheModel tableCacheModel = carbonTableReader.getCarbonCache(carbondataSplit.getSchemaTableName());
    checkNotNull(tableCacheModel, "tableCacheModel should not be null");
    checkNotNull(tableCacheModel.carbonTable, "tableCacheModel.carbonTable should not be null");
    checkNotNull(tableCacheModel.tableInfo, "tableCacheModel.tableInfo should not be null");

    // Build Query Model
    CarbonTable targetTable = tableCacheModel.carbonTable;
    QueryModel queryModel = null;
    try {
        Configuration conf = new Configuration();
        conf.set(CarbonTableInputFormat.INPUT_SEGMENT_NUMBERS, "");
        String carbonTablePath = PathFactory.getInstance()
                .getCarbonTablePath(targetTable.getAbsoluteTableIdentifier().getStorePath(),
                        targetTable.getCarbonTableIdentifier(), null)
                .getPath();
        conf.set(CarbonTableInputFormat.INPUT_DIR, carbonTablePath);

        JobConf jobConf = new JobConf(conf);
        CarbonTableInputFormat carbonTableInputFormat = createInputFormat(jobConf, tableCacheModel.carbonTable,
                PrestoFilterUtil.getFilters(targetTable.getFactTableName().hashCode()), carbonProjection);
        TaskAttemptContextImpl hadoopAttemptContext = new TaskAttemptContextImpl(jobConf,
                new TaskAttemptID("", 1, TaskType.MAP, 0, 0));
        CarbonInputSplit carbonInputSplit = CarbonLocalInputSplit.convertSplit(carbondataSplit.getLocalInputSplit());
        queryModel = carbonTableInputFormat.getQueryModel(carbonInputSplit, hadoopAttemptContext);
    } catch (IOException e) {
        throw new RuntimeException("Unable to get the Query Model ", e);
    }
    return new CarbondataRecordSet(targetTable, session, carbondataSplit, handles.build(), queryModel);
}
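Because this read path runs inside a Presto worker rather than a MapReduce task, there is no framework-supplied attempt id to parse, so the code synthesizes one. A minimal sketch of that pattern in isolation (names illustrative):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TaskAttemptContextImpl;
    import org.apache.hadoop.mapred.TaskAttemptID;
    import org.apache.hadoop.mapreduce.TaskType;

    JobConf jobConf = new JobConf();
    // Synthetic id: empty jtIdentifier, job 1, map task 0, attempt 0.
    TaskAttemptContextImpl attemptContext =
            new TaskAttemptContextImpl(jobConf, new TaskAttemptID("", 1, TaskType.MAP, 0, 0));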
From source file:org.apache.flink.batch.connectors.hive.HiveTableOutputFormat.java
License:Apache License
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    try {
        StorageDescriptor sd = hiveTablePartition.getStorageDescriptor();
        serializer = (AbstractSerDe) Class.forName(sd.getSerdeInfo().getSerializationLib()).newInstance();
        ReflectionUtils.setConf(serializer, jobConf);
        // TODO: support partition properties, for now assume they're same as table properties
        SerDeUtils.initializeSerDe(serializer, jobConf, tblProperties, null);
        outputClass = serializer.getSerializedClass();
    } catch (IllegalAccessException | SerDeException | InstantiationException | ClassNotFoundException e) {
        throw new FlinkRuntimeException("Error initializing Hive serializer", e);
    }

    TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
            + String.format("%" + (6 - Integer.toString(taskNumber).length()) + "s", " ").replace(" ", "0")
            + taskNumber + "_0");
    this.jobConf.set("mapred.task.id", taskAttemptID.toString());
    this.jobConf.setInt("mapred.task.partition", taskNumber);
    // for hadoop 2.2
    this.jobConf.set("mapreduce.task.attempt.id", taskAttemptID.toString());
    this.jobConf.setInt("mapreduce.task.partition", taskNumber);
    this.context = new TaskAttemptContextImpl(this.jobConf, taskAttemptID);

    if (!isDynamicPartition) {
        staticWriter = writerForLocation(hiveTablePartition.getStorageDescriptor().getLocation());
    }

    List<ObjectInspector> objectInspectors = new ArrayList<>();
    for (int i = 0; i < rowTypeInfo.getArity() - partitionCols.size(); i++) {
        objectInspectors.add(HiveTableUtil
                .getObjectInspector(LegacyTypeInfoDataTypeConverter.toDataType(rowTypeInfo.getTypeAt(i))));
    }
    if (!isPartitioned) {
        rowObjectInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
                Arrays.asList(rowTypeInfo.getFieldNames()), objectInspectors);
        numNonPartitionCols = rowTypeInfo.getArity();
    } else {
        rowObjectInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
                Arrays.asList(rowTypeInfo.getFieldNames()).subList(0, rowTypeInfo.getArity() - partitionCols.size()),
                objectInspectors);
        numNonPartitionCols = rowTypeInfo.getArity() - partitionCols.size();
    }
}
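Two details are worth noting. The attempt id is written under both the old ("mapred.task.id") and new ("mapreduce.task.attempt.id") property names so that code compiled against either Hadoop API can find it. And the zero-padding expression can be written more directly with a zero-pad format specifier; the following one-liner is an equivalent sketch for task numbers below 1,000,000 (the original expression would throw on a six-digit taskNumber, since it produces a zero-width format string):

    TaskAttemptID taskAttemptID =
            TaskAttemptID.forName("attempt__0000_r_" + String.format("%06d", taskNumber) + "_0");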
From source file:org.apache.hive.hcatalog.mapreduce.HCatMapRedUtils.java
License:Apache License
public static org.apache.hadoop.mapred.TaskAttemptContext getOldTaskAttemptContext(TaskAttemptContext context) {
    return new TaskAttemptContextImpl(new JobConf(context.getConfiguration()), getTaskAttemptID(context));
}
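This is a bridge from the new (org.apache.hadoop.mapreduce) API to the old (org.apache.hadoop.mapred) API; getTaskAttemptID is a helper defined elsewhere in the same class. A hypothetical call site, useful when driving an old-API committer from new-API code (oldApiCommitter and newApiContext are illustrative names):

    org.apache.hadoop.mapred.TaskAttemptContext oldCtx =
            HCatMapRedUtils.getOldTaskAttemptContext(newApiContext); // newApiContext: a mapreduce TaskAttemptContext
    oldApiCommitter.commitTask(oldCtx); // oldApiCommitter: an org.apache.hadoop.mapred.OutputCommitter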
From source file:org.apache.ignite.internal.processors.hadoop.impl.v1.HadoopV1OutputCollector.java
License:Apache License
/**
 * Setup task.
 *
 * @throws IOException If failed.
 */
public void setup() throws IOException {
    if (writer != null)
        jobConf.getOutputCommitter().setupTask(new TaskAttemptContextImpl(jobConf, attempt));
}
From source file:org.apache.ignite.internal.processors.hadoop.impl.v1.HadoopV1OutputCollector.java
License:Apache License
/**
 * Commit task.
 *
 * @throws IOException If failed.
 */
public void commit() throws IOException {
    if (writer != null) {
        OutputCommitter outputCommitter = jobConf.getOutputCommitter();

        TaskAttemptContext taskCtx = new TaskAttemptContextImpl(jobConf, attempt);

        if (outputCommitter.needsTaskCommit(taskCtx))
            outputCommitter.commitTask(taskCtx);
    }
}
From source file:org.apache.ignite.internal.processors.hadoop.impl.v1.HadoopV1OutputCollector.java
License:Apache License
/**
 * Abort task.
 */
public void abort() {
    try {
        if (writer != null)
            jobConf.getOutputCommitter().abortTask(new TaskAttemptContextImpl(jobConf, attempt));
    } catch (IOException ignore) {
        // No-op.
    }
}
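Taken together, the three HadoopV1OutputCollector methods above wrap the standard OutputCommitter task lifecycle, constructing a fresh TaskAttemptContextImpl for each call. A schematic of the calling order (a sketch of typical usage, not Ignite's actual driver code):

    collector.setup();       // OutputCommitter.setupTask(ctx)
    try {
        // ... write records via the collector ...
        collector.commit();  // needsTaskCommit(ctx) ? commitTask(ctx) : no-op
    } catch (Exception e) {
        collector.abort();   // OutputCommitter.abortTask(ctx); IOExceptions are swallowed
    }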