List of usage examples for org.apache.hadoop.mapred JobContextImpl getInputFormatClass
@SuppressWarnings("unchecked") public Class<? extends InputFormat<?, ?>> getInputFormatClass() throws ClassNotFoundException
From source file: org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2MapTask.java
License: Apache License
/** {@inheritDoc} */ @SuppressWarnings({ "ConstantConditions", "unchecked" }) @Override/*from w ww . j a v a 2 s. c om*/ public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException { OutputFormat outputFormat = null; Exception err = null; JobContextImpl jobCtx = taskCtx.jobContext(); if (taskCtx.taskInfo().hasMapperIndex()) HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex()); else HadoopMapperUtils.clearMapperIndex(); try { HadoopV2Context hadoopCtx = hadoopContext(); InputSplit nativeSplit = hadoopCtx.getInputSplit(); if (nativeSplit == null) throw new IgniteCheckedException("Input split cannot be null."); InputFormat inFormat = ReflectionUtils.newInstance(jobCtx.getInputFormatClass(), hadoopCtx.getConfiguration()); RecordReader reader = inFormat.createRecordReader(nativeSplit, hadoopCtx); reader.initialize(nativeSplit, hadoopCtx); hadoopCtx.reader(reader); HadoopJobInfo jobInfo = taskCtx.job().info(); outputFormat = jobInfo.hasCombiner() || jobInfo.hasReducer() ? null : prepareWriter(jobCtx); Mapper mapper = ReflectionUtils.newInstance(jobCtx.getMapperClass(), hadoopCtx.getConfiguration()); try { mapper.run(new WrappedMapper().getMapContext(hadoopCtx)); taskCtx.onMapperFinished(); } finally { closeWriter(); } commit(outputFormat); } catch (InterruptedException e) { err = e; Thread.currentThread().interrupt(); throw new IgniteInterruptedCheckedException(e); } catch (Exception e) { err = e; throw new IgniteCheckedException(e); } finally { HadoopMapperUtils.clearMapperIndex(); if (err != null) abort(outputFormat); } }
From source file: org.apache.ignite.internal.processors.hadoop.v2.GridHadoopV2MapTask.java
License: Apache License
/** {@inheritDoc} */ @SuppressWarnings({ "ConstantConditions", "unchecked" }) @Override/*from w w w.j a va 2 s .c o m*/ public void run0(GridHadoopV2TaskContext taskCtx) throws IgniteCheckedException { GridHadoopInputSplit split = info().inputSplit(); InputSplit nativeSplit; if (split instanceof GridHadoopFileBlock) { GridHadoopFileBlock block = (GridHadoopFileBlock) split; nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), null); } else nativeSplit = (InputSplit) taskCtx.getNativeSplit(split); assert nativeSplit != null; OutputFormat outputFormat = null; Exception err = null; JobContextImpl jobCtx = taskCtx.jobContext(); try { InputFormat inFormat = ReflectionUtils.newInstance(jobCtx.getInputFormatClass(), hadoopContext().getConfiguration()); RecordReader reader = inFormat.createRecordReader(nativeSplit, hadoopContext()); reader.initialize(nativeSplit, hadoopContext()); hadoopContext().reader(reader); GridHadoopJobInfo jobInfo = taskCtx.job().info(); outputFormat = jobInfo.hasCombiner() || jobInfo.hasReducer() ? null : prepareWriter(jobCtx); Mapper mapper = ReflectionUtils.newInstance(jobCtx.getMapperClass(), hadoopContext().getConfiguration()); try { mapper.run(new WrappedMapper().getMapContext(hadoopContext())); } finally { closeWriter(); } commit(outputFormat); } catch (InterruptedException e) { err = e; Thread.currentThread().interrupt(); throw new IgniteInterruptedCheckedException(e); } catch (Exception e) { err = e; throw new IgniteCheckedException(e); } finally { if (err != null) abort(outputFormat); } }
From source file: org.apache.ignite.internal.processors.hadoop.v2.HadoopV2MapTask.java
License: Apache License
/** {@inheritDoc} */ @SuppressWarnings({ "ConstantConditions", "unchecked" }) @Override//w w w .jav a 2s . c o m public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException { HadoopInputSplit split = info().inputSplit(); InputSplit nativeSplit; if (split instanceof HadoopFileBlock) { HadoopFileBlock block = (HadoopFileBlock) split; nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), null); } else nativeSplit = (InputSplit) taskCtx.getNativeSplit(split); assert nativeSplit != null; OutputFormat outputFormat = null; Exception err = null; JobContextImpl jobCtx = taskCtx.jobContext(); try { InputFormat inFormat = ReflectionUtils.newInstance(jobCtx.getInputFormatClass(), hadoopContext().getConfiguration()); RecordReader reader = inFormat.createRecordReader(nativeSplit, hadoopContext()); reader.initialize(nativeSplit, hadoopContext()); hadoopContext().reader(reader); HadoopJobInfo jobInfo = taskCtx.job().info(); outputFormat = jobInfo.hasCombiner() || jobInfo.hasReducer() ? null : prepareWriter(jobCtx); Mapper mapper = ReflectionUtils.newInstance(jobCtx.getMapperClass(), hadoopContext().getConfiguration()); try { mapper.run(new WrappedMapper().getMapContext(hadoopContext())); } finally { closeWriter(); } commit(outputFormat); } catch (InterruptedException e) { err = e; Thread.currentThread().interrupt(); throw new IgniteInterruptedCheckedException(e); } catch (Exception e) { err = e; throw new IgniteCheckedException(e); } finally { if (err != null) abort(outputFormat); } }
From source file: org.gridgain.grid.kernal.processors.hadoop.v2.GridHadoopV2MapTask.java
License: Open Source License
/** {@inheritDoc} */ @SuppressWarnings({ "ConstantConditions", "unchecked" }) @Override//from w ww . j a v a2 s . co m public void run0(GridHadoopV2TaskContext taskCtx) throws GridException { GridHadoopInputSplit split = info().inputSplit(); InputSplit nativeSplit; if (split instanceof GridHadoopFileBlock) { GridHadoopFileBlock block = (GridHadoopFileBlock) split; nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), null); } else nativeSplit = (InputSplit) taskCtx.getNativeSplit(split); assert nativeSplit != null; OutputFormat outputFormat = null; Exception err = null; JobContextImpl jobCtx = taskCtx.jobContext(); try { InputFormat inFormat = ReflectionUtils.newInstance(jobCtx.getInputFormatClass(), hadoopContext().getConfiguration()); RecordReader reader = inFormat.createRecordReader(nativeSplit, hadoopContext()); reader.initialize(nativeSplit, hadoopContext()); hadoopContext().reader(reader); GridHadoopJobInfo jobInfo = taskCtx.job().info(); outputFormat = jobInfo.hasCombiner() || jobInfo.hasReducer() ? null : prepareWriter(jobCtx); Mapper mapper = ReflectionUtils.newInstance(jobCtx.getMapperClass(), hadoopContext().getConfiguration()); try { mapper.run(new WrappedMapper().getMapContext(hadoopContext())); } finally { closeWriter(); } commit(outputFormat); } catch (InterruptedException e) { err = e; Thread.currentThread().interrupt(); throw new GridInterruptedException(e); } catch (Exception e) { err = e; throw new GridException(e); } finally { if (err != null) abort(outputFormat); } }