Example usage for org.apache.hadoop.mapred Reducer reduce

Introduction

This page collects example usages of the org.apache.hadoop.mapred Reducer.reduce method.

Prototype

void reduce(K2 key, Iterator<V2> values, OutputCollector<K3, V3> output, Reporter reporter) throws IOException;

Document

Reduces values for a given key. K2 and V2 are the intermediate (map-output) key and value types; K3 and V3 are the final output key and value types.
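
Before the project examples, here is a minimal sketch of a typical implementation of this method: a word-count style reducer that sums the values seen for each key. The class name SumReducer is illustrative and not taken from the sources below.

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class SumReducer extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output,
            Reporter reporter) throws IOException {
        // Sum every count observed for this key and emit a single total.
        int sum = 0;
        while (values.hasNext())
            sum += values.next().get();

        output.collect(key, new IntWritable(sum));
    }
}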

Usage

From source file:org.apache.ignite.internal.processors.hadoop.impl.v1.HadoopV1ReduceTask.java

License:Apache License

/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopJobEx job = taskCtx.job();

    HadoopV2TaskContext taskCtx0 = (HadoopV2TaskContext) taskCtx;

    if (!reduce && taskCtx.taskInfo().hasMapperIndex())
        HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex());
    else
        HadoopMapperUtils.clearMapperIndex();

    try {
        JobConf jobConf = taskCtx0.jobConf();

        HadoopTaskInput input = taskCtx.input();

        HadoopV1OutputCollector collector = null;

        try {
            collector = collector(jobConf, taskCtx0, reduce || !job.info().hasReducer(), fileName(),
                    taskCtx0.attemptId());

            // The same task class serves both phases: the 'reduce' flag selects the
            // user's Reducer or its combiner (in the old API a combiner is also a Reducer).
            Reducer reducer;
            if (reduce)
                reducer = ReflectionUtils.newInstance(jobConf.getReducerClass(), jobConf);
            else
                reducer = ReflectionUtils.newInstance(jobConf.getCombinerClass(), jobConf);

            assert reducer != null;

            try {
                try {
                    while (input.next()) {
                        if (isCancelled())
                            throw new HadoopTaskCancelledException("Reduce task cancelled.");

                        // One reduce(...) call per distinct key; Reporter.NULL is a no-op reporter.
                        reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
                    }

                    if (!reduce)
                        taskCtx.onMapperFinished();
                } finally {
                    reducer.close();
                }
            } finally {
                collector.closeWriter();
            }

            collector.commit();
        } catch (Exception e) {
            if (collector != null)
                collector.abort();

            throw new IgniteCheckedException(e);
        }
    } finally {
        if (!reduce)
            HadoopMapperUtils.clearMapperIndex();
    }
}
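
The task above resolves the user's classes reflectively through jobConf.getReducerClass() and jobConf.getCombinerClass(). Below is a minimal sketch of the job configuration that makes those lookups succeed, reusing the hypothetical SumReducer from the introduction.

import org.apache.hadoop.mapred.JobConf;

public class JobSetupSketch {
    static JobConf configure() {
        JobConf jobConf = new JobConf();
        // Resolved by jobConf.getReducerClass() in the reduce phase.
        jobConf.setReducerClass(SumReducer.class);
        // Optional; resolved by jobConf.getCombinerClass() when the task runs as a combiner.
        jobConf.setCombinerClass(SumReducer.class);
        return jobConf;
    }
}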

From source file:org.apache.ignite.internal.processors.hadoop.v1.GridHadoopV1ReduceTask.java

License:Apache License

/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void run(GridHadoopTaskContext taskCtx) throws IgniteCheckedException {
    GridHadoopJob job = taskCtx.job();

    GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext) taskCtx;

    JobConf jobConf = ctx.jobConf();

    GridHadoopTaskInput input = taskCtx.input();

    GridHadoopV1OutputCollector collector = null;

    try {
        collector = collector(jobConf, ctx, reduce || !job.info().hasReducer(), fileName(), ctx.attemptId());

        Reducer reducer = ReflectionUtils
                .newInstance(reduce ? jobConf.getReducerClass() : jobConf.getCombinerClass(), jobConf);

        assert reducer != null;

        try {
            try {
                while (input.next()) {
                    if (isCancelled())
                        throw new GridHadoopTaskCancelledException("Reduce task cancelled.");

                    reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
                }
            } finally {
                reducer.close();
            }
        } finally {
            collector.closeWriter();
        }

        collector.commit();
    } catch (Exception e) {
        if (collector != null)
            collector.abort();

        throw new IgniteCheckedException(e);
    }
}

From source file:org.apache.ignite.internal.processors.hadoop.v1.HadoopV1ReduceTask.java

License:Apache License

/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopJob job = taskCtx.job();

    HadoopV2TaskContext ctx = (HadoopV2TaskContext) taskCtx;

    JobConf jobConf = ctx.jobConf();

    HadoopTaskInput input = taskCtx.input();

    HadoopV1OutputCollector collector = null;

    try {
        collector = collector(jobConf, ctx, reduce || !job.info().hasReducer(), fileName(), ctx.attemptId());

        Reducer reducer;
        if (reduce)
            reducer = ReflectionUtils.newInstance(jobConf.getReducerClass(), jobConf);
        else
            reducer = ReflectionUtils.newInstance(jobConf.getCombinerClass(), jobConf);

        assert reducer != null;

        try {
            try {
                while (input.next()) {
                    if (isCancelled())
                        throw new HadoopTaskCancelledException("Reduce task cancelled.");

                    reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
                }
            } finally {
                reducer.close();
            }
        } finally {
            collector.closeWriter();
        }

        collector.commit();
    } catch (Exception e) {
        if (collector != null)
            collector.abort();

        throw new IgniteCheckedException(e);
    }
}

From source file:org.apache.tez.mapreduce.combine.MRCombiner.java

License:Apache License

private void runOldCombiner(final TezRawKeyValueIterator rawIter, final Writer writer) throws IOException {
    Class<? extends Reducer> reducerClazz = (Class<? extends Reducer>) conf.getClass("mapred.combiner.class",
            null, Reducer.class);

    Reducer combiner = ReflectionUtils.newInstance(reducerClazz, conf);

    OutputCollector collector = new OutputCollector() {
        @Override
        public void collect(Object key, Object value) throws IOException {
            writer.append(key, value);
        }
    };

    CombinerValuesIterator values = new CombinerValuesIterator(rawIter, keyClass, valClass, comparator);

    while (values.moveToNext()) {
        combiner.reduce(values.getKey(), values.getValues().iterator(), collector, reporter);
    }
}
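
Each of these examples follows the same contract: reduce is called once per distinct key with an iterator over that key's values. Below is a minimal sketch of driving the method directly, for instance in a unit test, again assuming the hypothetical SumReducer from the introduction.

import java.util.Arrays;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class DirectReduceSketch {
    public static void main(String[] args) throws Exception {
        SumReducer reducer = new SumReducer();

        // Collector that simply prints every emitted pair.
        OutputCollector<Text, IntWritable> out = (key, value) -> System.out.println(key + "\t" + value);

        // One call per distinct key; Reporter.NULL is a no-op reporter.
        reducer.reduce(new Text("word"),
                Arrays.asList(new IntWritable(1), new IntWritable(2)).iterator(), out, Reporter.NULL);

        reducer.close();
    }
}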

From source file:org.apache.tez.mapreduce.processor.reduce.ReduceProcessor.java

License:Apache License

void runOldReducer(JobConf job, final MRTaskReporter reporter, KeyValuesReader input, RawComparator comparator,
        Class keyClass, Class valueClass, final KeyValueWriter output)
        throws IOException, InterruptedException {

    Reducer reducer = ReflectionUtils.newInstance(job.getReducerClass(), job);

    // make output collector

    OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value) throws IOException {
            output.write(key, value);
        }
    };

    // apply reduce function
    try {
        ReduceValuesIterator values = new ReduceValuesIterator(input, reporter, reduceInputValueCounter);

        values.informReduceProgress();
        while (values.more()) {
            reduceInputKeyCounter.increment(1);
            reducer.reduce(values.getKey(), values, collector, reporter);
            values.informReduceProgress();
        }

        // Set progress to 1.0f if there was no exception,
        reporter.setProgress(1.0f);

        //Clean up: repeated in catch block below
        reducer.close();
        //End of clean up.
    } catch (IOException ioe) {
        try {
            reducer.close();
        } catch (IOException ignored) {
        }

        throw ioe;
    }
}

From source file:org.gridgain.grid.kernal.processors.hadoop.v1.GridHadoopV1ReduceTask.java

License:Open Source License

/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void run(GridHadoopTaskContext taskCtx) throws GridException {
    GridHadoopJob job = taskCtx.job();

    GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext) taskCtx;

    JobConf jobConf = ctx.jobConf();

    GridHadoopTaskInput input = taskCtx.input();

    GridHadoopV1OutputCollector collector = null;

    try {
        collector = collector(jobConf, ctx, reduce || !job.info().hasReducer(), fileName(), ctx.attemptId());

        Reducer reducer = ReflectionUtils
                .newInstance(reduce ? jobConf.getReducerClass() : jobConf.getCombinerClass(), jobConf);

        assert reducer != null;

        try {
            try {
                while (input.next()) {
                    if (isCancelled())
                        throw new GridHadoopTaskCancelledException("Reduce task cancelled.");

                    reducer.reduce(input.key(), input.values(), collector, Reporter.NULL);
                }
            } finally {
                reducer.close();
            }
        } finally {
            collector.closeWriter();
        }

        collector.commit();
    } catch (Exception e) {
        if (collector != null)
            collector.abort();

        throw new GridException(e);
    }
}