Example usage for org.apache.hadoop.mapreduce TaskInputOutputContext write

Introduction

On this page you can find usage examples for org.apache.hadoop.mapreduce.TaskInputOutputContext#write.

Prototype

public void write(KEYOUT key, VALUEOUT value) throws IOException, InterruptedException;

Document

Generate an output key/value pair.
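
Since Mapper.Context and Reducer.Context both extend TaskInputOutputContext, this method is what map() and reduce() implementations call to emit output. A minimal sketch, assuming a classic word-count mapper (the class name and tokenization are illustrative, not taken from the examples below):

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Emit one (token, 1) pair per whitespace-separated token;
        // each context.write call is the TaskInputOutputContext#write shown above.
        for (String token : value.toString().split("\\s+")) {
            word.set(token);
            context.write(word, ONE);
        }
    }
}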

Usage

From source file:Script.java

License:Open Source License

/** Call the map-reduce Javascript function with the given
 *  arguments and save the key-value result in the task's context.
 *  @param context   Task context
 *  @param f         The map-reduce Javascript function
 *  @param thisObj   The Javascript scope for the call
 *  @param args      The key-value arguments
 *  @param key       Tuple populated with the output key
 *  @param value     Tuple populated with the output value
 */
@SuppressWarnings("unchecked")
public void dispatchMapReduce(TaskInputOutputContext context, Function f, Scriptable thisObj, Object[] args,
        Tuple key, Tuple value) throws IOException, InterruptedException {
    Object ret = callMapReduce(f, thisObj, args, key, value);
    if (ret instanceof NativeGenerator) {
        NativeGenerator gen = (NativeGenerator) ret;
        Function next = (Function) gen.getProperty(gen, "next");
        while (callMapReduce(next, gen, null, key, value) != null)
            context.write(key, value);
    } else if (ret != null) {
        context.write(key, value);
    }
}
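
When the Javascript function returns a generator, the loop repeatedly invokes its next method; each call fills in key and value, and every non-null result causes them to be written to the context. A plain non-null return value writes the single key-value pair instead.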

From source file:com.asakusafw.runtime.flow.ResultOutput.java

License:Apache License

/**
 * Writes a key and value into current context.
 *  @param context current context
 * @param key the key object (nullable)
 * @param value the value object (nullable)
 * @param <K> type of key object
 * @param <V> type of value object
 * @throws Result.OutputException if failed to write the objects
 * @since 0.5.0
 */
public static <K, V> void write(TaskInputOutputContext<?, ?, ? super K, ? super V> context, K key, V value) {
    try {
        context.write(key, value);
    } catch (Exception e) {
        throw new Result.OutputException(e);
    }
}
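
This wrapper converts the checked IOException and InterruptedException thrown by context.write into an unchecked Result.OutputException, so callers do not have to declare them. A hypothetical call site (the key and value instances are illustrative, not from the source above):

    ResultOutput.write(context, new Text("word"), new IntWritable(1));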

From source file:com.metamx.druid.indexer.DeterminePartitionsJob.java

License:Open Source License

private static void write(
        TaskInputOutputContext<? extends Writable, ? extends Writable, BytesWritable, Text> context,
        final byte[] groupKey, DimValueCount dimValueCount) throws IOException, InterruptedException {
    context.write(
            new SortableBytes(groupKey,
                    tabJoiner.join(dimValueCount.dim, dimValueCount.value)
                            .getBytes(HadoopDruidIndexerConfig.javaNativeCharset)).toBytesWritable(),
            dimValueCount.toText());
}

From source file:io.druid.indexer.DeterminePartitionsJob.java

License:Apache License

private static void write(TaskInputOutputContext<?, ?, BytesWritable, Text> context, final byte[] groupKey,
        DimValueCount dimValueCount) throws IOException, InterruptedException {
    context.write(
            new SortableBytes(groupKey,
                    tabJoiner.join(dimValueCount.dim, dimValueCount.value)
                            .getBytes(HadoopDruidIndexerConfig.javaNativeCharset)).toBytesWritable(),
            dimValueCount.toText());
}

From source file:org.apache.druid.indexer.DeterminePartitionsJob.java

License:Apache License

private static void write(TaskInputOutputContext<?, ?, BytesWritable, Text> context, final byte[] groupKey,
        DimValueCount dimValueCount) throws IOException, InterruptedException {
    byte[] sortKey = TAB_JOINER.join(dimValueCount.dim, dimValueCount.value)
            .getBytes(HadoopDruidIndexerConfig.JAVA_NATIVE_CHARSET);
    context.write(new SortableBytes(groupKey, sortKey).toBytesWritable(), dimValueCount.toText());
}

From source file:org.apache.hcatalog.mapreduce.MultiOutputFormat.java

License:Apache License

/**
 * Write the output key and value using the OutputFormat defined by the
 * alias.
 *
 * @param alias the name given to the OutputFormat configuration
 * @param key the output key to be written
 * @param value the output value to be written
 * @param context the Mapper or Reducer Context
 * @throws IOException
 * @throws InterruptedException
 */
public static <K, V> void write(String alias, K key, V value, TaskInputOutputContext context)
        throws IOException, InterruptedException {
    KeyValue<K, V> keyval = new KeyValue<K, V>(key, value);
    context.write(new Text(alias), keyval);
}
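
Callers route a record to one of several configured OutputFormats by alias. A hypothetical call from inside a mapper, assuming an alias named "errors" was registered when the job was configured (the alias and values are illustrative, not from the source above):

    MultiOutputFormat.write("errors", new Text("parse-failure"), new Text("bad record"), context);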

From source file:simsql.runtime.JoinReducerInnards.java

License:Apache License

public void reduce(RecordKey key, Iterable<RecordWrapper> values,
        TaskInputOutputContext<?, ?, WritableKey, WritableValue> context) {

    // tells us we are joining
    boolean joining = false;

    // used to store all of the records from the first relation
    RecordHashTable tempTable = tempTableR.get();
    if (tempTable == null) {
        tempTable = new RecordHashTable(0.60);
        tempTableR = new SoftReference<RecordHashTable>(tempTable);
    } else {
        tempTable.clear();
    }

    // now we loop through all of the values
    Record firstRecord = null;
    Record firstFromRight = new RightOut();

    for (RecordWrapper r : values) {

        // remember the very first record so we can check its typecode
        if (firstRecord == null) {
            if (joinType == JoinOp.JoinType.NATURAL) {
                firstRecord = r.getWrappedRecord();
            } else {
                firstRecord = firstFromRight;
            }
        }

        // if we have a record with a different typeCode than the first one, it means that
        // we have moved on to the records from the 2nd relation... so we just scan all of 
        // the records in the first relation and output any hits
        if (!joining && (r.getWrappedRecord().getTypeCode() != firstRecord.getTypeCode())) {
            joining = true;
        }

        // if we are currently joining, then compare the current record with the ones from the other relation
        if (joining || myTable != null) {

            // this is where we try to find matches for r1
            Record[] guyToPullFrom;
            HashableRecord next = (HashableRecord) r.getWrappedRecord();

            // there are two cases: either we are doing a full-on, MapReduce join, in which case we will match
            // with everyone who came through the mapper
            if (joining && myTable != null) {
                throw new RuntimeException("How is joining true?");
            } else if (joining) {
                guyToPullFrom = tempTable.find(next.getSecondaryHashKey());
            } else {
                while (key.getKey() > processedThruKey) {
                    loadUpRecordHashTable();
                }
                guyToPullFrom = myTable.find(next.getSecondaryHashKey());
            }

            // is this a natural join?
            if (joinType == JoinOp.JoinType.NATURAL) {

                // loop through all of the matches and run the join
                if (guyToPullFrom != null) {
                    for (Record r1 : guyToPullFrom) {

                        // join the two records
                        Record output = Result.join((AbstractRecord) r1, (AbstractRecord) r.getWrappedRecord());
                        if (output != null) {
                            try {

                                // remember the sort key and send it on its way
                                output.setSortAttribute(key.getKey());
                                context.write(dummy, output);

                                output.recycle();

                            } catch (InterruptedException e) {
                                throw new RuntimeException(
                                        "died when trying to write a join output rec to the output stream (1)");
                            } catch (IOException e) {
                                throw new RuntimeException(
                                        "died when trying to write a join output rec to the output stream (2)");
                            }
                        }
                    }
                }
            }

            // is this a semijoin/antijoin?
            else {

                boolean someonePassed = false;
                boolean testedOne = false;

                Bitstring pred = BitstringWithSingleValue.FALSE;

                if (guyToPullFrom != null) {

                    // not null? then traverse.
                    for (Record r1 : guyToPullFrom) {

                        // test
                        testedOne = true;
                        pred = pred.or(Result.test((AbstractRecord) r.getWrappedRecord(), (AbstractRecord) r1));

                        if (!pred.allAreFalseOrUnknown() && joinType == JoinOp.JoinType.SEMIJOIN) {

                            // if we are running a semijoin, we write out and break out of the loop.
                            Record output = Result.compose((AbstractRecord) r.getWrappedRecord(),
                                    (AbstractRecord) r1, pred);
                            output.setSortAttribute(key.getKey());
                            try {
                                context.write(dummy, output);
                                output.recycle();
                            } catch (Exception e) {
                                throw new RuntimeException("Failed (1) ", e);
                            }

                            break;
                        }
                    }
                }

                // if we are doing an antijoin and pred is false, it means that we could not exclude
                // the current record and it will be included

                // pred could be unknown here, if the current record had no match... 

                // if it is an antijoin, then we see if we need to produce an output
                if (joinType == JoinOp.JoinType.ANTIJOIN) {

                    // note that we let a record through if the truth value of the NOT EXISTS at this point is UNKNOWN, or
                    // if it is TRUE.  Strictly speaking, this is not correct.  If the NOT EXISTS is UNKNOWN, the record should
                    // not make its way through.  But this was a semi-necessary hack.  The way we deal with anti-join predicates
                    // when the LHS record has no potential match is to create a record with all NULLs and run the anti-join predicate
                    // on the pair.  If we come back with UNKNOWN, then the record survived the predicate.
                    if (!testedOne) {

                        // get a null record and test against it
                        pred = pred.or(Result.test((AbstractRecord) r.getWrappedRecord(),
                                (AbstractRecord) RightOut.getNull()));
                    }

                    if (!pred.allAreTrue()) {
                        Record output = Result.compose((AbstractRecord) r.getWrappedRecord(),
                                (AbstractRecord) r.getWrappedRecord(), pred.theseAreTrue().not());
                        output.setSortAttribute(key.getKey());
                        try {
                            context.write(dummy, output);
                            output.recycle();
                        } catch (Exception e) {
                            throw new RuntimeException("Failed (2)", e);
                        }
                    }
                }
            }

            // remember that we just did a join
            firstRecord = null;
            r.getWrappedRecord().recycle();

            // if we are not currently joining, then add the record into the list
        } else {
            HashableRecord next = (HashableRecord) r.getWrappedRecord();
            tempTable.add(next, next.getSecondaryHashKey());
        }
    }
}
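
Taken together, this reducer implements a one-pass hash join driven by typecodes: records from the first (build-side) relation are staged in a soft-referenced hash table, and once a record with a different typecode arrives, the remaining (probe-side) records are matched against that table, with each natural-, semi-, or anti-join result emitted via context.write.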