Example usage for org.apache.hadoop.io WritableFactories newInstance

Introduction

On this page you can find example usage for org.apache.hadoop.io.WritableFactories.newInstance.

Prototype

public static Writable newInstance(Class<? extends Writable> c) 

Source Link

Document

Create a new instance of a class with a defined factory.
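
The sketch below is not taken from any of the source files listed under Usage; it only illustrates the two code paths behind newInstance. SimpleRecord and the surrounding WritableFactoriesSketch class are hypothetical names introduced here for illustration. When a WritableFactory has been registered via setFactory, newInstance delegates to it; otherwise it falls back to reflection, so any Writable with a no-argument constructor can still be created.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

public class WritableFactoriesSketch {

    // Hypothetical Writable used only for this illustration.
    public static class SimpleRecord implements Writable {
        private int id;

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeInt(id);
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            id = in.readInt();
        }
    }

    public static void main(String[] args) {
        // Register a factory; newInstance will delegate to it for SimpleRecord.
        WritableFactories.setFactory(SimpleRecord.class, new WritableFactory() {
            @Override
            public Writable newInstance() {
                return new SimpleRecord();
            }
        });
        Writable viaFactory = WritableFactories.newInstance(SimpleRecord.class);

        // No factory is registered for IntWritable: newInstance falls back to
        // reflection and uses the class's no-argument constructor.
        Writable viaReflection = WritableFactories.newInstance(IntWritable.class);

        System.out.println(viaFactory.getClass() + " / " + viaReflection.getClass());
    }
}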

Usage

From source file: com.cloudera.crunch.type.writable.GenericArrayWritable.java

License: Open Source License

public void readFields(DataInput in) throws IOException {
    values = new Writable[in.readInt()]; // construct values
    if (values.length > 0) {
        String valueType = Text.readString(in);
        setValueType(valueType);
        for (int i = 0; i < values.length; i++) {
            Writable value = WritableFactories.newInstance(valueClass);
            value.readFields(in); // read a value
            values[i] = value; // store it in values
        }
    }
}

From source file: com.moz.fiji.hive.io.FijiCellWritable.java

License: Apache License

/**
 * Reads and converts data according to the specified schema.
 *
 * @param in DataInput to deserialize this object from.
 * @param schema Schema to be used for deserializing this data.
 * @return the data read and converted according to the schema.
 * @throws IOException if there was an error reading.
 */
private static Object readData(DataInput in, Schema schema) throws IOException {
    switch (schema.getType()) {
    case INT:
        Integer intData = WritableUtils.readVInt(in);
        return intData;
    case LONG:
        Long longData = WritableUtils.readVLong(in);
        return longData;
    case DOUBLE:
        DoubleWritable doubleWritable = (DoubleWritable) WritableFactories.newInstance(DoubleWritable.class);
        doubleWritable.readFields(in);
        return doubleWritable.get();
    case ENUM:
    case STRING:
        String stringData = WritableUtils.readString(in);
        return stringData;
    case FLOAT:
        FloatWritable floatWritable = (FloatWritable) WritableFactories.newInstance(FloatWritable.class);
        floatWritable.readFields(in);
        return floatWritable.get();
    case ARRAY:
        List<Object> listData = Lists.newArrayList();
        Integer numElements = WritableUtils.readVInt(in);
        for (int c = 0; c < numElements; c++) {
            Object listElement = readData(in, schema.getElementType());
            listData.add(listElement);
        }
        return listData;
    case RECORD:
        GenericRecord recordData = new GenericData.Record(schema);
        Integer numFields = WritableUtils.readVInt(in);
        for (int c = 0; c < numFields; c++) {
            String fieldName = WritableUtils.readString(in);
            Object fieldData = readData(in, schema.getField(fieldName).schema());
            recordData.put(fieldName, fieldData);
        }
        return recordData;
    case MAP:
        Map<String, Object> mapData = Maps.newHashMap();
        Integer numEntries = WritableUtils.readVInt(in);
        for (int c = 0; c < numEntries; c++) {
            String key = WritableUtils.readString(in);
            Object value = readData(in, schema.getValueType());
            mapData.put(key, value);
        }
        return mapData;
    case UNION:
        Integer tag = WritableUtils.readVInt(in);
        Schema unionSubSchema = schema.getTypes().get(tag);
        Object unionData = readData(in, unionSubSchema);
        return unionData;
    case BYTES:
        byte[] bytesData = WritableUtils.readCompressedByteArray(in);
        return bytesData;
    case BOOLEAN:
        BooleanWritable booleanWritable = (BooleanWritable) WritableFactories
                .newInstance(BooleanWritable.class);
        booleanWritable.readFields(in);
        return booleanWritable.get();
    case NULL:
        return null;
    default:
        throw new UnsupportedOperationException("Unsupported type: " + schema.getType());
    }
}

From source file: com.moz.fiji.hive.io.FijiRowDataWritable.java

License: Apache License

@Override
public void readFields(DataInput in) throws IOException {
    EntityIdWritable entityIdWritable = (EntityIdWritable) WritableFactories
            .newInstance(EntityIdWritable.class);
    entityIdWritable.readFields(in);
    mEntityId = entityIdWritable;

    int numDecodedData = WritableUtils.readVInt(in);

    // We need to dirty the decoded data so that these objects can be reused.
    mDecodedData = null;

    mWritableData = Maps.newHashMap();
    for (int c = 0; c < numDecodedData; c++) {
        String columnText = WritableUtils.readString(in);
        FijiColumnName column = new FijiColumnName(columnText);

        NavigableMap<Long, FijiCellWritable> data = Maps.newTreeMap();
        int numCells = WritableUtils.readVInt(in);
        for (int d = 0; d < numCells; d++) {
            long ts = WritableUtils.readVLong(in);
            FijiCellWritable cellWritable = (FijiCellWritable) WritableFactories
                    .newInstance(FijiCellWritable.class);
            cellWritable.readFields(in);
            data.put(ts, cellWritable);
        }

        mWritableData.put(column, data);
    }

    mSchemas = Maps.newHashMap();
    int numSchemas = WritableUtils.readVInt(in);
    for (int c = 0; c < numSchemas; c++) {
        String columnText = WritableUtils.readString(in);
        FijiColumnName column = new FijiColumnName(columnText);
        String schemaString = WritableUtils.readString(in);
        Schema schema = new Schema.Parser().parse(schemaString);
        mSchemas.put(column, schema);
    }
}

From source file: com.moz.fiji.hive.utils.ByteWritable.java

License: Apache License

public static <T extends Writable> T asWritable(byte[] bytes, Class<T> clazz) throws IOException {
    T result = null;
    DataInputStream dataIn = null;
    try {
        result = (T) WritableFactories.newInstance(clazz);
        ByteArrayInputStream in = new ByteArrayInputStream(bytes);
        dataIn = new DataInputStream(in);
        result.readFields(dataIn);
    } finally {
        IOUtils.closeQuietly(dataIn);
    }
    return result;
}
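
A hedged usage sketch for the helper above, not taken from the Fiji sources: it serializes an IntWritable by hand and then rebuilds it through asWritable. The surrounding AsWritableSketch class is a hypothetical name; only asWritable itself comes from the file listed above.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;

public class AsWritableSketch {
    public static void main(String[] args) throws IOException {
        // Serialize an IntWritable by hand into a byte array.
        ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
        DataOutputStream dataOut = new DataOutputStream(bytesOut);
        new IntWritable(42).write(dataOut);
        dataOut.close();

        // Rebuild it with the helper above; the class token selects the type.
        IntWritable restored =
                com.moz.fiji.hive.utils.ByteWritable.asWritable(bytesOut.toByteArray(), IntWritable.class);
        System.out.println(restored.get()); // prints 42
    }
}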

From source file: eu.stratosphere.hadoopcompatibility.HadoopInputSplitWrapper.java

License: Apache License

@Override
public void read(DataInput in) throws IOException {
    this.splitNumber = in.readInt();
    this.hadoopInputSplitTypeName = in.readUTF();
    if (hadoopInputSplit == null) {
        try {
            Class<? extends org.apache.hadoop.io.Writable> inputSplit = Class.forName(hadoopInputSplitTypeName)
                    .asSubclass(org.apache.hadoop.io.Writable.class);
            this.hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories
                    .newInstance(inputSplit);
        } catch (Exception e) {
            throw new RuntimeException("Unable to create InputSplit", e);
        }
    }
    this.hadoopInputSplit.readFields(in);
}

From source file: eu.stratosphere.hadoopcompatibility.mapreduce.wrapper.HadoopInputSplit.java

License: Apache License

@Override
public void read(DataInput in) throws IOException {
    this.splitNumber = in.readInt();
    String className = in.readUTF();

    if (this.mapreduceInputSplit == null) {
        try {
            Class<? extends org.apache.hadoop.io.Writable> inputSplit = Class.forName(className)
                    .asSubclass(org.apache.hadoop.io.Writable.class);
            this.mapreduceInputSplit = (org.apache.hadoop.mapreduce.InputSplit) WritableFactories
                    .newInstance(inputSplit);
        } catch (Exception e) {
            throw new RuntimeException("Unable to create InputSplit", e);
        }
    }
    ((Writable) this.mapreduceInputSplit).readFields(in);
}

From source file: org.apache.crunch.types.writable.GenericArrayWritable.java

License: Apache License

public void readFields(DataInput in) throws IOException {
    values = new Writable[WritableUtils.readVInt(in)]; // construct values
    if (values.length > 0) {
        int nulls = WritableUtils.readVInt(in);
        if (nulls == values.length) {
            return;
        }
        String valueType = Text.readString(in);
        setValueType(valueType);
        for (int i = 0; i < values.length - nulls; i++) {
            Writable value = WritableFactories.newInstance(valueClass);
            value.readFields(in); // read a value
            values[i] = value; // store it in values
        }
    }
}

From source file: org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopInputSplit.java

License: Apache License

private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    // the job conf knows how to deserialize itself
    jobConf = new JobConf();
    jobConf.readFields(in);

    try {
        hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
    }

    if (hadoopInputSplit instanceof Configurable) {
        ((Configurable) hadoopInputSplit).setConf(this.jobConf);
    } else if (hadoopInputSplit instanceof JobConfigurable) {
        ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
    }
    hadoopInputSplit.readFields(in);
}

From source file: org.apache.flink.api.java.hadoop.mapreduce.wrapper.HadoopInputSplit.java

License: Apache License

private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    try {
        Class<? extends Writable> writableSplit = splitType.asSubclass(Writable.class);
        mapreduceInputSplit = (org.apache.hadoop.mapreduce.InputSplit) WritableFactories
                .newInstance(writableSplit);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate the Hadoop InputSplit", e);
    }

    ((Writable) mapreduceInputSplit).readFields(in);
}

From source file: org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopInputSplit.java

License: Apache License

@Override
public void read(DataInputView in) throws IOException {
    this.splitNumber = in.readInt();
    this.hadoopInputSplitTypeName = in.readUTF();
    if (hadoopInputSplit == null) {
        try {
            Class<? extends org.apache.hadoop.io.Writable> inputSplit = Class.forName(hadoopInputSplitTypeName)
                    .asSubclass(org.apache.hadoop.io.Writable.class);
            this.hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories
                    .newInstance(inputSplit);
        } catch (Exception e) {
            throw new RuntimeException("Unable to create InputSplit", e);
        }
    }
    this.hadoopInputSplit.readFields(in);
}