Example usage for org.apache.hadoop.io FloatWritable FloatWritable

Introduction

On this page you can find example usages of the org.apache.hadoop.io.FloatWritable(float) constructor, drawn from open source projects.

Prototype

public FloatWritable(float value) 
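Before the project examples below, here is a minimal standalone sketch (not taken from any of the listed projects; the class name is illustrative) showing the constructor together with get() and set():

import org.apache.hadoop.io.FloatWritable;

public class FloatWritableDemo {
    public static void main(String[] args) {
        // construct with an initial value
        FloatWritable fw = new FloatWritable(3.14f);
        System.out.println(fw.get()); // 3.14

        // the value can be replaced in place; Hadoop reuses Writable instances this way
        fw.set(2.71f);
        System.out.println(fw.get()); // 2.71
    }
}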

Usage

From source file:hivemall.utils.hadoop.WritableUtils.java

License:Open Source License

public static FloatWritable val(final float v) {
    return new FloatWritable(v);
}

From source file:hivemall.utils.hadoop.WritableUtils.java

License:Open Source License

public static Writable toWritable(Object object) {
    if (object == null) {
        return null; //return NullWritable.get();
    }
    if (object instanceof Writable) {
        return (Writable) object;
    }
    if (object instanceof String) {
        return new Text((String) object);
    }
    if (object instanceof Long) {
        return new VLongWritable((Long) object);
    }
    if (object instanceof Integer) {
        return new VIntWritable((Integer) object);
    }
    if (object instanceof Byte) {
        return new ByteWritable((Byte) object);
    }
    if (object instanceof Double) {
        return new DoubleWritable((Double) object);
    }
    if (object instanceof Float) {
        return new FloatWritable((Float) object);
    }
    if (object instanceof Boolean) {
        return new BooleanWritable((Boolean) object);
    }
    if (object instanceof byte[]) {
        return new BytesWritable((byte[]) object);
    }
    return new BytesWritable(object.toString().getBytes());
}
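A hypothetical caller (not part of WritableUtils itself; the demo class name is made up) might use the two helpers above like this:

import org.apache.hadoop.io.Writable;
import hivemall.utils.hadoop.WritableUtils;

public class ToWritableDemo {
    public static void main(String[] args) {
        Writable f = WritableUtils.val(1.5f);            // FloatWritable
        Writable s = WritableUtils.toWritable("hello");  // Text
        Writable l = WritableUtils.toWritable(42L);      // VLongWritable
        Writable n = WritableUtils.toWritable(null);     // null, not NullWritable.get()
        System.out.println(f + " " + s + " " + l + " " + n);
    }
}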

From source file:hydrograph.engine.cascading.scheme.hive.parquet.ParquetWritableUtils.java

License:Apache License

private static Writable createPrimitive(final Object obj, final PrimitiveObjectInspector inspector)
        throws SerDeException {
    if (obj == null) {
        return null;
    }

    switch (inspector.getPrimitiveCategory()) {
    case VOID:
        return null;
    case BOOLEAN:
        return new BooleanWritable(
                ((BooleanObjectInspector) inspector).get(new BooleanWritable((boolean) obj)));
    case BYTE:
        return new ByteWritable(((ByteObjectInspector) inspector).get(new ByteWritable((byte) obj)));
    case DOUBLE:
        return new DoubleWritable(((DoubleObjectInspector) inspector).get(new DoubleWritable((double) obj)));
    case FLOAT:
        return new FloatWritable(((FloatObjectInspector) inspector).get(new FloatWritable((float) obj)));
    case INT:
        return new IntWritable(((IntObjectInspector) inspector).get(new IntWritable((int) obj)));
    case LONG:
        return new LongWritable(((LongObjectInspector) inspector).get(new LongWritable((long) obj)));
    case SHORT:
        return new ShortWritable(((ShortObjectInspector) inspector).get(new ShortWritable((short) obj)));
    case STRING:
        String v;
        if (obj instanceof Long) {
            SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd");
            Date date = new Date((long) obj);
            v = df.format(date);
        } else if (obj instanceof BigDecimal) {
            BigDecimal bigDecimalObj = (BigDecimal) obj;
            v = bigDecimalObj.toString();
        } else {
            v = ((StringObjectInspector) inspector).getPrimitiveJavaObject(obj);
        }
        try {
            return new BytesWritable(v.getBytes("UTF-8"));
        } catch (UnsupportedEncodingException e) {
            throw new SerDeException("Failed to encode string in UTF-8", e);
        }
    case DECIMAL:
        HiveDecimal hd;
        if (obj instanceof Double) {
            hd = HiveDecimal.create(new BigDecimal((Double) obj));
        } else if (obj instanceof BigDecimal) {
            hd = HiveDecimal.create((BigDecimal) obj);
        } else {
            // if "obj" is other than Double or BigDecimal and a vaild
            // number, .toString, will get its correct number representation
            // and a BigDecimal object will be created
            hd = HiveDecimal.create(new BigDecimal(obj.toString()));
        }
        return new HiveDecimalWritable(hd);
    case TIMESTAMP:
        return new TimestampWritable(((TimestampObjectInspector) inspector)
                .getPrimitiveJavaObject(new TimestampWritable(new Timestamp((long) obj))));
    case DATE:
        return new DateWritable(((DateObjectInspector) inspector)
                .getPrimitiveJavaObject(new DateWritable(new Date((long) obj))));
    case CHAR:
        String strippedValue = ((HiveCharObjectInspector) inspector).getPrimitiveJavaObject(obj)
                .getStrippedValue();
        return new BytesWritable(Binary.fromString(strippedValue).getBytes());
    case VARCHAR:
        String value = ((HiveVarcharObjectInspector) inspector).getPrimitiveJavaObject(obj).getValue();
        return new BytesWritable(Binary.fromString(value).getBytes());
    default:
        throw new SerDeException("Unknown primitive : " + inspector.getPrimitiveCategory());
    }
}

From source file:io.druid.data.input.orc.OrcHadoopInputRowParserTest.java

License:Apache License

@Test
public void testParse() {
    final String typeString = "struct<timestamp:string,col1:string,col2:array<string>,col3:float,col4:bigint,col5:decimal,col6:array<string>>";
    final OrcHadoopInputRowParser parser = new OrcHadoopInputRowParser(new TimeAndDimsParseSpec(
            new TimestampSpec("timestamp", "auto", null), new DimensionsSpec(null, null, null)), typeString);

    final SettableStructObjectInspector oi = (SettableStructObjectInspector) OrcStruct
            .createObjectInspector(TypeInfoUtils.getTypeInfoFromTypeString(typeString));
    final OrcStruct struct = (OrcStruct) oi.create();
    struct.setNumFields(7);
    oi.setStructFieldData(struct, oi.getStructFieldRef("timestamp"), new Text("2000-01-01"));
    oi.setStructFieldData(struct, oi.getStructFieldRef("col1"), new Text("foo"));
    oi.setStructFieldData(struct, oi.getStructFieldRef("col2"),
            ImmutableList.of(new Text("foo"), new Text("bar")));
    oi.setStructFieldData(struct, oi.getStructFieldRef("col3"), new FloatWritable(1));
    oi.setStructFieldData(struct, oi.getStructFieldRef("col4"), new LongWritable(2));
    oi.setStructFieldData(struct, oi.getStructFieldRef("col5"), new HiveDecimalWritable(3));
    oi.setStructFieldData(struct, oi.getStructFieldRef("col6"), null);

    final InputRow row = parser.parse(struct);
    Assert.assertEquals("timestamp", new DateTime("2000-01-01"), row.getTimestamp());
    Assert.assertEquals("col1", "foo", row.getRaw("col1"));
    Assert.assertEquals("col2", ImmutableList.of("foo", "bar"), row.getRaw("col2"));
    Assert.assertEquals("col3", 1.0f, row.getRaw("col3"));
    Assert.assertEquals("col4", 2L, row.getRaw("col4"));
    Assert.assertEquals("col5", 3.0d, row.getRaw("col5"));
    Assert.assertNull("col6", row.getRaw("col6"));
}

From source file:io.druid.indexer.InputRowSerde.java

License:Apache License

public static final byte[] toBytes(final InputRow row, AggregatorFactory[] aggs) {
    try {
        ByteArrayDataOutput out = ByteStreams.newDataOutput();

        //write timestamp
        out.writeLong(row.getTimestampFromEpoch());

        //writing all dimensions
        List<String> dimList = row.getDimensions();

        Text[] dims = EMPTY_TEXT_ARRAY;
        if (dimList != null) {
            dims = new Text[dimList.size()];
            for (int i = 0; i < dims.length; i++) {
                dims[i] = new Text(dimList.get(i));
            }
        }
        StringArrayWritable sw = new StringArrayWritable(dims);
        sw.write(out);

        MapWritable mw = new MapWritable();

        if (dimList != null) {
            for (String dim : dimList) {
                List<String> dimValue = row.getDimension(dim);

                if (dimValue == null || dimValue.size() == 0) {
                    continue;
                }

                if (dimValue.size() == 1) {
                    mw.put(new Text(dim), new Text(dimValue.get(0)));
                } else {
                    Text[] dimValueArr = new Text[dimValue.size()];
                    for (int i = 0; i < dimValueArr.length; i++) {
                        dimValueArr[i] = new Text(dimValue.get(i));
                    }
                    mw.put(new Text(dim), new StringArrayWritable(dimValueArr));
                }
            }
        }

        //writing all metrics
        Supplier<InputRow> supplier = new Supplier<InputRow>() {
            @Override
            public InputRow get() {
                return row;
            }
        };
        for (AggregatorFactory aggFactory : aggs) {
            String k = aggFactory.getName();

            Aggregator agg = aggFactory
                    .factorize(IncrementalIndex.makeColumnSelectorFactory(aggFactory, supplier, true));
            agg.aggregate();

            String t = aggFactory.getTypeName();

            if (t.equals("float")) {
                mw.put(new Text(k), new FloatWritable(agg.getFloat()));
            } else if (t.equals("long")) {
                mw.put(new Text(k), new LongWritable(agg.getLong()));
            } else {
                // it's a complex metric
                Object val = agg.get();
                ComplexMetricSerde serde = getComplexMetricSerde(t);
                mw.put(new Text(k), new BytesWritable(serde.toBytes(val)));
            }
        }

        mw.write(out);
        return out.toByteArray();
    } catch (IOException ex) {
        throw Throwables.propagate(ex);
    }
}

From source file:it.crs4.pydoop.mapreduce.pipes.TestPipesNonJavaInputFormat.java

License:Apache License

/**
 * test PipesNonJavaInputFormat
 */

@Test
public void testFormat() throws IOException, InterruptedException {
    JobID jobId = new JobID("201408272347", 0);
    TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
    TaskAttemptID taskAttemptid = new TaskAttemptID(taskId, 0);

    Job job = new Job(new Configuration());
    job.setJobID(jobId);
    Configuration conf = job.getConfiguration();

    TaskAttemptContextImpl tcontext = new TaskAttemptContextImpl(conf, taskAttemptid);

    PipesNonJavaInputFormat input_format = new PipesNonJavaInputFormat();

    DummyRecordReader reader = (DummyRecordReader) input_format.createRecordReader(new FileSplit(), tcontext);
    assertEquals(0.0f, reader.getProgress(), 0.001);

    // input and output files
    File input1 = new File(workSpace + File.separator + "input1");
    if (!input1.getParentFile().exists()) {
        Assert.assertTrue(input1.getParentFile().mkdirs());
    }

    if (!input1.exists()) {
        Assert.assertTrue(input1.createNewFile());
    }

    File input2 = new File(workSpace + File.separator + "input2");
    if (!input2.exists()) {
        Assert.assertTrue(input2.createNewFile());
    }

    // This will fail without HDFS support.
    // // set data for splits
    // conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
    //          StringUtils.escapeString(input1.getAbsolutePath()) + ","
    //          + StringUtils.escapeString(input2.getAbsolutePath()));
    // List<InputSplit> splits = input_format.getSplits(job);
    // assertTrue(splits.size() >= 2);

    PipesNonJavaInputFormat.PipesDummyRecordReader dummyRecordReader = new PipesNonJavaInputFormat.PipesDummyRecordReader(
            new FileSplit(), tcontext);
    // empty dummyRecordReader
    assertEquals(0.0, dummyRecordReader.getProgress(), 0.001);
    // test method next
    assertTrue(dummyRecordReader.next(new FloatWritable(2.0f), NullWritable.get()));
    assertEquals(2.0, dummyRecordReader.getProgress(), 0.001);
    dummyRecordReader.close();
}

From source file:mapreduce2.SpeciesViewerMapper.java

public void map(WritableComparable key, Writable value, OutputCollector output, Reporter reporter)
        throws IOException {

    // get the current page
    String data = ((Text) value).toString();
    int index = data.indexOf(":");
    if (index == -1) {
        return;
    }

    // split into title and PR (tab or variable number of blank spaces)
    String toParse = data.substring(0, index).trim();
    String[] splits = toParse.split("\t");
    if (splits.length < 2) {
        // no tab separator found: fall back to splitting on runs of blank spaces
        splits = toParse.split("\\s+");
        if (splits.length == 0) {
            return;
        }
    }
    String pagetitle = splits[0].trim();
    String pagerank = splits[splits.length - 1].trim();

    // parse score
    double currScore = 0.0;
    try {
        currScore = Double.parseDouble(pagerank);
    } catch (Exception e) {
        currScore = 0.0;
    }

    // collect
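    // the score is negated so that the default ascending sort on FloatWritable
    // keys emits the highest-ranked pages first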
    //     output.collect(new FloatWritable((float) - currScore), key); 
    output.collect(new FloatWritable((float) -currScore), new Text(pagetitle.replaceAll("[^A-Za-z]", " ")));
}

From source file:mapreducejava.SpeciesViewerMapper.java

public void map(WritableComparable key, Writable value, OutputCollector output, Reporter reporter)
        throws IOException {

    // get the current page
    String data = ((Text) value).toString();
    int index = data.indexOf(":");
    if (index == -1) {
        return;
    }

    // split into title and PR (tab or variable number of blank spaces)
    String toParse = data.substring(0, index).trim();
    String[] splits = toParse.split("\t");
    if (splits.length < 2) {
        // no tab separator found: fall back to splitting on runs of blank spaces
        splits = toParse.split("\\s+");
        if (splits.length == 0) {
            return;
        }
    }
    String pagetitle = splits[0].trim();
    String pagerank = splits[splits.length - 1].trim();

    // parse score
    double currScore = 0.0;
    try {
        currScore = Double.parseDouble(pagerank);
    } catch (Exception e) {
        currScore = 0.0;
    }

    // collect
    //output.collect(new FloatWritable((float) - currScore), key); 
    output.collect(new FloatWritable((float) -currScore), new Text(pagetitle));
}

From source file:mapreducemaxstock.StockPriceMapper.java

public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

    String line = value.toString();
    String[] items = line.split(",");

    String stock = items[1];
    Float closePrice = Float.parseFloat(items[6]);

    context.write(new Text(stock), new FloatWritable(closePrice));
}

From source file:mapreducemaxstock.StockPriceReducer.java

public void reduce(Text key, Iterable<FloatWritable> values, Context context)
        throws IOException, InterruptedException {
    // Float.MIN_VALUE is the smallest positive float, not the most negative value,
    // so NEGATIVE_INFINITY is the safe starting point for a maximum
    float maxClosePrice = Float.NEGATIVE_INFINITY;

    for (FloatWritable value : values) {
        maxClosePrice = Math.max(maxClosePrice, value.get());
    }

    context.write(key, new FloatWritable(maxClosePrice));
}
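For completeness, a driver for the two mapreducemaxstock classes above might look roughly like this. This is a sketch only: it assumes StockPriceMapper and StockPriceReducer extend the new-API Mapper and Reducer with Text/FloatWritable outputs, and the driver class name and argument handling are illustrative.

package mapreducemaxstock; // same package as the mapper and reducer above

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MaxStockPriceDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "max close price");
        job.setJarByClass(MaxStockPriceDriver.class);
        job.setMapperClass(StockPriceMapper.class);
        // the reducer also works as a combiner because max() is associative
        job.setCombinerClass(StockPriceReducer.class);
        job.setReducerClass(StockPriceReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FloatWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}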