Example usage for org.apache.hadoop.io FloatWritable FloatWritable

Introduction

On this page you can find example usages of the org.apache.hadoop.io.FloatWritable(float) constructor.

Prototype

public FloatWritable(float value) 
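
A minimal sketch of this constructor in isolation (not taken from the examples below; the class name FloatWritableDemo is ours): construct a FloatWritable with an initial value, then round-trip it through Hadoop's Writable serialization.

import java.io.*;
import org.apache.hadoop.io.FloatWritable;

public class FloatWritableDemo {
    public static void main(String[] args) throws IOException {
        FloatWritable fw = new FloatWritable(3.14f); // value fixed at construction
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        fw.write(new DataOutputStream(bytes)); // serializes the float as 4 bytes

        FloatWritable copy = new FloatWritable(); // no-arg constructor, then readFields
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.get()); // 3.14
    }
}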

Usage

From source file:com.hotels.corc.sarg.EvaluatorFactory.java

License:Apache License

static Comparable<?> toComparable(PrimitiveCategory category, Object literal) {
    String stringLiteral;
    switch (category) {
    case STRING:
        return new Text((String) literal);
    case BOOLEAN:
        return new BooleanWritable((Boolean) literal);
    case BYTE:
        return new ByteWritable(((Long) literal).byteValue());
    case SHORT:
        return new ShortWritable(((Long) literal).shortValue());
    case INT:
        return new IntWritable(((Long) literal).intValue());
    case LONG:
        return new LongWritable((Long) literal);
    case FLOAT:
        return new FloatWritable(((Double) literal).floatValue());
    case DOUBLE:
        return new DoubleWritable((Double) literal);
    case TIMESTAMP:
        return new TimestampWritable((Timestamp) literal);
    case DATE:
        return (DateWritable) literal;
    case CHAR:
        stringLiteral = (String) literal;
        return new HiveCharWritable(new HiveChar(stringLiteral, stringLiteral.length()));
    case VARCHAR:
        stringLiteral = (String) literal;
        return new HiveVarcharWritable(new HiveVarchar(stringLiteral, stringLiteral.length()));
    case DECIMAL:
        return new HiveDecimalWritable(HiveDecimal.create((BigDecimal) literal));
    default:
        throw new IllegalArgumentException("Unsupported category: " + category);
    }
}

From source file:com.hotels.corc.sarg.EvaluatorFactoryTest.java

License:Apache License

@SuppressWarnings("rawtypes")
@Test
public void floatTypical() {
    assertThat(toComparable(FLOAT, 0.0D), is((Comparable) new FloatWritable(0.0F)));
}

From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.TestJsonNodeWritableUtils.java

License:Apache License

@Test
public void test_transform() {
    final ObjectMapper mapper = BeanTemplateUtils.configureMapper(Optional.empty());
    new JsonNodeWritableUtils(); //coverage!

    assertEquals(NullNode.instance, JsonNodeWritableUtils.transform("banana", JsonNodeFactory.instance));
    assertEquals(null, JsonNodeWritableUtils.transform(null, JsonNodeFactory.instance));
    assertEquals(NullNode.instance,
            JsonNodeWritableUtils.transform(NullWritable.get(), JsonNodeFactory.instance));
    assertEquals(mapper.convertValue(true, JsonNode.class),
            JsonNodeWritableUtils.transform(new BooleanWritable(true), JsonNodeFactory.instance));
    assertEquals(mapper.convertValue("test", JsonNode.class),
            JsonNodeWritableUtils.transform(new Text("test"), JsonNodeFactory.instance));
    assertEquals(mapper.convertValue(new byte[] { (byte) 0xFF }, JsonNode.class),
            JsonNodeWritableUtils.transform(new ByteWritable((byte) 0xFF), JsonNodeFactory.instance));
    assertEquals(mapper.convertValue(4, JsonNode.class),
            JsonNodeWritableUtils.transform(new IntWritable(4), JsonNodeFactory.instance));
    assertEquals(mapper.convertValue(4, JsonNode.class),
            JsonNodeWritableUtils.transform(new VIntWritable(4), JsonNodeFactory.instance));
    assertEquals(mapper.convertValue(4L, JsonNode.class),
            JsonNodeWritableUtils.transform(new LongWritable(4), JsonNodeFactory.instance));
    assertEquals(mapper.convertValue(4L, JsonNode.class),
            JsonNodeWritableUtils.transform(new VLongWritable(4), JsonNodeFactory.instance));
    assertEquals(mapper.convertValue(new byte[] { (byte) 0xFF, (byte) 0xFE }, JsonNode.class),
            JsonNodeWritableUtils.transform(new BytesWritable(new byte[] { (byte) 0xFF, (byte) 0xFE }),
                    JsonNodeFactory.instance));
    assertEquals(mapper.convertValue(4.0, JsonNode.class),
            JsonNodeWritableUtils.transform(new DoubleWritable(4), JsonNodeFactory.instance));
    //(had real trouble creating a float node!)
    assertEquals(JsonNodeFactory.instance.numberNode(Float.valueOf((float) 4.0)),
            JsonNodeWritableUtils.transform(new FloatWritable(4), JsonNodeFactory.instance));

    // will test object writable and array writable below      
}
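
The odd float case above deserves a note: per the test's own comment, building the expected node via mapper.convertValue (as the other assertions do) did not yield a node that compares equal to what transform produces for FloatWritable, so the test constructs the node directly. A minimal sketch of the distinction (Jackson 2.x; the DoubleNode outcome for convertValue is our reading of the test comment, not something the snippet states):

JsonNode direct = JsonNodeFactory.instance.numberNode(Float.valueOf(4.0f)); // a FloatNode
JsonNode converted = mapper.convertValue(4.0f, JsonNode.class); // may come back as a DoubleNode instead
// JsonNode.equals is type-sensitive for these, so direct.equals(converted) can be false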

From source file:com.ML_Hadoop.K_meansClustering.K_meansClusteringMap.java

@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
    FloatWritable[] temp = new FloatWritable[feature_size + 1];

    for (int i = 0; i < number_of_clusters; i++) {
        temp[0] = new FloatWritable(num_of_members_in_a_cluster[i]);
        for (int j = 1; j < feature_size + 1; j++) {
            temp[j] = new FloatWritable(sum_of_members_in_a_cluster.get(i)[j - 1]);
        }
        context.write(new LongWritable(i), new FloatArrayWritable(temp));
    }
}
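
FloatArrayWritable is not a stock Hadoop class; it is presumably this project's ArrayWritable subclass. A minimal sketch of the usual pattern (our assumption about its shape, not the project's actual source):

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.FloatWritable;

public class FloatArrayWritable extends ArrayWritable {
    public FloatArrayWritable() { // no-arg constructor required for Hadoop's reflection-based deserialization
        super(FloatWritable.class);
    }

    public FloatArrayWritable(FloatWritable[] values) {
        super(FloatWritable.class, values);
    }
}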

From source file:com.ML_Hadoop.K_meansClustering.K_meansClusteringReduce.java

@Override // the signature must match exactly, or Hadoop silently runs the default identity reduce(); @Override turns a mismatch into a compile error
public void reduce(LongWritable key, Iterable<FloatArrayWritable> values, Context context)
        throws IOException, InterruptedException {

    int[] num_of_members_in_a_cluster = new int[number_of_clusters];
    int key_index = (int) key.get();

    for (FloatArrayWritable val : values) {
        num_of_members_in_a_cluster[key_index] += (int) ((FloatWritable) (val.get())[0]).get();
        Float[] temp = new Float[feature_size];
        for (int i = 0; i < feature_size; i++) {
            temp[i] = sum_of_members_of_a_cluster.get(key_index)[i]
                    + (Float) ((FloatWritable) (val.get())[i + 1]).get();
        }
        sum_of_members_of_a_cluster.set(key_index, temp);
    }

    Float[] temp = new Float[feature_size];
    for (int i = 0; i < feature_size; i++) {
        temp[i] = sum_of_members_of_a_cluster.get(key_index)[i] / num_of_members_in_a_cluster[key_index];
    }
    cetroid_of_a_cluster.set(key_index, temp);

    FloatWritable[] t = new FloatWritable[feature_size];
    for (int i = 0; i < feature_size; i++) {
        t[i] = new FloatWritable(cetroid_of_a_cluster.get(key_index)[i]);
    }

    cetroids_of_all_clusters.add(new FloatArrayWritable(t));
    context.write(key, new FloatArrayWritable(t));

}

From source file:com.ML_Hadoop.MultipleLinearRegression.MultipleLinearRegressionMap.java

@Override // cleanup() acts as an in-mapper combiner, aggregating this map task's output before the shuffle
protected void cleanup(Context context) throws IOException, InterruptedException {
    // aggregate results from the same map and then send to reducers
    Float[] temp = new Float[theta.length + 1];
    for (int i = 0; i < temp.length; i++)
        temp[i] = 0.0f;

    for (int i = 0; i < prediction_error.size(); i++) // iterates on rows
        for (int j = 0; j < prediction_error.get(i).size(); j++) { // iterates on columns
            temp[j] += prediction_error.get(i).get(j);
        }

    for (int i = 0; i < temp.length; i++)
        context.write(new LongWritable(i), new FloatWritable(temp[i]));
}

From source file:com.ML_Hadoop.MultipleLinearRegression.MultipleLinearRegressionReduce.java

@Override // the signature must match exactly, or Hadoop silently runs the default identity reduce()
public void reduce(LongWritable key, Iterable<FloatWritable> values, Context context)
        throws IOException, InterruptedException {

    // decode keys: key 0 carries the cost; keys 1..n carry the gradient components for theta
    if ((int) key.get() == 0) { // key.equals(new LongWritable(0)) would also work; == would only compare references
        Float cost = 0.0f;
        for (FloatWritable val : values)
            cost += val.get();
        prediction_error = cost;
        context.write(key, new FloatWritable(cost));
    } else { // keys 1..n carry the summed gradient component for theta[key - 1]
        Float cost = 0.0f;
        for (FloatWritable val : values)
            cost += val.get();

        // update theta
        System.out.println("cost for key: " + cost);
        System.out.println("cost  " + cost * alpha / input_data_size);

        int key_index = (int) key.get() - 1;
        System.out.println("key_index: " + key_index);

        theta[key_index] -= cost * alpha / input_data_size;
        context.write(key, new FloatWritable(cost));
    }

}

From source file:com.ML_Hadoop.NaiveBayesClassifier_Continuous_Features.NaiveBayesClassifierMap_Continuous_Features.java

@Override // cleanup() acts as an in-mapper combiner, aggregating this map task's output before the shuffle
protected void cleanup(Context context) throws IOException, InterruptedException {
    //features_probabilities.put(class_id, features);
    Float[] sigma_x2 = new Float[number_of_features];
    Float[] sigma_x = new Float[number_of_features];
    Float[] mu_x_local = new Float[number_of_features];
    Float[] num_x_local = new Float[number_of_features];
    MapWritable[] map_output = new MapWritable[number_of_features];

    // All arrays, including MapWritable[], must be initialized before use.
    for (int class_id = 0; class_id < number_of_classes; class_id++) {
        for (int i = 0; i < number_of_features; i++) {
            map_output[i] = new MapWritable(); // each element of a MapWritable[] must be created explicitly
            sigma_x2[i] = 0.0f;
            sigma_x[i] = 0.0f;
            mu_x_local[i] = 0.0f;
            num_x_local[i] = 0.0f;
        }
        for (int member_id_in_a_class_id = 0; member_id_in_a_class_id < num_of_members_in_each_class[class_id]; member_id_in_a_class_id++) {
            for (int feature_id_in_a_member_id = 0; feature_id_in_a_member_id < number_of_features; feature_id_in_a_member_id++) {
                sigma_x[feature_id_in_a_member_id] += (features_probabilities.get(class_id)
                        .get(member_id_in_a_class_id))[feature_id_in_a_member_id];
                sigma_x2[feature_id_in_a_member_id] += (features_probabilities.get(class_id)
                        .get(member_id_in_a_class_id))[feature_id_in_a_member_id]
                        * ((features_probabilities.get(class_id)
                                .get(member_id_in_a_class_id))[feature_id_in_a_member_id]);
            }
        }
        for (int feature_id_in_a_member_id = 0; feature_id_in_a_member_id < number_of_features; feature_id_in_a_member_id++) {
            num_x_local[feature_id_in_a_member_id] = (float) num_of_members_in_each_class[class_id];
            if (num_x_local[feature_id_in_a_member_id] == 0)
                mu_x_local[feature_id_in_a_member_id] = 0.0f;
            else
                mu_x_local[feature_id_in_a_member_id] = sigma_x[feature_id_in_a_member_id]
                        / num_x_local[feature_id_in_a_member_id];
        }

        for (int feature_id_in_a_member_id = 0; feature_id_in_a_member_id < number_of_features; feature_id_in_a_member_id++) {
            // MapWritable keys must be Writable (e.g., new Text("...")); a plain String will not work.
            // MapWritable values must be Writable too, e.g., FloatWritable.
            map_output[feature_id_in_a_member_id].put(new Text("sigma_x"),
                    new FloatWritable(sigma_x[feature_id_in_a_member_id]));
            map_output[feature_id_in_a_member_id].put(new Text("sigma_x2"),
                    new FloatWritable(sigma_x2[feature_id_in_a_member_id]));
            map_output[feature_id_in_a_member_id].put(new Text("mu_x_local"),
                    new FloatWritable(mu_x_local[feature_id_in_a_member_id]));
            map_output[feature_id_in_a_member_id].put(new Text("num_x_local"),
                    new FloatWritable(num_x_local[feature_id_in_a_member_id]));
        }

        context.write(new LongWritable(class_id), new MapArrayWritable(map_output));
    }

}
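
MapArrayWritable, used as the output value here, is again presumably a project-local ArrayWritable subclass, following the same recipe sketched for FloatArrayWritable above (our assumption, not the project's actual source):

public class MapArrayWritable extends ArrayWritable {
    public MapArrayWritable() {
        super(MapWritable.class);
    }

    public MapArrayWritable(MapWritable[] values) {
        super(MapWritable.class, values);
    }
}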

From source file:com.ML_Hadoop.NaiveBayesClassifier_Continuous_Features.NaiveBayesClassifierReduce_Continuous_Features.java

@Override // the signature must match exactly, or Hadoop silently runs the default identity reduce()
public void reduce(LongWritable key, Iterable<MapArrayWritable> values, Context context)
        throws IOException, InterruptedException {

    int key_index = (int) key.get();

    Float[] sigma_x2 = new Float[number_of_features];
    Float[] mu_x = new Float[number_of_features];
    Float[] num_x = new Float[number_of_features];
    Float[] partial_num_x = new Float[number_of_features];
    Float[] total_num_x = new Float[number_of_features];

    Float[] class_id_mu = new Float[number_of_features];
    Float[] class_id_std = new Float[number_of_features];

    MapWritable[] t = new MapWritable[number_of_features];

    // All arrays must be initialized before use.
    for (int i = 0; i < number_of_features; i++) {
        t[i] = new MapWritable(); // each element of an array (including MapWritable[]) must be initialized before use
        sigma_x2[i] = 0.0f;
        mu_x[i] = 0.0f;
        num_x[i] = 0.0f;
        partial_num_x[i] = 0.0f;
        total_num_x[i] = 0.0f;
        class_id_mu[i] = 0.0f;
        class_id_std[i] = 0.0f;
    }

    for (MapArrayWritable val : values) {
        for (int i = 0; i < number_of_features; i++) {
            num_x[i] = ((FloatWritable) ((MapWritable) (val.get()[i])).get(new Text("num_x_local"))).get();
            sigma_x2[i] += ((FloatWritable) ((MapWritable) (val.get()[i])).get(new Text("sigma_x2"))).get();
            mu_x[i] = ((FloatWritable) ((MapWritable) (val.get()[i])).get(new Text("mu_x_local"))).get();

            partial_num_x[i] += mu_x[i] * num_x[i]; // calculates mu(i)*N(i)
            total_num_x[i] += num_x[i]; // calculates total N=N1+N2+...+Nk
        }
    }

    for (int i = 0; i < number_of_features && total_num_x[0] != 0; i++) {
        class_id_mu[i] = partial_num_x[i] / total_num_x[i];
        // E[x^2] - mu^2 is the variance; take the square root for the standard deviation
        class_id_std[i] = sigma_x2[i] / total_num_x[i] - (class_id_mu[i] * class_id_mu[i]);
    }

    for (int i = 0; i < number_of_features && total_num_x[0] != 0; i++) {
        t[i].put(new Text("class_id_mu"), new FloatWritable(class_id_mu[i]));
        t[i].put(new Text("class_id_std"), new FloatWritable(class_id_std[i]));
    }

    probablity_info_output.set(key_index, t);

}

From source file:com.moz.fiji.hive.io.FijiCellWritable.java

License:Apache License

/**
 * Serializes data according to the specified schema.
 *
 * @param out DataOutput to serialize this object into.
 * @param data data to be serialized.
 * @param schema Schema to be used for serializing this data.
 * @throws IOException if there was an error writing.
 */
private static void writeData(DataOutput out, Object data, Schema schema) throws IOException {
    switch (schema.getType()) {
    case INT:
        Integer intData = (Integer) data;
        WritableUtils.writeVInt(out, intData);
        break;
    case LONG:
        Long longData = (Long) data;
        WritableUtils.writeVLong(out, longData);
        break;
    case DOUBLE:
        Double doubleData = (Double) data;
        DoubleWritable doubleWritable = new DoubleWritable(doubleData);
        doubleWritable.write(out);
        break;
    case ENUM:
    case STRING:
        String stringData = data.toString();
        WritableUtils.writeString(out, stringData);
        break;
    case FLOAT:
        Float floatData = (Float) data;
        FloatWritable floatWritable = new FloatWritable(floatData);
        floatWritable.write(out);
        break;
    case ARRAY:
        List<Object> listData = (List<Object>) data;
        WritableUtils.writeVInt(out, listData.size());
        for (Object listElement : listData) {
            writeData(out, listElement, schema.getElementType());
        }
        break;
    case RECORD:
        IndexedRecord recordData = (IndexedRecord) data;
        WritableUtils.writeVInt(out, schema.getFields().size());
        for (Schema.Field field : schema.getFields()) {
            WritableUtils.writeString(out, field.name());
            writeData(out, recordData.get(field.pos()), field.schema());
        }
        break;
    case MAP:
        Map<String, Object> mapData = (Map<String, Object>) data;
        WritableUtils.writeVInt(out, mapData.size());
        for (Map.Entry<String, Object> entry : mapData.entrySet()) {
            WritableUtils.writeString(out, entry.getKey());
            writeData(out, entry.getValue(), schema.getValueType());
        }
        break;
    case UNION:
        final Integer tag = GenericData.get().resolveUnion(schema, data);
        WritableUtils.writeVInt(out, tag);
        Schema unionSubSchema = schema.getTypes().get(tag);
        writeData(out, data, unionSubSchema);
        break;
    case BYTES:
        byte[] bytesData = (byte[]) data;
        WritableUtils.writeCompressedByteArray(out, bytesData);
        break;
    case BOOLEAN:
        Boolean booleanData = (Boolean) data;
        BooleanWritable booleanWritable = new BooleanWritable(booleanData);
        booleanWritable.write(out);
        break;
    case NULL:
        // Don't need to write anything for null.
        break;
    case FIXED:
    default:
        throw new UnsupportedOperationException("Unsupported type: " + schema.getType());
    }
}
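
For symmetry, the read side of the FLOAT branch pairs the no-arg constructor with readFields. A sketch of the hypothetical counterpart (not taken from FijiCellWritable itself; in is assumed to be the matching DataInput):

FloatWritable floatWritable = new FloatWritable(); // no-arg constructor
floatWritable.readFields(in); // consumes the 4 bytes written by write(out) above
Float floatData = floatWritable.get();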