Example usage for org.apache.hadoop.io MapWritable readFields

Introduction

On this page you can find usage examples for org.apache.hadoop.io MapWritable readFields.

Prototype

@Override
public void readFields(DataInput in) throws IOException
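
Before the project snippets below, here is a minimal, self-contained round-trip sketch (not taken from any of the listed projects; the class name and map keys are illustrative only): a MapWritable is serialized with write(DataOutput) and then restored into a fresh instance with readFields(DataInput).

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;

public class MapWritableRoundTrip {
    public static void main(String[] args) throws IOException {
        // Populate a MapWritable and serialize it with write(DataOutput).
        MapWritable original = new MapWritable();
        original.put(new Text("count"), new IntWritable(42));
        original.put(new Text("label"), new Text("example"));

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(baos)) {
            original.write(out);
        }

        // Restore the map into a fresh instance with readFields(DataInput).
        MapWritable copy = new MapWritable();
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
            copy.readFields(in);
        }

        System.out.println(copy.get(new Text("count"))); // prints 42
    }
}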

Usage

From source file:edu.ub.ahstfg.io.index.ArrayIndex.java

License:Open Source License

@Override
public void readFields(DataInput input) throws IOException {
    ArrayWritable wTerms = new ArrayWritable(Text.class);
    wTerms.readFields(input);
    terms = WritableConverter.arrayWritable2LinkedListString(wTerms);

    MapWritable wTermFreq = new MapWritable();
    wTermFreq.readFields(input);
    termFreq = WritableConverter.mapWritable2HashMapStringLinkedListShort(wTermFreq);

    ArrayWritable wKeywords = new ArrayWritable(Text.class);
    wKeywords.readFields(input);
    keywords = WritableConverter.arrayWritable2LinkedListString(wKeywords);

    MapWritable wKeywordFreq = new MapWritable();
    wKeywordFreq.readFields(input);
    keywordFreq = WritableConverter.mapWritable2HashMapStringLinkedListShort(wKeywordFreq);
}

From source file:gaffer.accumulostore.key.core.AbstractCoreKeyAccumuloElementConverter.java

License:Apache License

@Override
public Properties getPropertiesFromValue(final String group, final Value value)
        throws AccumuloElementConversionException {
    final Properties properties = new Properties();
    if (value == null || value.getSize() == 0) {
        return properties;
    }
    final MapWritable map = new MapWritable();
    try (final InputStream inStream = new ByteArrayInputStream(value.get());
            final DataInputStream dataStream = new DataInputStream(inStream)) {
        map.readFields(dataStream);
    } catch (final IOException e) {
        throw new AccumuloElementConversionException("Failed to read map writable from value", e);
    }
    final StoreElementDefinition elementDefinition = storeSchema.getElement(group);
    if (null == elementDefinition) {
        throw new AccumuloElementConversionException("No StoreElementDefinition found for group " + group
                + " is this group in your Store Schema or do your table iterators need updating?");
    }
    for (final Writable writeableKey : map.keySet()) {
        final String propertyName = writeableKey.toString();
        final BytesWritable propertyValueBytes = (BytesWritable) map.get(writeableKey);
        try {
            properties.put(propertyName, elementDefinition.getProperty(propertyName).getSerialiser()
                    .deserialise(propertyValueBytes.getBytes()));
        } catch (final SerialisationException e) {
            throw new AccumuloElementConversionException("Failed to deserialise property " + propertyName, e);
        }
    }
    return properties;
}

From source file:io.druid.indexer.InputRowSerde.java

License:Apache License

public static final InputRow fromBytes(byte[] data, AggregatorFactory[] aggs) {
    try {
        DataInput in = ByteStreams.newDataInput(data);

        //Read timestamp
        long timestamp = in.readLong();

        //Read dimensions
        StringArrayWritable sw = new StringArrayWritable();
        sw.readFields(in);
        List<String> dimensions = Arrays.asList(sw.toStrings());

        MapWritable mw = new MapWritable();
        mw.readFields(in);

        Map<String, Object> event = Maps.newHashMap();

        for (String d : dimensions) {
            Writable v = mw.get(new Text(d));

            if (v == null) {
                continue;
            }

            if (v instanceof Text) {
                event.put(d, ((Text) v).toString());
            } else if (v instanceof StringArrayWritable) {
                event.put(d, Arrays.asList(((StringArrayWritable) v).toStrings()));
            } else {
                throw new ISE("unknown dim value type %s", v.getClass().getName());
            }
        }

        //Read metrics
        for (AggregatorFactory aggFactory : aggs) {
            String k = aggFactory.getName();
            Writable v = mw.get(new Text(k));

            if (v == null) {
                continue;
            }

            String t = aggFactory.getTypeName();

            if (t.equals("float")) {
                event.put(k, ((FloatWritable) v).get());
            } else if (t.equals("long")) {
                event.put(k, ((LongWritable) v).get());
            } else {
                // it's a complex metric
                ComplexMetricSerde serde = getComplexMetricSerde(t);
                BytesWritable bw = (BytesWritable) v;
                event.put(k, serde.fromBytes(bw.getBytes(), 0, bw.getLength()));
            }
        }

        return new MapBasedInputRow(timestamp, dimensions, event);
    } catch (IOException ex) {
        throw Throwables.propagate(ex);
    }
}

From source file:org.apache.accumulo.core.client.mapreduce.lib.impl.InputConfigurator.java

License:Apache License

/**
 * Returns all {@link InputTableConfig} objects associated with this job.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @return all of the table query configs for the job
 * @since 1.6.0
 */
public static Map<String, InputTableConfig> getInputTableConfigs(Class<?> implementingClass,
        Configuration conf) {
    Map<String, InputTableConfig> configs = new HashMap<>();
    Map.Entry<String, InputTableConfig> defaultConfig = getDefaultInputTableConfig(implementingClass, conf);
    if (defaultConfig != null)
        configs.put(defaultConfig.getKey(), defaultConfig.getValue());
    String configString = conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS));
    MapWritable mapWritable = new MapWritable();
    if (configString != null) {
        try {
            byte[] bytes = Base64.getDecoder().decode(configString);
            ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
            mapWritable.readFields(new DataInputStream(bais));
            bais.close();
        } catch (IOException e) {
            throw new IllegalStateException(
                    "The table query configurations could not be deserialized from the given configuration");
        }
    }
    for (Map.Entry<Writable, Writable> entry : mapWritable.entrySet())
        configs.put(((Text) entry.getKey()).toString(), (InputTableConfig) entry.getValue());

    return configs;
}

From source file:org.apache.accumulo.core.clientImpl.mapreduce.lib.InputConfigurator.java

License:Apache License

/**
 * Returns all InputTableConfig objects associated with this job.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @return all of the table query configs for the job
 * @since 1.6.0
 */
public static Map<String, org.apache.accumulo.core.client.mapreduce.InputTableConfig> getInputTableConfigs(
        Class<?> implementingClass, Configuration conf) {
    Map<String, org.apache.accumulo.core.client.mapreduce.InputTableConfig> configs = new HashMap<>();
    Map.Entry<String, org.apache.accumulo.core.client.mapreduce.InputTableConfig> defaultConfig = getDefaultInputTableConfig(
            implementingClass, conf);
    if (defaultConfig != null)
        configs.put(defaultConfig.getKey(), defaultConfig.getValue());
    String configString = conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS));
    MapWritable mapWritable = new MapWritable();
    if (configString != null) {
        try {
            byte[] bytes = Base64.getDecoder().decode(configString);
            ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
            mapWritable.readFields(new DataInputStream(bais));
            bais.close();
        } catch (IOException e) {
            throw new IllegalStateException("The table query configurations could not be deserialized"
                    + " from the given configuration");
        }
    }
    for (Map.Entry<Writable, Writable> entry : mapWritable.entrySet())
        configs.put(entry.getKey().toString(),
                (org.apache.accumulo.core.client.mapreduce.InputTableConfig) entry.getValue());

    return configs;
}

From source file:org.apache.accumulo.hadoopImpl.mapreduce.lib.InputConfigurator.java

License:Apache License

/**
 * Returns all {@link InputTableConfig} objects associated with this job.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param tableName
 *          the table name for which to retrieve the configuration
 * @return all of the table query configs for the job
 * @since 1.6.0
 */
private static Map<String, InputTableConfig> getInputTableConfigs(Class<?> implementingClass,
        Configuration conf, String tableName) {
    Map<String, InputTableConfig> configs = new HashMap<>();
    Map.Entry<String, InputTableConfig> defaultConfig = getDefaultInputTableConfig(implementingClass, conf,
            tableName);
    if (defaultConfig != null)
        configs.put(defaultConfig.getKey(), defaultConfig.getValue());
    String configString = conf.get(enumToConfKey(implementingClass, ScanOpts.TABLE_CONFIGS));
    MapWritable mapWritable = new MapWritable();
    if (configString != null) {
        try {
            byte[] bytes = Base64.getDecoder().decode(configString);
            ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
            mapWritable.readFields(new DataInputStream(bais));
            bais.close();
        } catch (IOException e) {
            throw new IllegalStateException("The table query configurations could not be deserialized"
                    + " from the given configuration");
        }
    }
    for (Map.Entry<Writable, Writable> entry : mapWritable.entrySet())
        configs.put(entry.getKey().toString(), (InputTableConfig) entry.getValue());

    return configs;
}
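
All three InputConfigurator variants above follow the same pattern: read a Base64-encoded string from the Hadoop Configuration, decode it, and feed the resulting bytes to MapWritable.readFields. For reference, here is a hedged sketch of the producing side (illustrative only, not the actual Accumulo setter; the property key used here is a placeholder):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Base64;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.MapWritable;

public class MapWritableConfSketch {
    // Serialize a MapWritable and store it in the Configuration as Base64 text,
    // mirroring the decode-and-readFields code shown above.
    // "example.table.configs" is a placeholder key, not a real Accumulo property.
    public static void storeInConf(Configuration conf, MapWritable map) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(baos)) {
            map.write(out);
        }
        conf.set("example.table.configs", Base64.getEncoder().encodeToString(baos.toByteArray()));
    }
}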

From source file:org.apache.flume.channel.file.FlumeEvent.java

License:Apache License

@Override
public void readFields(DataInput in) throws IOException {
    MapWritable map = new MapWritable();
    map.readFields(in);
    setHeaders(fromMapWritable(map));
    byte[] body = null;
    int bodyLength = in.readInt();
    if (bodyLength != -1) {
        body = new byte[bodyLength];
        in.readFully(body);
    }
    setBody(body);
}

From source file:org.apache.flume.channel.recoverable.memory.RecoverableMemoryChannelEvent.java

License:Apache License

@Override
public void readFields(DataInput in) throws IOException {
    sequenceId = in.readLong();
    MapWritable map = new MapWritable();
    map.readFields(in);
    setHeaders(fromMapWritable(map));
    byte[] body = null;
    int bodyLength = in.readInt();
    if (bodyLength != -1) {
        body = new byte[bodyLength];
        in.readFully(body);
    }
    setBody(body);
}

From source file:org.apache.gora.util.WritableUtils.java

License:Apache License

public static final Properties readProperties(DataInput in) throws IOException {
    Properties props = new Properties();
    MapWritable propsWritable = new MapWritable();
    propsWritable.readFields(in);
    for (Entry<Writable, Writable> prop : propsWritable.entrySet()) {
        String key = prop.getKey().toString();
        String value = prop.getValue().toString();
        props.put(key, value);
    }
    return props;
}

From source file:org.apache.nutch.crawl.CrawlDatum.java

License:Apache License

public void readFields(DataInput in) throws IOException {
    byte version = in.readByte(); // read version
    if (version > CUR_VERSION) // check version
        throw new VersionMismatchException(CUR_VERSION, version);

    status = in.readByte();
    fetchTime = in.readLong();
    retries = in.readByte();
    if (version > 5) {
        fetchInterval = in.readInt();
    } else
        fetchInterval = Math.round(in.readFloat());
    score = in.readFloat();
    if (version > 2) {
        modifiedTime = in.readLong();
        int cnt = in.readByte();
        if (cnt > 0) {
            signature = new byte[cnt];
            in.readFully(signature);
        } else
            signature = null;
    }

    if (version > 3) {
        boolean hasMetadata = false;
        if (version < 7) {
            org.apache.hadoop.io.MapWritable oldMetaData = new org.apache.hadoop.io.MapWritable();
            if (in.readBoolean()) {
                hasMetadata = true;
                metaData = new org.apache.hadoop.io.MapWritable();
                oldMetaData.readFields(in);
            }
            for (Writable key : oldMetaData.keySet()) {
                metaData.put(key, oldMetaData.get(key));
            }
        } else {
            if (in.readBoolean()) {
                hasMetadata = true;
                metaData = new org.apache.hadoop.io.MapWritable();
                metaData.readFields(in);
            }
        }
        if (hasMetadata == false)
            metaData = null;
    }
    // translate status codes
    if (version < 5) {
        if (oldToNew.containsKey(status))
            status = oldToNew.get(status);
        else
            status = STATUS_DB_UNFETCHED;

    }
}