Example usage for org.apache.hadoop.io WritableUtils writeVInt

Introduction

This page collects example usages of org.apache.hadoop.io.WritableUtils.writeVInt.

Prototype

public static void writeVInt(DataOutput stream, int i) throws IOException 

Document

Serializes an integer to a binary stream with zero-compressed encoding.
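
writeVInt pairs with WritableUtils.readVInt on the read side; small values occupy a single byte and an int never needs more than five. Here is a minimal round-trip sketch (not taken from any of the projects below; the class name VIntRoundTrip is made up):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class VIntRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);

        // Values in [-112, 127] fit in one byte; larger magnitudes grow as needed.
        WritableUtils.writeVInt(out, 42);
        WritableUtils.writeVInt(out, 1000000);

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
        System.out.println(WritableUtils.readVInt(in)); // 42
        System.out.println(WritableUtils.readVInt(in)); // 1000000
    }
}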

Usage

From source file:org.kiji.hive.io.KijiRowDataWritable.java

License:Apache License

/**
 * Helper method for the {@link org.apache.hadoop.io.Writable} interface for writing
 * KijiRowDataWritable objects.  If passed a KijiColumnName, it will replace the data for the
 * specified column (relevant for paging through results).
 *
 * @param out DataOutput for the Hadoop Writable to write to.
 * @param pageData map of columns to paged data to be substituted (or an empty map if there are
 *                 no pages to substitute).
 * @throws IOException if there was an issue.
 */
protected void writeWithPages(DataOutput out,
        Map<KijiColumnName, NavigableMap<Long, KijiCellWritable>> pageData) throws IOException {

    // Write the EntityId
    mEntityId.write(out);

    // Count the total number of columns to write.
    Set<KijiColumnName> columnNames = Sets.newHashSet();
    for (KijiColumnName columnName : mWritableData.keySet()) {
        if (!mKijiQualifierPagers.containsKey(columnName.getFamily())) {
            columnNames.add(columnName);
        }
    }
    columnNames.addAll(pageData.keySet());
    WritableUtils.writeVInt(out, columnNames.size());

    // Write the unpaged data.
    for (Entry<KijiColumnName, NavigableMap<Long, KijiCellWritable>> entry : mWritableData.entrySet()) {
        KijiColumnName kijiColumnName = entry.getKey();
        if (!pageData.containsKey(kijiColumnName)
                && !mKijiQualifierPagers.containsKey(kijiColumnName.getFamily())) {
            // Only write if it's not part of the paged data.
            writeColumn(out, kijiColumnName, entry.getValue());
        }
    }

    // Write paged data if any.
    for (Entry<KijiColumnName, NavigableMap<Long, KijiCellWritable>> entry : pageData.entrySet()) {
        writeColumn(out, entry.getKey(), entry.getValue());
    }

    WritableUtils.writeVInt(out, mSchemas.size());
    for (Map.Entry<KijiColumnName, Schema> entry : mSchemas.entrySet()) {
        WritableUtils.writeString(out, entry.getKey().getName());
        WritableUtils.writeString(out, entry.getValue().toString());
    }
}

From source file:org.kiji.hive.io.KijiRowDataWritable.java

License:Apache License

/**
 * Helper function to write a column and its associated data.
 *
 * @param out DataOutput for the Hadoop Writable to write to.
 * @param kijiColumnName the column to write.
 * @param data to write
 * @throws IOException if there was an issue.
 */
private void writeColumn(DataOutput out, KijiColumnName kijiColumnName,
        NavigableMap<Long, KijiCellWritable> data) throws IOException {
    WritableUtils.writeString(out, kijiColumnName.getName());
    WritableUtils.writeVInt(out, data.size()); // number in the timeseries
    for (Map.Entry<Long, KijiCellWritable> cellEntry : data.entrySet()) {
        WritableUtils.writeVLong(out, cellEntry.getKey());
        cellEntry.getValue().write(out);
    }
}
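
WritableUtils.writeVLong, used here for the timestamp keys, is the 64-bit sibling of writeVInt and uses the same zero-compressed encoding (one to nine bytes per value).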

From source file:org.lilyproject.mapreduce.RecordIdWritable.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    byte[] bytes = recordId.toBytes();
    WritableUtils.writeVInt(out, bytes.length);
    out.write(bytes);
}
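
The matching read side of this length-prefixed pattern recovers the length with readVInt and then fills a buffer. A generic sketch (not Lily's actual readFields implementation):

@Override
public void readFields(DataInput in) throws IOException {
    int length = WritableUtils.readVInt(in);
    byte[] bytes = new byte[length];
    in.readFully(bytes);
    // reconstruct the record id from 'bytes' here
}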

From source file:org.terrier.compression.integer.ByteOutputStream.java

License:Mozilla Public License

@Override
public int writeVInt(int x) throws IOException {

    int bytes = WritableUtils.getVIntSize(x);
    WritableUtils.writeVInt(dos, x);
    byteOffset += bytes;

    return bytes;
}
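
WritableUtils.getVIntSize(x) reports how many bytes (one to five for an int) writeVInt will emit for x, which lets the stream track its byte offset without inspecting the underlying output.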

From source file:org.terrier.structures.indexing.BlockDocumentPostingList.java

License:Mozilla Public License

@Override
public void write(final DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, getNumberOfPointers());
    try {
        this.forEachTerm(new TObjectIntProcedure<String>() {
            public boolean execute(String term, int freq) {
                try {
                    Text.writeString(out, term);
                    WritableUtils.writeVInt(out, freq);
                    final int[] blocks = term_blocks.get(term).toArray();
                    Arrays.sort(blocks);
                    final int bf = blocks.length;
                    WritableUtils.writeVInt(out, bf);
                    if (bf == 0)
                        return true;
                    WritableUtils.writeVInt(out, blocks[0] + 1);
                    for (int i = 1; i < bf; i++)
                        WritableUtils.writeVInt(out, blocks[i] - blocks[i - 1]);

                } catch (IOException e) {
                    throw new Error(e);
                }
                return true;
            }
        });
    } catch (Error e) {
        throw (IOException) e.getCause();
    }
}
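
The block positions are sorted and gap-encoded: the first position is written with a +1 offset and each later position as the difference from its predecessor, so every value passed to writeVInt stays small. A decoder inverts this (an illustrative sketch, not Terrier's actual reader):

int bf = WritableUtils.readVInt(in);
int[] blocks = new int[bf];
if (bf > 0) {
    blocks[0] = WritableUtils.readVInt(in) - 1; // undo the +1 offset
    for (int i = 1; i < bf; i++)
        blocks[i] = blocks[i - 1] + WritableUtils.readVInt(in); // re-accumulate the gaps
}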

From source file:org.terrier.structures.indexing.DocumentPostingList.java

License:Mozilla Public License

public void write(final DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, getNumberOfPointers());
    try {
        this.forEachTerm(new TObjectIntProcedure<String>() {
            public boolean execute(String term, int freq) {
                try {
                    Text.writeString(out, term);
                    WritableUtils.writeVInt(out, freq);
                } catch (IOException e) {
                    throw new Error(e);
                }
                return true;
            }
        });
    } catch (Error e) {
        throw (IOException) e.getCause();
    }
}

From source file:org.terrier.structures.indexing.singlepass.hadoop.NewSplitEmittedTerm.java

License:Mozilla Public License

/**
 * Write out this Term key to output stream 'out'
 */
@Override
public void write(DataOutput out) throws IOException {
    if (USE_HADOOP_TEXT)
        Text.writeString(out, term);
    else
        out.writeUTF(term);
    WritableUtils.writeVInt(out, splitno);
    WritableUtils.writeVInt(out, flushno);
}

From source file:org.terrier.structures.indexing.singlepass.hadoop.SplitEmittedTerm.java

License:Mozilla Public License

/**
 * Write out this Term key to output stream 'out'
 */
public void write(DataOutput out) throws IOException {
    if (USE_HADOOP_TEXT)
        Text.writeString(out, term);
    else
        out.writeUTF(term);
    WritableUtils.writeVInt(out, splitno);
    WritableUtils.writeVInt(out, flushno);
}

From source file:org.terrier.structures.postings.BasicPostingImpl.java

License:Mozilla Public License

/** Writes the current posting (not an iterable posting - use DirectInvertedOutputStream for that).
 * Compression using this method is not expected to be comparable to bit-level compression. */
public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, id);
    WritableUtils.writeVInt(out, tf);
}
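
Document ids and term frequencies are small non-negative integers in practice, exactly the case vint encoding favors. The matching read mirrors the write order (a sketch, not Terrier's actual code):

id = WritableUtils.readVInt(in);
tf = WritableUtils.readVInt(in);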

From source file:org.terrier.structures.postings.bit.BlockFieldIterablePosting.java

License:Mozilla Public License

/** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
    super.write(out);
    out.writeInt(fieldFrequencies.length);
    for (int field_f : fieldFrequencies)
        out.writeInt(field_f);
    WritableUtils.writeVInt(out, positions.length);
    for (int pos : positions)
        WritableUtils.writeVInt(out, pos);
}
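
Note the mix in this last example: the field frequencies use fixed four-byte writeInt calls, while the typically small position values use writeVInt.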