Example usage for org.apache.hadoop.io WritableUtils writeVLong

Introduction

This page collects example usages of org.apache.hadoop.io.WritableUtils.writeVLong from open source projects.

Prototype

public static void writeVLong(DataOutput stream, long i) throws IOException 

Document

Serializes a long to a binary stream with zero-compressed encoding.
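
Before the project samples, here is a minimal round-trip sketch (not taken from any of the sources below). Zero-compressed encoding stores values in [-112, 127] in a single byte and larger magnitudes in a length byte plus up to eight value bytes; WritableUtils.readVLong is the symmetric decoder.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class VLongRoundTrip {
    public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        WritableUtils.writeVLong(out, 42L);                         // fits in a single byte
        WritableUtils.writeVLong(out, System.currentTimeMillis()); // length byte + value bytes

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        System.out.println(WritableUtils.readVLong(in)); // 42
        System.out.println(WritableUtils.readVLong(in)); // the timestamp
    }
}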

Usage

From source file:org.commoncrawl.util.TimeSeriesDataFile.java

License:Open Source License

/** append a record to the file ...  
 * @param key
 * @param value
 * @throws IOException
 */
public synchronized long appendRecordToLogFile(long key, Writable value) throws IOException {

    LogFileHeader header = new LogFileHeader();

    boolean preExistingHeader = fileName.exists();

    RandomAccessFile file = new RandomAccessFile(fileName, "rw");

    long recordPositionOut = -1;

    try {

        if (preExistingHeader) {

            long headerOffset = readLogFileHeader(file, header);

            if (header._writePos == 0) {
                recordPositionOut = headerOffset;
            } else {
                recordPositionOut = header._writePos;
            }
            // seek to the appropriate write position 
            file.seek(recordPositionOut);

        } else {
            recordPositionOut = writeLogFileHeader(file, header);
        }

        DataOutputBuffer buffer = new DataOutputBuffer();

        // write out sync bytes ... 
        buffer.writeInt(SyncBytes);
        // write out placeholder for record length 
        buffer.writeInt(0);
        // write out placeholder for crc 
        buffer.writeLong(0);
        // write out key + value to buffer
        WritableUtils.writeVLong(buffer, key);
        // write out value ... 
        value.write(buffer);
        // write out trailing record size (4 bytes sync + 4 bytes record length + 8 bytes crc + key/value buffer)
        buffer.writeInt(buffer.getLength());
        // reset crc 
        crc.reset();
        //calc crc 
        crc.update(buffer.getData(), RECORD_HEADER_LENGTH, buffer.getLength() - RECORD_HEADER_LENGTH);
        // ok fix up record ... 
        // write out record length
        // total length - sync bytes(4) - record length(4), at offset 4
        writeInt(buffer.getLength() - 8, 4, buffer.getData());
        // and write out crc
        // at offset 8 (after sync(4) and length(4))
        writeLong(crc.getValue(), 8, buffer.getData());

        // and then the data 
        file.write(buffer.getData(), 0, buffer.getLength());

        // now update header ... 
        header._itemCount += 1;
        header._writePos = file.getFilePointer();
        header._lastRecordLength = buffer.getLength() - 4;
        header._lastRecordKey = key;
        // now write out header anew ... 
        writeLogFileHeader(file, header);

    } finally {
        if (file != null) {
            file.close();
        }
    }

    return recordPositionOut;
}
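
Each record written above follows a fixed layout: a 4-byte sync marker, a 4-byte record length, an 8-byte CRC, the VLong-encoded key, the value bytes, and a trailing total-length int. A hedged reader sketch for the front of that layout (the recordBytes variable is an assumption, not from the source):

// hypothetical: recordBytes holds one record as laid out by appendRecordToLogFile
DataInputBuffer in = new DataInputBuffer();
in.reset(recordBytes, recordBytes.length);
int sync = in.readInt();                 // 4-byte sync marker
int recordLength = in.readInt();         // total length minus the sync and length fields
long storedCrc = in.readLong();          // 8-byte CRC over the remaining bytes
long key = WritableUtils.readVLong(in);  // key written via writeVLong above
// value.readFields(in) would follow, then the trailing total-length int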

From source file:org.kiji.hive.io.KijiRowDataWritable.java

License:Apache License

/**
 * Helper function to write a column and its associated data.
 *
 * @param out DataOutput for the Hadoop Writable to write to.
 * @param kijiColumnName to write
 * @param data to write
 * @throws IOException if there was an issue.
 */
private void writeColumn(DataOutput out, KijiColumnName kijiColumnName,
        NavigableMap<Long, KijiCellWritable> data) throws IOException {
    WritableUtils.writeString(out, kijiColumnName.getName());
    WritableUtils.writeVInt(out, data.size()); // number in the timeseries
    for (Map.Entry<Long, KijiCellWritable> cellEntry : data.entrySet()) {
        WritableUtils.writeVLong(out, cellEntry.getKey());
        cellEntry.getValue().write(out);
    }
}
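
A hedged sketch of the matching read side (readColumn is hypothetical, not part of the source): it mirrors the write order, using WritableUtils.readVLong to decode each timestamp, and assumes KijiCellWritable follows the usual Writable contract of a no-arg constructor plus readFields.

private NavigableMap<Long, KijiCellWritable> readColumn(DataInput in) throws IOException {
    String columnName = WritableUtils.readString(in); // read to stay aligned with the stream
    int entries = WritableUtils.readVInt(in);         // number in the timeseries
    NavigableMap<Long, KijiCellWritable> data = new TreeMap<Long, KijiCellWritable>();
    for (int i = 0; i < entries; i++) {
        long timestamp = WritableUtils.readVLong(in);   // pairs with writeVLong above
        KijiCellWritable cell = new KijiCellWritable(); // no-arg constructor assumed
        cell.readFields(in);
        data.put(timestamp, cell);
    }
    return data;
}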

From source file:org.terrier.compression.integer.ByteOutputStream.java

License:Mozilla Public License

@Override
public int writeVLong(long x) throws IOException {

    int bytes = WritableUtils.getVIntSize(x);
    WritableUtils.writeVLong(dos, x);
    byteOffset += bytes;

    return bytes;
}
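
A detail worth noting here: WritableUtils.getVIntSize reports the zero-compressed width of a value without writing it, which is what keeps byteOffset accurate. A quick sanity check (not from the source):

System.out.println(WritableUtils.getVIntSize(100L));           // 1 (fits in [-112, 127])
System.out.println(WritableUtils.getVIntSize(1000000L));       // 4 (length byte + 3 value bytes)
System.out.println(WritableUtils.getVIntSize(Long.MAX_VALUE)); // 9 (length byte + 8 value bytes)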

From source file:org.testies.RelativeKey.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {

    out.writeByte(fieldsSame);

    // System.out.printf("wrote fs %x\n", fieldsSame);

    bytesWritten += 1;

    if ((fieldsSame & ROW_SAME) == 0) {
        write(out, key.getRowData());
    }

    if ((fieldsSame & CF_SAME) == 0) {
        write(out, key.getColumnFamilyData());
    }

    if ((fieldsSame & CQ_SAME) == 0) {

        write(out, key.getColumnQualifierData());

        /*
         * Integer id = colFams.get(key.getColumnQualifier()); if(id == null){ id = nextId++; colFams.put(key.getColumnQualifier(), id); }
         * 
         * WritableUtils.writeVInt(out, id); bytesWritten += 1;
         */

    }

    if ((fieldsSame & CV_SAME) == 0) {
        write(out, key.getColumnVisibilityData());
    }

    if ((fieldsSame & TS_SAME) == 0) {
        WritableUtils.writeVLong(out, key.getTimestamp());
    }
}
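
A hedged sketch of the corresponding read path (not in the source): the reader consumes the bitmask first and decodes only the fields whose "same" bit is clear, calling WritableUtils.readVLong for the timestamp exactly where write used writeVLong.

public void readFields(DataInput in) throws IOException {
    byte fieldsSame = in.readByte();
    // ... row, column family, qualifier, and visibility are read here
    //     when their corresponding bits are clear ...
    if ((fieldsSame & TS_SAME) == 0) {
        long timestamp = WritableUtils.readVLong(in); // pairs with the writeVLong above
        // hypothetical: apply the timestamp to the key being reconstructed
    }
}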

From source file:st.happy_camper.hadoop.aggregate.AccessWritable.java

License:Apache License

public void write(DataOutput out) throws IOException {
    WritableUtils.writeString(out, access.getIp());
    WritableUtils.writeString(out, access.getUrl());
    WritableUtils.writeVLong(out, access.getAccessDate().getTime());
}
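
A hedged sketch of the symmetric readFields (the Access constructor shown is an assumption, not from the source): each writer above has a matching WritableUtils reader, and the access date round-trips as VLong-encoded epoch milliseconds.

public void readFields(DataInput in) throws IOException {
    String ip = WritableUtils.readString(in);
    String url = WritableUtils.readString(in);
    Date accessDate = new Date(WritableUtils.readVLong(in));
    access = new Access(ip, url, accessDate); // hypothetical constructor
}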