Example usage for org.apache.hadoop.io WritableUtils writeVInt

List of usage examples for org.apache.hadoop.io WritableUtils writeVInt

Introduction

On this page you can find example usage of org.apache.hadoop.io WritableUtils writeVInt.

Prototype

public static void writeVInt(DataOutput stream, int i) throws IOException 

Source Link

Document

Serializes an integer to a binary stream with zero-compressed encoding.

Usage

From source file: org.apache.tez.common.counters.FileSystemCounterGroup.java

License: Apache License

/**
 * Serializes this counter group using the layout:
 * FileSystemGroup ::= #scheme (scheme #counter (key value)*)*
 *
 * Write order defines the wire format and must mirror the matching
 * deserialization logic.
 */
@Override
public void write(DataOutput out) throws IOException {
    // Number of filesystem schemes tracked by this group.
    WritableUtils.writeVInt(out, map.size());
    for (Map.Entry<String, Object[]> schemeEntry : map.entrySet()) {
        final String scheme = schemeEntry.getKey();
        final Object[] slots = schemeEntry.getValue();
        WritableUtils.writeString(out, scheme);
        // Count of non-null counter slots recorded for this scheme.
        WritableUtils.writeVInt(out, numSetCounters(slots));
        for (Object slot : slots) {
            if (slot != null) {
                FSCounter fsCounter = (FSCounter) ((TezCounter) slot).getUnderlyingCounter();
                WritableUtils.writeVInt(out, fsCounter.key.ordinal()); // counter key
                WritableUtils.writeVLong(out, fsCounter.getValue());   // counter value
            }
        }
    }
}

From source file: org.apache.tez.common.counters.FrameworkCounterGroup.java

License: Apache License

/**
 * Serializes this counter group using the layout:
 * FrameworkGroup ::= #counter (key value)*
 *
 * Only slots holding a non-null counter are emitted; each is keyed by its
 * index in the backing array.
 */
@Override
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
    // Number of counters that are actually set.
    WritableUtils.writeVInt(out, size());
    for (int idx = 0; idx < counters.length; ++idx) {
        TezCounter slot = (C) counters[idx];
        if (slot == null) {
            continue; // unset slots are skipped entirely
        }
        WritableUtils.writeVInt(out, idx);              // counter key (array index)
        WritableUtils.writeVLong(out, slot.getValue()); // counter value
    }
}

From source file: org.apache.tez.engine.common.shuffle.impl.InMemoryWriter.java

License: Apache License

/**
 * Appends a key/value pair to the in-memory IFile stream. A key equal to
 * the IFile.REPEAT_KEY sentinel is encoded as an RLE marker instead of
 * repeating the key bytes.
 *
 * @throws IOException if either buffer reports a negative remaining length
 */
public void append(DataInputBuffer key, DataInputBuffer value) throws IOException {
    // Remaining bytes in each buffer, measured from the current read position.
    final int keyLen = key.getLength() - key.getPosition();
    if (keyLen < 0) {
        throw new IOException("Negative key-length not allowed: " + keyLen + " for " + key);
    }

    final boolean repeatedKey = (key == IFile.REPEAT_KEY);

    final int valLen = value.getLength() - value.getPosition();
    if (valLen < 0) {
        throw new IOException("Negative value-length not allowed: " + valLen + " for " + value);
    }

    if (repeatedKey) {
        // Run-length encoding: the RLE marker stands in for the repeated key.
        WritableUtils.writeVInt(out, IFile.RLE_MARKER);
        WritableUtils.writeVInt(out, valLen);
        out.write(value.getData(), value.getPosition(), valLen);
        return;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("InMemWriter.append" + " key.data=" + key.getData() + " key.pos=" + key.getPosition()
                + " key.len=" + key.getLength() + " val.data=" + value.getData() + " val.pos="
                + value.getPosition() + " val.len=" + value.getLength());
    }
    // Plain record layout: <keyLen><valLen><keyBytes><valueBytes>.
    WritableUtils.writeVInt(out, keyLen);
    WritableUtils.writeVInt(out, valLen);
    out.write(key.getData(), key.getPosition(), keyLen);
    out.write(value.getData(), value.getPosition(), valLen);
}

From source file: org.apache.tez.engine.common.shuffle.impl.InMemoryWriter.java

License: Apache License

/**
 * Writes the end-of-stream sentinel and closes the underlying stream.
 */
public void close() throws IOException {
    // EOF sentinel: an EOF marker in both the key-length and value-length slots.
    for (int i = 0; i < 2; i++) {
        WritableUtils.writeVInt(out, IFile.EOF_MARKER);
    }

    out.close();
    out = null; // guard against accidental reuse after close
}

From source file: org.apache.tez.engine.common.shuffle.impl.ShuffleHeader.java

License: Apache License

/**
 * Serializes this shuffle header: map id, compressed length, uncompressed
 * length, then the target reduce id. The field order defines the wire
 * format and must mirror the matching read-side logic.
 */
public void write(DataOutput out) throws IOException {
    Text.writeString(out, mapId);
    WritableUtils.writeVLong(out, compressedLength);
    WritableUtils.writeVLong(out, uncompressedLength);
    WritableUtils.writeVInt(out, forReduce);
}

From source file: org.apache.tez.engine.records.TezDependentTaskCompletionEvent.java

License: Apache License

/**
 * Serializes this task-completion event. The write order defines the wire
 * format and must mirror the corresponding readFields implementation.
 * NOTE(review): a boolean isMap field was previously part of the format
 * (see removed commented-out write) — confirm readers no longer expect it.
 */
@Override
public void write(DataOutput out) throws IOException {
    taskAttemptId.write(out);
    WritableUtils.writeEnum(out, status);
    WritableUtils.writeString(out, taskTrackerHttp);
    WritableUtils.writeVInt(out, taskRunTime);
    WritableUtils.writeVInt(out, eventId);
}

From source file: org.apache.tez.mapreduce.processor.MapUtils.java

License: Apache License

/**
 * Writes the serialized InputSplit to the task-local split file, then writes
 * the matching split-meta-info file (header, version, split count, meta info).
 *
 * Fix: both output streams were leaked if any write threw; they are now
 * managed with try-with-resources so they close on every path.
 *
 * @param fs    filesystem to create the files on
 * @param conf  job configuration supplying the local resource directory
 * @param split the split to serialize
 * @throws IOException if any create or write fails
 */
private static void writeSplitFiles(FileSystem fs, JobConf conf, InputSplit split) throws IOException {
    Path jobSplitFile = new Path(conf.get(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR,
            MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR_DEFAULT), MRJobConfig.JOB_SPLIT);
    LOG.info("Writing split to: " + jobSplitFile);

    long offset;
    try (FSDataOutputStream out = FileSystem.create(fs, jobSplitFile,
            new FsPermission(JOB_FILE_PERMISSION))) {
        offset = out.getPos();
        // Record layout: <split class name><serialized split>.
        Text.writeString(out, split.getClass().getName());
        split.write(out);
    }

    String[] locations = split.getLocations();
    SplitMetaInfo info = new JobSplit.SplitMetaInfo(locations, offset, split.getLength());

    Path jobSplitMetaInfoFile = new Path(conf.get(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR),
            MRJobConfig.JOB_SPLIT_METAINFO);
    try (FSDataOutputStream outMeta = FileSystem.create(fs, jobSplitMetaInfoFile,
            new FsPermission(JOB_FILE_PERMISSION))) {
        outMeta.write(SplitMetaInfoReaderTez.META_SPLIT_FILE_HEADER);
        WritableUtils.writeVInt(outMeta, SplitMetaInfoReaderTez.META_SPLIT_VERSION);
        WritableUtils.writeVInt(outMeta, 1); // only one split meta info is written
        info.write(outMeta);
    }
}

From source file: org.apache.tez.runtime.library.common.shuffle.orderedgrouped.InMemoryWriter.java

License: Apache License

/**
 * Flushes any pending value marker, writes the end-of-stream sentinel,
 * and closes the underlying stream.
 */
public void close() throws IOException {
    // Emit V_END_MARKER if the current value run requires one.
    writeValueMarker(out);

    // EOF sentinel: an EOF marker in both the key-length and value-length slots.
    for (int i = 0; i < 2; i++) {
        WritableUtils.writeVInt(out, IFile.EOF_MARKER);
    }

    out.close();
    out = null; // guard against accidental reuse after close
}

From source file: org.apache.tinkerpop.gremlin.hadoop.process.computer.giraph.RuleWritable.java

License: Apache License

/**
 * Serializes this rule as: <rule ordinal><payload length><payload bytes>,
 * where the payload is produced by Serializer.serializeObject. The read
 * side must consume fields in the same order.
 */
@Override
public void write(final DataOutput output) throws IOException {
    WritableUtils.writeVInt(output, this.rule.ordinal());
    final byte[] objectBytes = Serializer.serializeObject(this.object);
    WritableUtils.writeVInt(output, objectBytes.length);
    output.write(objectBytes);
}

From source file: org.apache.tinkerpop.gremlin.hadoop.process.computer.util.Rule.java

License: Apache License

/**
 * Serializes this rule: the operation's ordinal first, then the
 * length-prefixed serialized payload of the wrapped object.
 */
@Override
public void write(final DataOutput output) throws IOException {
    // Operation identity first, so readers know how to interpret the payload.
    WritableUtils.writeVInt(output, this.operation.ordinal());
    // Length-prefixed payload produced by Serializer.serializeObject.
    final byte[] payload = Serializer.serializeObject(this.object);
    WritableUtils.writeVInt(output, payload.length);
    output.write(payload);
}