Example usage for java.io.DataOutput.write

List of usage examples for java.io.DataOutput.write

Introduction

On this page you can find usage examples for java.io.DataOutput.write, collected from open-source projects.

Prototype

void write(byte b[], int off, int len) throws IOException;

Document

Writes len bytes from array b, in order, to the output stream.
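
As a quick illustration of the prototype above, here is a minimal, self-contained sketch that writes a slice of a byte array through a DataOutputStream, which implements DataOutput. The file name "example.bin", the payload, and the length prefix are made up for the example; they are not part of any snippet below.

import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class DataOutputWriteExample {
    public static void main(String[] args) throws IOException {
        byte[] payload = "hello, DataOutput".getBytes(StandardCharsets.UTF_8);
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream("example.bin"))) {
            DataOutput dataOut = out;                  // DataOutputStream implements DataOutput
            dataOut.writeInt(payload.length);          // length prefix, as in many of the snippets below
            dataOut.write(payload, 0, payload.length); // write payload.length bytes starting at offset 0
        }
    }
}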

Usage

From source file:org.apache.cassandra.db.compaction.LazilyCompactedRow.java

public void write(DataOutput out) throws IOException {
    DataOutputBuffer clockOut = new DataOutputBuffer();
    ColumnFamily.serializer().serializeCFInfo(emptyColumnFamily, clockOut);

    long dataSize = headerBuffer.getLength() + clockOut.getLength() + columnSerializedSize;
    if (logger.isDebugEnabled())
        logger.debug(String.format("header / clock / column sizes are %s / %s / %s", headerBuffer.getLength(),
                clockOut.getLength(), columnSerializedSize));
    assert dataSize > 0;
    out.writeLong(dataSize);
    out.write(headerBuffer.getData(), 0, headerBuffer.getLength());
    out.write(clockOut.getData(), 0, clockOut.getLength());
    out.writeInt(columnCount);

    Iterator<IColumn> iter = iterator();
    while (iter.hasNext()) {
        IColumn column = iter.next();
        emptyColumnFamily.getColumnSerializer().serialize(column, out);
    }
    long secondPassColumnSize = reducer == null ? 0 : reducer.serializedSize;
    assert secondPassColumnSize == columnSerializedSize : "originally calculated column size of "
            + columnSerializedSize + " but now it is " + secondPassColumnSize;
}

From source file:org.apache.cassandra.io.LazilyCompactedRow.java

public void write(DataOutput out) throws IOException {
    if (rows.size() == 1 && !shouldPurge && !controller.needDeserialize()) {
        SSTableIdentityIterator row = rows.get(0);
        assert row.dataSize > 0;
        out.writeLong(row.dataSize);
        row.echoData(out);
        return;
    }

    DataOutputBuffer clockOut = new DataOutputBuffer();
    ColumnFamily.serializer().serializeCFInfo(emptyColumnFamily, clockOut);

    long dataSize = headerBuffer.getLength() + clockOut.getLength() + columnSerializedSize;
    assert dataSize > 0;
    out.writeLong(dataSize);
    out.write(headerBuffer.getData(), 0, headerBuffer.getLength());
    out.write(clockOut.getData(), 0, clockOut.getLength());
    out.writeInt(columnCount);

    Iterator<IColumn> iter = iterator();
    while (iter.hasNext()) {
        IColumn column = iter.next();
        emptyColumnFamily.getColumnSerializer().serialize(column, out);
    }
}

From source file:org.apache.crunch.types.writable.TupleWritable.java

/**
 * Writes each Writable to <code>out</code>.
 */
public void write(DataOutput out) throws IOException {
    DataOutputBuffer tmp = new DataOutputBuffer();
    WritableUtils.writeVInt(out, values.length);
    for (int i = 0; i < values.length; ++i) {
        WritableUtils.writeVInt(out, written[i]);
        if (written[i] != 0) {
            tmp.reset();
            values[i].write(tmp);
            WritableUtils.writeVInt(out, tmp.getLength());
            out.write(tmp.getData(), 0, tmp.getLength());
        }
    }
}

From source file:org.apache.hadoop.hbase.filter.RowListFilter.java

@Override
public void write(DataOutput dout) throws IOException {
    dout.writeInt(bytesSetIterator.previousIndex());
    dout.writeInt(bytesSet.size());
    for (byte[] bytes : bytesSet) {
        dout.writeShort(bytes.length);
        dout.write(bytes, 0, bytes.length);
    }
}

From source file:org.apache.hadoop.hbase.KeyValue.java

/**
 * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
 * @param kv
 * @param out
 * @return Length written on stream
 * @throws IOException
 * @see #create(DataInput) for the inverse function
 */
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
    // This is how the old Writables write used to serialize KVs. Need to figure out
    // a way to make it work for all implementations.
    int length = kv.getLength();
    out.writeInt(length);
    out.write(kv.getBuffer(), kv.getOffset(), length);
    return length + Bytes.SIZEOF_INT;
}

From source file:org.apache.hadoop.hbase.KeyValueUtil.java

/**
 * Write out a KeyValue in the manner in which we used to when KeyValue was a
 * Writable.
 *
 * @param kv
 * @param out
 * @return Length written on stream
 * @throws IOException
 * @see #create(DataInput) for the inverse function
 */
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
    // This is how the old Writables write used to serialize KVs. Need to figure out
    // a way to make it work for all implementations.
    int length = kv.getLength();
    out.writeInt(length);
    out.write(kv.getBuffer(), kv.getOffset(), length);
    return (long) length + Bytes.SIZEOF_INT;
}

From source file:org.apache.hadoop.hive.accumulo.AccumuloHiveRow.java

@Override
public void write(DataOutput dataOutput) throws IOException {
    if (null != rowId) {
        dataOutput.writeBoolean(true);
        dataOutput.writeUTF(rowId);
    } else {
        dataOutput.writeBoolean(false);
    }
    int size = tuples.size();
    dataOutput.writeInt(size);
    for (ColumnTuple tuple : tuples) {
        Text cf = tuple.getCf(), cq = tuple.getCq();
        dataOutput.writeInt(cf.getLength());
        dataOutput.write(cf.getBytes(), 0, cf.getLength());
        dataOutput.writeInt(cq.getLength());
        dataOutput.write(cq.getBytes(), 0, cq.getLength());
        byte[] value = tuple.getValue();
        dataOutput.writeInt(value.length);
        dataOutput.write(value);
    }
}

From source file:org.apache.hadoop.hive.ql.io.orc.OrcSplit.java

@Override
public void write(DataOutput out) throws IOException {
    //serialize path, offset, length using FileSplit
    super.write(out);

    int flags = (hasBase ? BASE_FLAG : 0) | (isOriginal ? ORIGINAL_FLAG : 0) | (hasFooter ? FOOTER_FLAG : 0)
            | (fileId != null ? HAS_FILEID_FLAG : 0);
    out.writeByte(flags);
    out.writeInt(deltas.size());
    for (AcidInputFormat.DeltaMetaData delta : deltas) {
        delta.write(out);
    }
    if (hasFooter) {
        // serialize FileMetaInfo fields
        Text.writeString(out, fileMetaInfo.compressionType);
        WritableUtils.writeVInt(out, fileMetaInfo.bufferSize);
        WritableUtils.writeVInt(out, fileMetaInfo.metadataSize);

        // serialize FileMetaInfo field footer
        ByteBuffer footerBuff = fileMetaInfo.footerBuffer;
        footerBuff.reset();

        // write length of buffer
        WritableUtils.writeVInt(out, footerBuff.limit() - footerBuff.position());
        out.write(footerBuff.array(), footerBuff.position(), footerBuff.limit() - footerBuff.position());
        WritableUtils.writeVInt(out, fileMetaInfo.writerVersion.getId());
    }
    if (fileId != null) {
        out.writeLong(fileId.longValue());
    }
}

From source file:org.apache.hadoop.io.BytesWritable.java

public void write(DataOutput out) throws IOException {
    out.writeInt(size);
    out.write(bytes, 0, size);
}
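
Most of the snippets on this page follow the same length-prefixed convention as this BytesWritable example: write an int length, then the raw bytes with write(bytes, 0, size). A reader has to mirror that order exactly. The following sketch of the inverse is illustrative only; the method name and the handling of the resulting array are assumptions, not the actual BytesWritable code.

// Hypothetical counterpart to the length-prefixed write above.
public void readLengthPrefixedBytes(java.io.DataInput in) throws java.io.IOException {
    int size = in.readInt();       // read the length prefix written by writeInt(size)
    byte[] bytes = new byte[size];
    in.readFully(bytes, 0, size);  // read exactly 'size' bytes, mirroring write(bytes, 0, size)
    // hand 'bytes' to whatever object owns the data
}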

From source file:org.apache.hawq.pxf.service.io.Text.java

@Override
public void write(DataOutput out) throws IOException {
    byte[] bytes = getBytes();
    out.write(bytes, 0, getLength());
}