Example usage for org.apache.hadoop.io WritableUtils writeVInt

Introduction

On this page you can find example usages of org.apache.hadoop.io WritableUtils writeVInt.

Prototype

public static void writeVInt(DataOutput stream, int i) throws IOException 

Document

Serializes an integer to a binary stream with zero-compressed encoding.
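
To make the encoding concrete, here is a minimal, self-contained round-trip sketch using only Hadoop's WritableUtils and java.io. The class name VIntRoundTrip and the sample values are illustrative, not part of the Hadoop API.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class VIntRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);

        // Zero-compressed: small values take a single byte, larger ones up to five.
        WritableUtils.writeVInt(out, 42);
        WritableUtils.writeVInt(out, 100000);

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(WritableUtils.readVInt(in)); // 42
        System.out.println(WritableUtils.readVInt(in)); // 100000
    }
}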

Usage

From source file:org.apache.phoenix.expression.LiteralExpression.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeVInt(output, (byteValue.length + 1) * (this.determinism == Determinism.ALWAYS ? 1 : -1));
    output.write(byteValue);
    // Since we need to support clients of a lower version, serialize the determinism enum
    // ordinal in the same int that carries the sort order system value (which is either 1 or 2).
    int sortOrderAndDeterminism = ((this.determinism.ordinal() + 1) << 2) + sortOrder.getSystemValue();
    WritableUtils.writeVInt(output, sortOrderAndDeterminism);
    WritableUtils.writeVInt(output, this.type == null ? -1 : this.type.ordinal());
}
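
Two tricks are folded into these vints: the determinism flag rides in the sign of the length (offset by one so that an empty value cannot produce an ambiguous zero), and the determinism ordinal shares a single int with the sort order system value. A hedged sketch of the matching decode follows; the variable names are illustrative, and this is not Phoenix's actual readFields implementation.

public void readFields(DataInput input) throws IOException {
    int encodedLength = WritableUtils.readVInt(input);
    boolean alwaysDeterministic = encodedLength > 0;          // flag carried in the sign
    byte[] byteValue = new byte[Math.abs(encodedLength) - 1]; // undo the +1 offset
    input.readFully(byteValue);
    int packed = WritableUtils.readVInt(input);
    int determinismOrdinal = (packed >> 2) - 1; // high bits, offset by one
    int sortOrderSystemValue = packed & 0x3;    // low two bits: 1 or 2
    int typeOrdinal = WritableUtils.readVInt(input); // -1 signals a null type
}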

From source file:org.apache.phoenix.expression.OrderByExpression.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    output.writeBoolean(isNullsLast);
    output.writeBoolean(isAscending);
    WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
    expression.write(output);
}
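
Several snippets on this page persist an enum by writing its ordinal as a vint and indexing values() with the number read back. Here is a self-contained sketch of that pattern; the Color enum is an illustrative stand-in for ExpressionType.

import java.io.*;
import org.apache.hadoop.io.WritableUtils;

public class EnumOrdinalRoundTrip {
    enum Color { RED, GREEN, BLUE }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        WritableUtils.writeVInt(new DataOutputStream(bytes), Color.GREEN.ordinal());

        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        Color c = Color.values()[WritableUtils.readVInt(in)];
        System.out.println(c); // GREEN
    }
}

Note that this makes the declaration order of the enum constants part of the wire format: reordering them breaks previously serialized data.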

From source file:org.apache.phoenix.expression.SingleCellColumnExpression.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    super.write(output);
    WritableUtils.writeVInt(output, decodedColumnQualifier);
    WritableUtils.writeVInt(output, encodingScheme.ordinal());
}

From source file:org.apache.phoenix.filter.BooleanExpressionFilter.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    try {
        WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
        expression.write(output);
    } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry
        ServerUtil.throwIOException("BooleanExpressionFilter failed during writing", t);
    }
}

From source file:org.apache.phoenix.filter.ColumnProjectionFilter.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeCompressedByteArray(output, this.emptyCFName);
    WritableUtils.writeVInt(output, this.columnsTracker.size());
    for (Entry<ImmutableBytesPtr, NavigableSet<ImmutableBytesPtr>> entry : this.columnsTracker.entrySet()) {
        // write family name
        WritableUtils.writeCompressedByteArray(output, entry.getKey().copyBytes());
        int qualsSize = entry.getValue() == null ? 0 : entry.getValue().size();
        WritableUtils.writeVInt(output, qualsSize);
        if (qualsSize > 0) {
            for (ImmutableBytesPtr cq : entry.getValue()) {
                // write qualifier name
                WritableUtils.writeCompressedByteArray(output, cq.copyBytes());
            }
        }
    }
    // Encode usesEncodedColumnNames in conditionOnlyCfs size.
    WritableUtils.writeVInt(output, (this.conditionOnlyCfs.size() + 1) * (usesEncodedColumnNames ? 1 : -1));
    for (byte[] f : this.conditionOnlyCfs) {
        WritableUtils.writeCompressedByteArray(output, f);
    }
}
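
Here the last vint again smuggles a boolean through its sign, with a +1 offset so that a zero size stays unambiguous. A hedged decode sketch, assuming a DataInput named input; the names are illustrative, not the filter's actual readFields.

int encoded = WritableUtils.readVInt(input);
boolean usesEncodedColumnNames = encoded > 0;
int nConditionOnlyCfs = Math.abs(encoded) - 1; // undo the +1 offset
for (int i = 0; i < nConditionOnlyCfs; i++) {
    byte[] f = WritableUtils.readCompressedByteArray(input);
}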

From source file:org.apache.phoenix.filter.EncodedQualifiersColumnProjectionFilter.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeCompressedByteArray(output, this.emptyCFName);
    long[] longArrayOfBitSet = trackedColumns.toLongArray();
    WritableUtils.writeVInt(output, longArrayOfBitSet.length);
    for (Long l : longArrayOfBitSet) {
        WritableUtils.writeVLong(output, l);
    }
    WritableUtils.writeVInt(output, encodingScheme.ordinal());
    WritableUtils.writeVInt(output, this.conditionOnlyCfs.size());
    for (byte[] f : this.conditionOnlyCfs) {
        WritableUtils.writeCompressedByteArray(output, f);
    }
}
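
On the read side, the vlong-encoded words can be handed straight back to java.util.BitSet. A hedged sketch, assuming a DataInput named input:

int nWords = WritableUtils.readVInt(input);
long[] words = new long[nWords];
for (int i = 0; i < nWords; i++) {
    words[i] = WritableUtils.readVLong(input);
}
BitSet trackedColumns = BitSet.valueOf(words); // java.util.BitSet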

From source file:org.apache.phoenix.filter.MultiEncodedCQKeyValueComparisonFilter.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    try {
        WritableUtils.writeVInt(output, minQualifier);
        WritableUtils.writeVInt(output, maxQualifier);
        WritableUtils.writeVInt(output, whereExpressionMinQualifier);
        WritableUtils.writeVInt(output, whereExpressionMaxQualifier);
        WritableUtils.writeVInt(output, encodingScheme.ordinal());
        super.write(output);
        output.writeBoolean(allCFs);
        if (!allCFs) {
            Bytes.writeByteArray(output, essentialCF);
        }
    } catch (DoNotRetryIOException e) {
        throw e;
    } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry
        ServerUtil.throwIOException("MultiEncodedCQKeyValueComparisonFilter failed during writing", t);
    }
}
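
The boolean written before essentialCF acts as a presence flag, so a reader only consumes the byte array when allCFs is false. A hedged sketch of that tail of the stream, assuming a DataInput named input (Bytes is org.apache.hadoop.hbase.util.Bytes):

boolean allCFs = input.readBoolean();
byte[] essentialCF = allCFs ? null : Bytes.readByteArray(input);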

From source file:org.apache.phoenix.hive.mapreduce.PhoenixInputSplit.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    super.write(out);

    Preconditions.checkNotNull(scans);
    WritableUtils.writeVInt(out, scans.size());
    for (Scan scan : scans) {
        ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan);
        byte[] protoScanBytes = protoScan.toByteArray();
        WritableUtils.writeVInt(out, protoScanBytes.length);
        out.write(protoScanBytes);
    }

    WritableUtils.writeString(out, query);
    WritableUtils.writeVLong(out, regionSize);
}
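
Each scan is written as a length-prefixed protobuf frame, so a reader first consumes the count, then one length vint per frame followed by exactly that many bytes. A hedged sketch of the matching read side; the names are illustrative.

int nScans = WritableUtils.readVInt(in);
for (int i = 0; i < nScans; i++) {
    byte[] protoScanBytes = new byte[WritableUtils.readVInt(in)];
    in.readFully(protoScanBytes);
    // Scan scan = ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(protoScanBytes));
}
String query = WritableUtils.readString(in);
long regionSize = WritableUtils.readVLong(in);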

From source file:org.apache.phoenix.index.IndexMaintainer.java

License:Apache License

/**
 * For client-side to serialize all IndexMaintainers for a given table
 * @param dataTable data table
 * @param ptr bytes pointer to hold returned serialized value
 * @param indexes indexes to serialize
 */
public static void serialize(PTable dataTable, ImmutableBytesWritable ptr, List<PTable> indexes,
        PhoenixConnection connection) {
    Iterator<PTable> indexesItr = maintainedIndexes(indexes.iterator());
    if ((dataTable.isImmutableRows()) || !indexesItr.hasNext()) {
        indexesItr = maintainedLocalIndexes(indexesItr);
        if (!indexesItr.hasNext()) {
            ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
            return;
        }
    }
    int nIndexes = 0;
    while (indexesItr.hasNext()) {
        nIndexes++;
        indexesItr.next();
    }
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    DataOutputStream output = new DataOutputStream(stream);
    try {
        // Encode data table salting in sign of number of indexes
        WritableUtils.writeVInt(output, nIndexes * (dataTable.getBucketNum() == null ? 1 : -1));
        // Write out data row key schema once, since it's the same for all index maintainers
        dataTable.getRowKeySchema().write(output);
        indexesItr = dataTable.isImmutableRows() ? maintainedLocalIndexes(indexes.iterator())
                : maintainedIndexes(indexes.iterator());
        while (indexesItr.hasNext()) {
            org.apache.phoenix.coprocessor.generated.ServerCachingProtos.IndexMaintainer proto = IndexMaintainer
                    .toProto(indexesItr.next().getIndexMaintainer(dataTable, connection));
            byte[] protoBytes = proto.toByteArray();
            WritableUtils.writeVInt(output, protoBytes.length);
            output.write(protoBytes);
        }
    } catch (IOException e) {
        throw new RuntimeException(e); // Impossible
    }
    ptr.set(stream.toByteArray(), 0, stream.size());
}
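
A deserializer recovers the salting flag from the sign of the leading count. A hedged sketch, assuming a DataInput named input:

int encoded = WritableUtils.readVInt(input);
boolean isDataTableSalted = encoded < 0; // negative count means the data table is salted
int nIndexes = Math.abs(encoded);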

From source file:org.apache.phoenix.index.IndexMaintainer.java

License:Apache License

/**
 * For client-side to append serialized IndexMaintainers of keyValueIndexes
 * @param table data table
 * @param indexMetaDataPtr bytes pointer to hold returned serialized value
 * @param keyValueIndexes indexes to serialize
 */
public static void serializeAdditional(PTable table, ImmutableBytesWritable indexMetaDataPtr,
        List<PTable> keyValueIndexes, PhoenixConnection connection) {
    int nMutableIndexes = indexMetaDataPtr.getLength() == 0 ? 0 : ByteUtil.vintFromBytes(indexMetaDataPtr);
    int nIndexes = nMutableIndexes + keyValueIndexes.size();
    int estimatedSize = indexMetaDataPtr.getLength() + 1; // Just in case new size increases buffer
    if (indexMetaDataPtr.getLength() == 0) {
        estimatedSize += table.getRowKeySchema().getEstimatedByteSize();
    }
    for (PTable index : keyValueIndexes) {
        estimatedSize += index.getIndexMaintainer(table, connection).getEstimatedByteSize();
    }
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedSize + 1);
    DataOutput output = new DataOutputStream(stream);
    try {
        // Encode data table salting in sign of number of indexes
        WritableUtils.writeVInt(output, nIndexes * (table.getBucketNum() == null ? 1 : -1));
        // Serialize current mutable indexes, subtracting the vint size from the length
        // as it's still included
        if (indexMetaDataPtr.getLength() > 0) {
            output.write(indexMetaDataPtr.get(), indexMetaDataPtr.getOffset(),
                    indexMetaDataPtr.getLength() - WritableUtils.getVIntSize(nMutableIndexes));
        } else {
            table.getRowKeySchema().write(output);
        }
        // Append the new keyValueIndexes afterwards
        for (PTable index : keyValueIndexes) {
            IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
            byte[] protoBytes = IndexMaintainer.toProto(maintainer).toByteArray();
            WritableUtils.writeVInt(output, protoBytes.length);
            output.write(protoBytes);
        }
    } catch (IOException e) {
        throw new RuntimeException(e); // Impossible
    }
    indexMetaDataPtr.set(stream.getBuffer(), 0, stream.size());
}
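
serializeAdditional can strip the old count prefix because WritableUtils.getVIntSize reports exactly how many bytes writeVInt (or writeVLong) used for a given value: one byte for values in [-112, 127], up to five for an int. A small illustration:

System.out.println(WritableUtils.getVIntSize(100));               // 1
System.out.println(WritableUtils.getVIntSize(255));               // 2
System.out.println(WritableUtils.getVIntSize(100000));            // 4
System.out.println(WritableUtils.getVIntSize(Integer.MAX_VALUE)); // 5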