Example usage for org.apache.hadoop.io WritableUtils writeVInt

List of usage examples for org.apache.hadoop.io WritableUtils writeVInt

Introduction

On this page you can find example usage for org.apache.hadoop.io WritableUtils writeVInt.

Prototype

public static void writeVInt(DataOutput stream, int i) throws IOException 

Document

Serializes an integer to a binary stream with zero-compressed encoding.
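
A minimal round-trip sketch (the class and variable names here are purely illustrative): writeVInt writes the value with a variable number of bytes, and WritableUtils.readVInt reads it back.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class WriteVIntRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        // Small magnitudes take a single byte; an int never takes more than 5 bytes.
        WritableUtils.writeVInt(out, 42);
        WritableUtils.writeVInt(out, -1000000);
        out.flush();

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(WritableUtils.readVInt(in)); // 42
        System.out.println(WritableUtils.readVInt(in)); // -1000000
    }
}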

Usage

From source file:org.apache.phoenix.index.IndexMaintainer.java

License:Apache License

@Deprecated // Only called by code older than our 4.10 release
@Override
public void write(DataOutput output) throws IOException {
    // Encode nIndexSaltBuckets and isMultiTenant together
    WritableUtils.writeVInt(output, (nIndexSaltBuckets + 1) * (isMultiTenant ? -1 : 1));
    // Encode indexedColumns.size() and whether or not there's a viewIndexId
    WritableUtils.writeVInt(output, (indexedColumns.size() + 1) * (viewIndexId != null ? -1 : 1));
    if (viewIndexId != null) {
        output.write(viewIndexId);
    }
    for (ColumnReference ref : indexedColumns) {
        Bytes.writeByteArray(output, ref.getFamily());
        Bytes.writeByteArray(output, ref.getQualifier());
    }
    //TODO remove indexedColumnTypes in the next major release
    for (int i = 0; i < indexedColumnTypes.size(); i++) {
        PDataType type = indexedColumnTypes.get(i);
        WritableUtils.writeVInt(output, type.ordinal());
    }
    // Encode coveredColumns.size() and whether or not this is a local index
    WritableUtils.writeVInt(output, (coveredColumnsMap.size() + 1) * (isLocalIndex ? -1 : 1));
    for (ColumnReference ref : coveredColumnsMap.keySet()) {
        Bytes.writeByteArray(output, ref.getFamily());
        Bytes.writeByteArray(output, ref.getQualifier());
    }
    // TODO: remove when rowKeyOrderOptimizable hack no longer needed
    WritableUtils.writeVInt(output, indexTableName.length * (rowKeyOrderOptimizable ? 1 : -1));
    output.write(indexTableName, 0, indexTableName.length);
    Bytes.writeByteArray(output, dataEmptyKeyValueCF);
    // TODO in order to maintain b/w compatibility encode emptyKeyValueCFPtr.getLength() as a negative value (so we can distinguish between new and old clients)
    // when indexedColumnTypes is removed, remove this 
    WritableUtils.writeVInt(output, -emptyKeyValueCFPtr.getLength());
    output.write(emptyKeyValueCFPtr.get(), emptyKeyValueCFPtr.getOffset(), emptyKeyValueCFPtr.getLength());

    WritableUtils.writeVInt(output, indexedExpressions.size());
    for (Expression expression : indexedExpressions) {
        WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
        expression.write(output);
    }

    rowKeyMetaData.write(output);
    // Encode indexWALDisabled in nDataCFs
    WritableUtils.writeVInt(output, (nDataCFs + 1) * (indexWALDisabled ? -1 : 1));
    // Encode estimatedIndexRowKeyBytes and immutableRows together.
    WritableUtils.writeVInt(output, estimatedIndexRowKeyBytes * (immutableRows ? -1 : 1));
}
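
Several of the vints above use the (count + 1) * (flag ? -1 : 1) convention to pack a non-negative count and a boolean into a single value. A hedged sketch of how such a value would be unpacked on the read side (an illustration, not Phoenix's actual deserialization code):

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

final class PackedVIntDecode {
    // Decodes a vint that was written as (count + 1) * (flag ? -1 : 1).
    static int decode(DataInput in) throws IOException {
        int encoded = WritableUtils.readVInt(in);
        boolean flag = encoded < 0;          // e.g. isMultiTenant, isLocalIndex, indexWALDisabled
        int count = Math.abs(encoded) - 1;   // e.g. nIndexSaltBuckets, coveredColumnsMap.size(), nDataCFs
        System.out.println("flag=" + flag + ", count=" + count);
        return count;
    }
}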

From source file:org.apache.phoenix.index.IndexMaintainer.java

License:Apache License

public static ServerCachingProtos.IndexMaintainer toProto(IndexMaintainer maintainer) throws IOException {
    ServerCachingProtos.IndexMaintainer.Builder builder = ServerCachingProtos.IndexMaintainer.newBuilder();
    builder.setSaltBuckets(maintainer.nIndexSaltBuckets);
    builder.setIsMultiTenant(maintainer.isMultiTenant);
    if (maintainer.viewIndexId != null) {
        builder.setViewIndexId(ByteStringer.wrap(maintainer.viewIndexId));
    }
    for (ColumnReference colRef : maintainer.indexedColumns) {
        ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference
                .newBuilder();
        cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily()));
        cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier()));
        builder.addIndexedColumns(cRefBuilder.build());
    }
    for (PDataType dataType : maintainer.indexedColumnTypes) {
        builder.addIndexedColumnTypeOrdinal(dataType.ordinal());
    }
    for (Entry<ColumnReference, ColumnReference> e : maintainer.coveredColumnsMap.entrySet()) {
        ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference
                .newBuilder();
        ColumnReference dataTableColRef = e.getKey();
        cRefBuilder.setFamily(ByteStringer.wrap(dataTableColRef.getFamily()));
        cRefBuilder.setQualifier(ByteStringer.wrap(dataTableColRef.getQualifier()));
        builder.addDataTableColRefForCoveredColumns(cRefBuilder.build());
        if (maintainer.encodingScheme != NON_ENCODED_QUALIFIERS) {
            // We need to serialize the colRefs of index tables only in case of encoded column names.
            ColumnReference indexTableColRef = e.getValue();
            cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder();
            cRefBuilder.setFamily(ByteStringer.wrap(indexTableColRef.getFamily()));
            cRefBuilder.setQualifier(ByteStringer.wrap(indexTableColRef.getQualifier()));
            builder.addIndexTableColRefForCoveredColumns(cRefBuilder.build());
        }
    }
    builder.setIsLocalIndex(maintainer.isLocalIndex);
    builder.setIndexTableName(ByteStringer.wrap(maintainer.indexTableName));
    builder.setRowKeyOrderOptimizable(maintainer.rowKeyOrderOptimizable);
    builder.setDataTableEmptyKeyValueColFamily(ByteStringer.wrap(maintainer.dataEmptyKeyValueCF));
    ServerCachingProtos.ImmutableBytesWritable.Builder ibwBuilder = ServerCachingProtos.ImmutableBytesWritable
            .newBuilder();
    ibwBuilder.setByteArray(ByteStringer.wrap(maintainer.emptyKeyValueCFPtr.get()));
    ibwBuilder.setLength(maintainer.emptyKeyValueCFPtr.getLength());
    ibwBuilder.setOffset(maintainer.emptyKeyValueCFPtr.getOffset());
    builder.setEmptyKeyValueColFamily(ibwBuilder.build());
    try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
        DataOutput output = new DataOutputStream(stream);
        for (Expression expression : maintainer.indexedExpressions) {
            WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
            expression.write(output);
        }
        builder.setIndexedExpressions(ByteStringer.wrap(stream.toByteArray()));
    }
    try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
        DataOutput output = new DataOutputStream(stream);
        maintainer.rowKeyMetaData.write(output);
        builder.setRowKeyMetadata(ByteStringer.wrap(stream.toByteArray()));
    }
    builder.setNumDataTableColFamilies(maintainer.nDataCFs);
    builder.setIndexWalDisabled(maintainer.indexWALDisabled);
    builder.setIndexRowKeyByteSize(maintainer.estimatedIndexRowKeyBytes);
    builder.setImmutable(maintainer.immutableRows);
    for (Pair<String, String> p : maintainer.indexedColumnsInfo) {
        ServerCachingProtos.ColumnInfo.Builder ciBuilder = ServerCachingProtos.ColumnInfo.newBuilder();
        if (p.getFirst() != null) {
            ciBuilder.setFamilyName(p.getFirst());
        }
        ciBuilder.setColumnName(p.getSecond());
        builder.addIndexedColumnInfo(ciBuilder.build());
    }
    builder.setEncodingScheme(maintainer.encodingScheme.getSerializedMetadataValue());
    builder.setImmutableStorageScheme(maintainer.immutableStorageScheme.getSerializedMetadataValue());
    return builder.build();
}

From source file:org.apache.phoenix.index.PhoenixIndexBuilder.java

License:Apache License

/**
 * Serialize ON DUPLICATE KEY UPDATE info with the following format:
 * 1) Boolean value tracking whether or not to execute the first ON DUPLICATE KEY clause.
 *    We know the clause should be executed when there are other UPSERT VALUES clauses earlier in
 *    the same batch for this row key. We need this for two main cases: 
 *       UPSERT VALUES followed by UPSERT VALUES ON DUPLICATE KEY UPDATE
 *       UPSERT VALUES ON DUPLICATE KEY IGNORE followed by UPSERT VALUES ON DUPLICATE KEY UPDATE
 * 2) Short value tracking how many times the next clause should be executed. This
 *    optimizes the case where the same clause is executed many times by serializing it only once.
 * 3) Repeating {List<Expression>, PTable} pairs that encapsulate the ON DUPLICATE KEY clause.
 * @param table table representing columns being updated
 * @param expressions list of expressions to evaluate for updating columns
 * @return serialized byte array representation of ON DUPLICATE KEY UPDATE info
 */
public static byte[] serializeOnDupKeyUpdate(PTable table, List<Expression> expressions) {
    PTableProtos.PTable ptableProto = PTableImpl.toProto(table);
    int size = ptableProto.getSerializedSize();
    try (ByteArrayOutputStream stream = new ByteArrayOutputStream(size * 2)) {
        DataOutputStream output = new DataOutputStream(stream);
        output.writeBoolean(true); // Skip this ON DUPLICATE KEY clause if row already exists
        output.writeShort(1); // Execute this ON DUPLICATE KEY once
        WritableUtils.writeVInt(output, expressions.size());
        for (int i = 0; i < expressions.size(); i++) {
            Expression expression = expressions.get(i);
            WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
            expression.write(output);
        }
        ptableProto.writeDelimitedTo(output);
        return stream.toByteArray();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
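
The matching read side consumes the same fields in the order they were written. A rough sketch of the header decode, using illustrative names (not Phoenix's actual code):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

final class OnDupKeyHeaderDecode {
    // Reads back the header fields written by serializeOnDupKeyUpdate above.
    static void readHeader(byte[] onDupKeyBytes) throws IOException {
        DataInputStream input = new DataInputStream(new ByteArrayInputStream(onDupKeyBytes));
        boolean skipIfRowExists = input.readBoolean();      // 1) skip-first-clause flag
        short executionCount = input.readShort();           // 2) how many times to execute the clause
        int numExpressions = WritableUtils.readVInt(input); // 3) expression count
        // Each expression then follows as an ExpressionType ordinal (vint) plus its own payload,
        // and the PTable protobuf is appended last via writeDelimitedTo.
        System.out.println(skipIfRowExists + " " + executionCount + " " + numExpressions);
    }
}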

From source file:org.apache.phoenix.join.HashCacheClient.java

License:Apache License

private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize,
        List<Expression> onExpressions, boolean singleValueOnly, Expression keyRangeRhsExpression,
        List<Expression> keyRangeRhsValues) throws SQLException {
    long maxSize = serverCache.getConnection().getQueryServices().getProps().getLong(
            QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE);
    estimatedSize = Math.min(estimatedSize, maxSize);
    if (estimatedSize > Integer.MAX_VALUE) {
        throw new IllegalStateException("Estimated size(" + estimatedSize
                + ") must not be greater than Integer.MAX_VALUE(" + Integer.MAX_VALUE + ")");
    }
    try {
        TrustedByteArrayOutputStream baOut = new TrustedByteArrayOutputStream((int) estimatedSize);
        DataOutputStream out = new DataOutputStream(baOut);
        // Write onExpressions first, for hash key evaluation along with deserialization
        out.writeInt(onExpressions.size());
        for (Expression expression : onExpressions) {
            WritableUtils.writeVInt(out, ExpressionType.valueOf(expression).ordinal());
            expression.write(out);
        }
        int exprSize = baOut.size() + Bytes.SIZEOF_INT;
        out.writeInt(exprSize * (singleValueOnly ? -1 : 1));
        int nRows = 0;
        out.writeInt(nRows); // Will be replaced at the end with the total number of rows
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        for (Tuple result = iterator.next(); result != null; result = iterator.next()) {
            TupleUtil.write(result, out);
            if (baOut.size() > maxSize) {
                throw new MaxServerCacheSizeExceededException("Size of hash cache (" + baOut.size()
                        + " bytes) exceeds the maximum allowed size (" + maxSize + " bytes)");
            }
            // Evaluate key expressions for hash join key range optimization.
            if (keyRangeRhsExpression != null) {
                keyRangeRhsValues.add(evaluateKeyExpression(keyRangeRhsExpression, result, tempPtr));
            }
            nRows++;
        }
        TrustedByteArrayOutputStream sizeOut = new TrustedByteArrayOutputStream(Bytes.SIZEOF_INT);
        DataOutputStream dataOut = new DataOutputStream(sizeOut);
        try {
            dataOut.writeInt(nRows);
            dataOut.flush();
            byte[] cache = baOut.getBuffer();
            // Replace number of rows written above with the correct value.
            System.arraycopy(sizeOut.getBuffer(), 0, cache, exprSize, sizeOut.size());
            // Reallocate to actual size plus compressed buffer size (which is allocated below)
            int maxCompressedSize = Snappy.maxCompressedLength(baOut.size());
            byte[] compressed = new byte[maxCompressedSize]; // size for worst case
            int compressedSize = Snappy.compress(baOut.getBuffer(), 0, baOut.size(), compressed, 0);
            // Last realloc to size of compressed buffer.
            ptr.set(compressed, 0, compressedSize);
        } finally {
            dataOut.close();
        }
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        iterator.close();
    }
}

From source file:org.apache.phoenix.join.HashJoinInfo.java

License:Apache License

public static void serializeHashJoinIntoScan(Scan scan, HashJoinInfo joinInfo) {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    try {
        DataOutputStream output = new DataOutputStream(stream);
        joinInfo.joinedSchema.write(output);
        int count = joinInfo.joinIds.length;
        WritableUtils.writeVInt(output, count);
        for (int i = 0; i < count; i++) {
            joinInfo.joinIds[i].write(output);
            WritableUtils.writeVInt(output, joinInfo.joinExpressions[i].size());
            for (Expression expr : joinInfo.joinExpressions[i]) {
                WritableUtils.writeVInt(output, ExpressionType.valueOf(expr).ordinal());
                expr.write(output);
            }
            WritableUtils.writeVInt(output, joinInfo.joinTypes[i].ordinal());
            output.writeBoolean(joinInfo.earlyEvaluation[i]);
            joinInfo.schemas[i].write(output);
            WritableUtils.writeVInt(output, joinInfo.fieldPositions[i]);
        }
        if (joinInfo.postJoinFilterExpression != null) {
            WritableUtils.writeVInt(output,
                    ExpressionType.valueOf(joinInfo.postJoinFilterExpression).ordinal());
            joinInfo.postJoinFilterExpression.write(output);
        } else {
            WritableUtils.writeVInt(output, -1);
        }
        WritableUtils.writeVInt(output, joinInfo.limit == null ? -1 : joinInfo.limit);
        output.writeBoolean(joinInfo.forceProjection);
        scan.setAttribute(HASH_JOIN, stream.toByteArray());
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

}

From source file:org.apache.phoenix.join.ScanProjector.java

License:Apache License

public static void serializeProjectorIntoScan(Scan scan, ScanProjector projector) {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    try {
        DataOutputStream output = new DataOutputStream(stream);
        projector.schema.write(output);
        int count = projector.expressions.length;
        WritableUtils.writeVInt(output, count);
        for (int i = 0; i < count; i++) {
            WritableUtils.writeVInt(output, ExpressionType.valueOf(projector.expressions[i]).ordinal());
            projector.expressions[i].write(output);
        }
        scan.setAttribute(SCAN_PROJECTOR, stream.toByteArray());
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

}

From source file:org.apache.phoenix.mapreduce.FormatToBytesWritableMapper.java

License:Apache License

/**
 * Collect all column values for the same row. The RowKey may differ if indexes are involved,
 * so a separate record is written for each unique RowKey.
 *
 * @param context    Current mapper context
 * @param tableName Table index in tableNames list
 * @param lkv        List of KV values that will be combined in a single ImmutableBytesWritable
 * @throws IOException
 * @throws InterruptedException
 */

private void writeAggregatedRow(Context context, String tableName, List<KeyValue> lkv)
        throws IOException, InterruptedException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
    DataOutputStream outputStream = new DataOutputStream(bos);
    ImmutableBytesWritable outputKey = null;
    if (!lkv.isEmpty()) {
        for (KeyValue cell : lkv) {
            if (outputKey == null || Bytes.compareTo(outputKey.get(), outputKey.getOffset(),
                    outputKey.getLength(), cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) != 0) {
                // This is the first RowKey, or it differs from the previous one
                if (outputKey != null) { //It's a different RowKey, so we need to write it
                    ImmutableBytesWritable aggregatedArray = new ImmutableBytesWritable(bos.toByteArray());
                    outputStream.close();
                    context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray);
                }
                outputKey = new ImmutableBytesWritable(cell.getRowArray(), cell.getRowOffset(),
                        cell.getRowLength());
                bos = new ByteArrayOutputStream(1024);
                outputStream = new DataOutputStream(bos);
            }
            /*
            The order of aggregation: type, index of column, length of value, value itself
             */
            int i = findIndex(cell);
            if (i == -1) {
                // That may happen when we load only local indexes. Since KV pairs for both
                // the table and the local index go to the same physical table at that point,
                // we skip those KVs that do not belong to the local index.
                continue;
            }
            outputStream.writeByte(cell.getTypeByte());
            WritableUtils.writeVLong(outputStream, cell.getTimestamp());
            WritableUtils.writeVInt(outputStream, i);
            WritableUtils.writeVInt(outputStream, cell.getValueLength());
            outputStream.write(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());

        }
        ImmutableBytesWritable aggregatedArray = new ImmutableBytesWritable(bos.toByteArray());
        outputStream.close();
        context.write(new TableRowkeyPair(tableName, outputKey), aggregatedArray);
    }
}
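
Each cell is appended as: type byte, vlong timestamp, vint column index, vint value length, then the value bytes. A hedged sketch of reading one such record back (illustrative only, not the actual reducer-side code):

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

final class AggregatedCellDecode {
    // Reads a single cell record in the order writeAggregatedRow wrote it.
    static byte[] readOneCell(DataInput in) throws IOException {
        byte typeByte = in.readByte();                  // KeyValue type
        long timestamp = WritableUtils.readVLong(in);   // cell timestamp
        int columnIndex = WritableUtils.readVInt(in);   // index of the column
        int valueLength = WritableUtils.readVInt(in);   // length of the value
        byte[] value = new byte[valueLength];
        in.readFully(value);                            // the value itself
        return value;
    }
}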

From source file:org.apache.phoenix.mapreduce.PhoenixInputSplit.java

License:Apache License

@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeString(output, regionLocation);
    WritableUtils.writeVLong(output, regionSize);

    Preconditions.checkNotNull(scans);
    WritableUtils.writeVInt(output, scans.size());
    for (Scan scan : scans) {
        ClientProtos.Scan protoScan = ProtobufUtil.toScan(scan);
        byte[] protoScanBytes = protoScan.toByteArray();
        WritableUtils.writeVInt(output, protoScanBytes.length);
        output.write(protoScanBytes);
    }
}

From source file:org.apache.phoenix.query.KeyRange.java

License:Apache License

private void writeBound(Bound bound, DataOutput out) throws IOException {
    // Encode unbound by writing a zero
    if (isUnbound(bound)) {
        WritableUtils.writeVInt(out, 0);
        return;
    }
    // Otherwise, inclusive is positive and exclusive is negative, offset by 1
    byte[] range = getRange(bound);
    if (isInclusive(bound)) {
        WritableUtils.writeVInt(out, range.length + 1);
    } else {
        WritableUtils.writeVInt(out, -(range.length + 1));
    }
    out.write(range);
}
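
The encoding is therefore: a zero vint for an unbound range, range.length + 1 for an inclusive bound, and -(range.length + 1) for an exclusive bound. A hedged sketch of the corresponding read (illustrative names, not Phoenix's actual code):

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

final class BoundDecode {
    // Mirrors writeBound above; returns null for an unbound range.
    static byte[] readBound(DataInput in) throws IOException {
        int encoded = WritableUtils.readVInt(in);
        if (encoded == 0) {
            return null;                              // unbound
        }
        boolean inclusive = encoded > 0;              // the sign carries inclusivity
        byte[] range = new byte[Math.abs(encoded) - 1];
        in.readFully(range);
        return range;
    }
}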

From source file:org.apache.phoenix.schema.ValueSchema.java

License:Apache License

@Override
public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, minNullable);
    WritableUtils.writeVInt(out, fields.size() * (rowKeyOrderOptimizable ? -1 : 1));
    for (int i = 0; i < fields.size(); i++) {
        fields.get(i).write(out);
    }
}