Example usage for org.apache.hadoop.io WritableUtils readVInt

Introduction

On this page you can find example usages of org.apache.hadoop.io.WritableUtils.readVInt, collected from the Apache Phoenix source tree.

Prototype

public static int readVInt(DataInput stream) throws IOException 

Document

Reads a zero-compressed encoded integer from the input stream and returns it.
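
For a quick orientation before the real-world usages below, here is a minimal round trip that pairs readVInt with its counterpart WritableUtils.writeVInt. The class name VIntRoundTrip is illustrative, not part of Hadoop or Phoenix.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class VIntRoundTrip {
    public static void main(String[] args) throws IOException {
        // Write a few values in the zero-compressed variable-length encoding.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        WritableUtils.writeVInt(out, 1);       // small magnitudes take a single byte
        WritableUtils.writeVInt(out, 1000000); // larger magnitudes take up to 5 bytes
        WritableUtils.writeVInt(out, -42);     // negative values are supported too

        // Read them back in the same order.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(WritableUtils.readVInt(in)); // 1
        System.out.println(WritableUtils.readVInt(in)); // 1000000
        System.out.println(WritableUtils.readVInt(in)); // -42
    }
}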

Usage

From source file:org.apache.phoenix.filter.ColumnProjectionFilter.java

License:Apache License

@Override
public void readFields(DataInput input) throws IOException {
    this.emptyCFName = WritableUtils.readCompressedByteArray(input);
    int familyMapSize = WritableUtils.readVInt(input);
    assert familyMapSize > 0;
    columnsTracker = new TreeMap<ImmutableBytesPtr, NavigableSet<ImmutableBytesPtr>>();
    while (familyMapSize > 0) {
        byte[] cf = WritableUtils.readCompressedByteArray(input);
        int qualifiersSize = WritableUtils.readVInt(input);
        NavigableSet<ImmutableBytesPtr> qualifiers = null;
        if (qualifiersSize > 0) {
            qualifiers = new TreeSet<ImmutableBytesPtr>();
            while (qualifiersSize > 0) {
                qualifiers.add(new ImmutableBytesPtr(WritableUtils.readCompressedByteArray(input)));
                qualifiersSize--;
            }
        }
        columnsTracker.put(new ImmutableBytesPtr(cf), qualifiers);
        familyMapSize--;
    }
    int conditionOnlyCfsSize = WritableUtils.readVInt(input);
    usesEncodedColumnNames = conditionOnlyCfsSize > 0;
    emptyKVQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst();
    conditionOnlyCfsSize = Math.abs(conditionOnlyCfsSize) - 1; // restore to the actual value.
    this.conditionOnlyCfs = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    while (conditionOnlyCfsSize > 0) {
        this.conditionOnlyCfs.add(WritableUtils.readCompressedByteArray(input));
        conditionOnlyCfsSize--;
    }
}
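
Note the encoding trick at the end of this method: a boolean (usesEncodedColumnNames) rides on the sign of the vint, and the real collection size is recovered with Math.abs(v) - 1 so that a size of zero remains representable for either flag value. The same convention recurs in several of the Phoenix readers below. A minimal sketch of the pattern in isolation (the helper names are illustrative, not Phoenix APIs):

// Illustrative helpers showing the sign-packing convention used above.
// The +1 shift on write and Math.abs(v) - 1 on read keep size 0 encodable
// with either flag value.
static void writeSizeWithFlag(DataOutput output, int size, boolean flag) throws IOException {
    WritableUtils.writeVInt(output, flag ? size + 1 : -(size + 1));
}

static int readSizeWithFlag(DataInput input, boolean[] flagOut) throws IOException {
    int encoded = WritableUtils.readVInt(input);
    flagOut[0] = encoded > 0;
    return Math.abs(encoded) - 1;
}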

From source file:org.apache.phoenix.filter.EncodedQualifiersColumnProjectionFilter.java

License:Apache License

@Override
public void readFields(DataInput input) throws IOException {
    this.emptyCFName = WritableUtils.readCompressedByteArray(input);
    int bitsetLongArraySize = WritableUtils.readVInt(input);
    long[] bitsetLongArray = new long[bitsetLongArraySize];
    for (int i = 0; i < bitsetLongArraySize; i++) {
        bitsetLongArray[i] = WritableUtils.readVLong(input);
    }
    this.trackedColumns = BitSet.valueOf(bitsetLongArray);
    this.encodingScheme = QualifierEncodingScheme.values()[WritableUtils.readVInt(input)];
    int conditionOnlyCfsSize = WritableUtils.readVInt(input);
    this.conditionOnlyCfs = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    while (conditionOnlyCfsSize > 0) {
        this.conditionOnlyCfs.add(WritableUtils.readCompressedByteArray(input));
        conditionOnlyCfsSize--;
    }
}
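
The write side of this bitset round trip follows directly from BitSet.valueOf(long[]): dump the set with toLongArray(), prefix the word count with a vint, and write each word as a vlong. A minimal sketch (the standalone method is illustrative, not the filter's actual write code):

// Sketch: serialize a BitSet in the layout the readFields above consumes.
static void writeBitSet(DataOutput output, BitSet trackedColumns) throws IOException {
    long[] words = trackedColumns.toLongArray();
    WritableUtils.writeVInt(output, words.length);
    for (long word : words) {
        WritableUtils.writeVLong(output, word);
    }
}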

From source file:org.apache.phoenix.filter.MultiEncodedCQKeyValueComparisonFilter.java

License:Apache License

@Override
public void readFields(DataInput input) throws IOException {
    try {
        this.minQualifier = WritableUtils.readVInt(input);
        this.maxQualifier = WritableUtils.readVInt(input);
        this.whereExpressionMinQualifier = WritableUtils.readVInt(input);
        this.whereExpressionMaxQualifier = WritableUtils.readVInt(input);
        this.encodingScheme = QualifierEncodingScheme.values()[WritableUtils.readVInt(input)];
        super.readFields(input);
        try {
            allCFs = input.readBoolean();
            if (!allCFs) {
                essentialCF = Bytes.readByteArray(input);
            }
        } catch (EOFException e) { // Ignore as this will occur when a 4.10 client is used
        }
    } catch (DoNotRetryIOException e) {
        throw e;
    } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry
        ServerUtil.throwIOException("MultiEncodedCQKeyValueComparisonFilter failed during writing", t);
    }
    initFilter(expression);
}

From source file:org.apache.phoenix.hive.mapreduce.PhoenixInputSplit.java

License:Apache License

@Override
public void readFields(DataInput in) throws IOException {
    super.readFields(in);

    int count = WritableUtils.readVInt(in);
    scans = Lists.newArrayListWithExpectedSize(count);
    for (int i = 0; i < count; i++) {
        byte[] protoScanBytes = new byte[WritableUtils.readVInt(in)];
        in.readFully(protoScanBytes);
        ClientProtos.Scan protoScan = ClientProtos.Scan.parseFrom(protoScanBytes);
        Scan scan = ProtobufUtil.toScan(protoScan);
        scans.add(scan);
    }
    init();

    query = WritableUtils.readString(in);
    regionSize = WritableUtils.readVLong(in);
}
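
The format read here is a vint scan count followed by length-prefixed protobuf payloads, then a string and a vlong. A sketch of the symmetric write side, inferred from readFields rather than copied from the actual PhoenixInputSplit.write:

// Sketch of the matching write(DataOutput), assuming the same fields as readFields.
@Override
public void write(DataOutput out) throws IOException {
    super.write(out);
    WritableUtils.writeVInt(out, scans.size());
    for (Scan scan : scans) {
        // Length-prefix each protobuf payload so readFields can size its buffer.
        byte[] protoScanBytes = ProtobufUtil.toScan(scan).toByteArray();
        WritableUtils.writeVInt(out, protoScanBytes.length);
        out.write(protoScanBytes);
    }
    WritableUtils.writeString(out, query);
    WritableUtils.writeVLong(out, regionSize);
}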

From source file:org.apache.phoenix.index.IndexMaintainer.java

License:Apache License

private static List<IndexMaintainer> deserialize(byte[] buf, int offset, int length,
        boolean useProtoForIndexMaintainer) {
    ByteArrayInputStream stream = new ByteArrayInputStream(buf, offset, length);
    DataInput input = new DataInputStream(stream);
    List<IndexMaintainer> maintainers = Collections.emptyList();
    try {
        int size = WritableUtils.readVInt(input);
        boolean isDataTableSalted = size < 0;
        size = Math.abs(size);
        RowKeySchema rowKeySchema = new RowKeySchema();
        rowKeySchema.readFields(input);
        maintainers = Lists.newArrayListWithExpectedSize(size);
        for (int i = 0; i < size; i++) {
            if (useProtoForIndexMaintainer) {
                int protoSize = WritableUtils.readVInt(input);
                byte[] b = new byte[protoSize];
                input.readFully(b);
                org.apache.phoenix.coprocessor.generated.ServerCachingProtos.IndexMaintainer proto = ServerCachingProtos.IndexMaintainer
                        .parseFrom(b);
                maintainers.add(IndexMaintainer.fromProto(proto, rowKeySchema, isDataTableSalted));
            } else {
                IndexMaintainer maintainer = new IndexMaintainer(rowKeySchema, isDataTableSalted);
                maintainer.readFields(input);
                maintainers.add(maintainer);
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e); // Impossible
    }
    return maintainers;
}

From source file:org.apache.phoenix.index.IndexMaintainer.java

License:Apache License

@Deprecated // Only called by code older than our 4.10 release
@Override
public void readFields(DataInput input) throws IOException {
    int encodedIndexSaltBucketsAndMultiTenant = WritableUtils.readVInt(input);
    isMultiTenant = encodedIndexSaltBucketsAndMultiTenant < 0;
    nIndexSaltBuckets = Math.abs(encodedIndexSaltBucketsAndMultiTenant) - 1;
    int encodedIndexedColumnsAndViewId = WritableUtils.readVInt(input);
    boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0;
    if (hasViewIndexId) {
        // Fixed length
        viewIndexId = new byte[MetaDataUtil.getViewIndexIdDataType().getByteSize()];
        input.readFully(viewIndexId);
    }
    int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1;
    indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        byte[] cf = Bytes.readByteArray(input);
        byte[] cq = Bytes.readByteArray(input);
        indexedColumns.add(new ColumnReference(cf, cq));
    }
    indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        PDataType type = PDataType.values()[WritableUtils.readVInt(input)];
        indexedColumnTypes.add(type);
    }
    int encodedCoveredolumnsAndLocalIndex = WritableUtils.readVInt(input);
    isLocalIndex = encodedCoveredolumnsAndLocalIndex < 0;
    int nCoveredColumns = Math.abs(encodedCoveredolumnsAndLocalIndex) - 1;
    coveredColumnsMap = Maps.newHashMapWithExpectedSize(nCoveredColumns);
    for (int i = 0; i < nCoveredColumns; i++) {
        byte[] dataTableCf = Bytes.readByteArray(input);
        byte[] dataTableCq = Bytes.readByteArray(input);
        ColumnReference dataTableRef = new ColumnReference(dataTableCf, dataTableCq);
        byte[] indexTableCf = isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableCf) : dataTableCf;
        byte[] indexTableCq = IndexUtil.getIndexColumnName(dataTableCf, dataTableCq);
        ColumnReference indexTableRef = new ColumnReference(indexTableCf, indexTableCq);
        coveredColumnsMap.put(dataTableRef, indexTableRef);
    }
    // Hack to serialize whether the index row key is optimizable
    int len = WritableUtils.readVInt(input);
    if (len < 0) {
        rowKeyOrderOptimizable = false;
        len *= -1;
    } else {
        rowKeyOrderOptimizable = true;
    }
    indexTableName = new byte[len];
    input.readFully(indexTableName, 0, len);
    dataEmptyKeyValueCF = Bytes.readByteArray(input);
    len = WritableUtils.readVInt(input);
    //TODO remove this in the next major release
    boolean isNewClient = false;
    if (len < 0) {
        isNewClient = true;
        len = Math.abs(len);
    }
    byte[] emptyKeyValueCF = new byte[len];
    input.readFully(emptyKeyValueCF, 0, len);
    emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueCF);

    if (isNewClient) {
        int numIndexedExpressions = WritableUtils.readVInt(input);
        indexedExpressions = Lists.newArrayListWithExpectedSize(numIndexedExpressions);
        for (int i = 0; i < numIndexedExpressions; i++) {
            Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
            expression.readFields(input);
            indexedExpressions.add(expression);
        }
    } else {
        indexedExpressions = Lists.newArrayListWithExpectedSize(indexedColumns.size());
        Iterator<ColumnReference> colReferenceIter = indexedColumns.iterator();
        Iterator<PDataType> dataTypeIter = indexedColumnTypes.iterator();
        while (colReferenceIter.hasNext()) {
            ColumnReference colRef = colReferenceIter.next();
            final PDataType dataType = dataTypeIter.next();
            indexedExpressions.add(new KeyValueColumnExpression(new PDatum() {

                @Override
                public boolean isNullable() {
                    return true;
                }

                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }

                @Override
                public Integer getScale() {
                    return null;
                }

                @Override
                public Integer getMaxLength() {
                    return null;
                }

                @Override
                public PDataType getDataType() {
                    return dataType;
                }
            }, colRef.getFamily(), colRef.getQualifier()));
        }
    }

    rowKeyMetaData = newRowKeyMetaData();
    rowKeyMetaData.readFields(input);
    int nDataCFs = WritableUtils.readVInt(input);
    // Encode indexWALDisabled in nDataCFs
    indexWALDisabled = nDataCFs < 0;
    this.nDataCFs = Math.abs(nDataCFs) - 1;
    int encodedEstimatedIndexRowKeyBytesAndImmutableRows = WritableUtils.readVInt(input);
    this.immutableRows = encodedEstimatedIndexRowKeyBytesAndImmutableRows < 0;
    this.estimatedIndexRowKeyBytes = Math.abs(encodedEstimatedIndexRowKeyBytesAndImmutableRows);
    // Needed for backward compatibility. Clients older than 4.10 will have non-encoded tables.
    this.immutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
    this.encodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
    initCachedState();
}

From source file:org.apache.phoenix.index.IndexMaintainer.java

License:Apache License

public static IndexMaintainer fromProto(ServerCachingProtos.IndexMaintainer proto,
        RowKeySchema dataTableRowKeySchema, boolean isDataTableSalted) throws IOException {
    IndexMaintainer maintainer = new IndexMaintainer(dataTableRowKeySchema, isDataTableSalted);
    maintainer.nIndexSaltBuckets = proto.getSaltBuckets();
    maintainer.isMultiTenant = proto.getIsMultiTenant();
    maintainer.viewIndexId = proto.hasViewIndexId() ? proto.getViewIndexId().toByteArray() : null;
    List<ServerCachingProtos.ColumnReference> indexedColumnsList = proto.getIndexedColumnsList();
    maintainer.indexedColumns = new HashSet<ColumnReference>(indexedColumnsList.size());
    for (ServerCachingProtos.ColumnReference colRefFromProto : indexedColumnsList) {
        maintainer.indexedColumns.add(new ColumnReference(colRefFromProto.getFamily().toByteArray(),
                colRefFromProto.getQualifier().toByteArray()));
    }
    List<Integer> indexedColumnTypes = proto.getIndexedColumnTypeOrdinalList();
    maintainer.indexedColumnTypes = new ArrayList<PDataType>(indexedColumnTypes.size());
    for (Integer typeOrdinal : indexedColumnTypes) {
        maintainer.indexedColumnTypes.add(PDataType.values()[typeOrdinal]);
    }
    maintainer.indexTableName = proto.getIndexTableName().toByteArray();
    maintainer.rowKeyOrderOptimizable = proto.getRowKeyOrderOptimizable();
    maintainer.dataEmptyKeyValueCF = proto.getDataTableEmptyKeyValueColFamily().toByteArray();
    ServerCachingProtos.ImmutableBytesWritable emptyKeyValueColFamily = proto.getEmptyKeyValueColFamily();
    maintainer.emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueColFamily.getByteArray().toByteArray(),
            emptyKeyValueColFamily.getOffset(), emptyKeyValueColFamily.getLength());
    maintainer.indexedExpressions = new ArrayList<>();
    try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getIndexedExpressions().toByteArray())) {
        DataInput input = new DataInputStream(stream);
        while (stream.available() > 0) {
            int expressionOrdinal = WritableUtils.readVInt(input);
            Expression expression = ExpressionType.values()[expressionOrdinal].newInstance();
            expression.readFields(input);
            maintainer.indexedExpressions.add(expression);
        }
    }
    maintainer.rowKeyMetaData = newRowKeyMetaData(maintainer, dataTableRowKeySchema,
            maintainer.indexedExpressions.size(), isDataTableSalted, maintainer.isMultiTenant);
    try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getRowKeyMetadata().toByteArray())) {
        DataInput input = new DataInputStream(stream);
        maintainer.rowKeyMetaData.readFields(input);
    }
    maintainer.nDataCFs = proto.getNumDataTableColFamilies();
    maintainer.indexWALDisabled = proto.getIndexWalDisabled();
    maintainer.estimatedIndexRowKeyBytes = proto.getIndexRowKeyByteSize();
    maintainer.immutableRows = proto.getImmutable();
    List<ColumnInfo> indexedColumnInfoList = proto.getIndexedColumnInfoList();
    maintainer.indexedColumnsInfo = Sets.newHashSet();
    for (ColumnInfo info : indexedColumnInfoList) {
        maintainer.indexedColumnsInfo.add(new Pair<>(info.getFamilyName(), info.getColumnName()));
    }
    // proto doesn't support a single byte, so an explicit cast is needed here
    maintainer.encodingScheme = PTable.QualifierEncodingScheme
            .fromSerializedValue((byte) proto.getEncodingScheme());
    maintainer.immutableStorageScheme = PTable.ImmutableStorageScheme
            .fromSerializedValue((byte) proto.getImmutableStorageScheme());
    maintainer.isLocalIndex = proto.getIsLocalIndex();

    List<ServerCachingProtos.ColumnReference> dataTableColRefsForCoveredColumnsList = proto
            .getDataTableColRefForCoveredColumnsList();
    List<ServerCachingProtos.ColumnReference> indexTableColRefsForCoveredColumnsList = proto
            .getIndexTableColRefForCoveredColumnsList();
    maintainer.coveredColumnsMap = Maps
            .newHashMapWithExpectedSize(dataTableColRefsForCoveredColumnsList.size());
    boolean encodedColumnNames = maintainer.encodingScheme != NON_ENCODED_QUALIFIERS;
    Iterator<ServerCachingProtos.ColumnReference> indexTableColRefItr = indexTableColRefsForCoveredColumnsList
            .iterator();
    for (ServerCachingProtos.ColumnReference colRefFromProto : dataTableColRefsForCoveredColumnsList) {
        ColumnReference dataTableColRef = new ColumnReference(colRefFromProto.getFamily().toByteArray(),
                colRefFromProto.getQualifier().toByteArray());
        ColumnReference indexTableColRef;
        if (encodedColumnNames) {
            ServerCachingProtos.ColumnReference fromProto = indexTableColRefItr.next();
            indexTableColRef = new ColumnReference(fromProto.getFamily().toByteArray(),
                    fromProto.getQualifier().toByteArray());
        } else {
            byte[] cq = IndexUtil.getIndexColumnName(dataTableColRef.getFamily(),
                    dataTableColRef.getQualifier());
            byte[] cf = maintainer.isLocalIndex
                    ? IndexUtil.getLocalIndexColumnFamily(dataTableColRef.getFamily())
                    : dataTableColRef.getFamily();
            indexTableColRef = new ColumnReference(cf, cq);
        }
        maintainer.coveredColumnsMap.put(dataTableColRef, indexTableColRef);
    }
    maintainer.initCachedState();
    return maintainer;
}
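
The indexed-expressions loop above relies on a simple concatenated layout with no count prefix: for each expression, a vint holding its ExpressionType ordinal followed by the expression's own serialized fields, with the reader stopping once the stream is exhausted. A sketch of a producer for that layout (ExpressionType.valueOf(Expression) and Expression.write(DataOutput) are assumed to behave as in Phoenix's own serialization code):

// Sketch: serialize expressions in the format the while-loop above consumes.
static byte[] serializeExpressions(List<Expression> expressions) throws IOException {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    DataOutputStream output = new DataOutputStream(stream);
    for (Expression expression : expressions) {
        WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
        expression.write(output);
    }
    return stream.toByteArray();
}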

From source file:org.apache.phoenix.index.PhoenixIndexBuilder.java

License:Apache License

@Override
public List<Mutation> executeAtomicOp(Increment inc) throws IOException {
    byte[] opBytes = inc.getAttribute(ATOMIC_OP_ATTRIB);
    if (opBytes == null) { // Unexpected
        return null;
    }
    inc.setAttribute(ATOMIC_OP_ATTRIB, null);
    Put put = null;
    Delete delete = null;
    // We can neither use the time stamp in the Increment to set the Get time range
    // nor set the Put/Delete time stamp and have this be atomic, as HBase does not
    // handle that. Though we disallow using the ON DUPLICATE KEY clause when
    // CURRENT_SCN is set, we still may have a time stamp set as of when the table
    // was resolved on the client side. We need to ignore this as well due to limitations
    // in HBase, but this isn't too bad as the time will be very close to the current
    // time anyway.
    long ts = HConstants.LATEST_TIMESTAMP;
    byte[] rowKey = inc.getRow();
    final Get get = new Get(rowKey);
    if (isDupKeyIgnore(opBytes)) {
        get.setFilter(new FirstKeyOnlyFilter());
        Result result = this.env.getRegion().get(get);
        return result.isEmpty() ? convertIncrementToPutInSingletonList(inc) : Collections.<Mutation>emptyList();
    }
    ByteArrayInputStream stream = new ByteArrayInputStream(opBytes);
    DataInputStream input = new DataInputStream(stream);
    boolean skipFirstOp = input.readBoolean();
    short repeat = input.readShort();
    final int[] estimatedSizeHolder = { 0 };
    List<Pair<PTable, List<Expression>>> operations = Lists.newArrayListWithExpectedSize(3);
    while (true) {
        ExpressionVisitor<Void> visitor = new StatelessTraverseAllExpressionVisitor<Void>() {
            @Override
            public Void visit(KeyValueColumnExpression expression) {
                get.addColumn(expression.getColumnFamily(), expression.getColumnQualifier());
                estimatedSizeHolder[0]++;
                return null;
            }
        };
        try {
            int nExpressions = WritableUtils.readVInt(input);
            List<Expression> expressions = Lists.newArrayListWithExpectedSize(nExpressions);
            for (int i = 0; i < nExpressions; i++) {
                Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
                expression.readFields(input);
                expressions.add(expression);
                expression.accept(visitor);
            }
            PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input);
            PTable table = PTableImpl.createFromProto(tableProto);
            operations.add(new Pair<>(table, expressions));
        } catch (EOFException e) {
            break;
        }
    }
    int estimatedSize = estimatedSizeHolder[0];
    if (get.getFamilyMap().isEmpty()) {
        get.setFilter(new FirstKeyOnlyFilter());
    }
    MultiKeyValueTuple tuple;
    List<Cell> flattenedCells = null;
    List<Cell> cells = ((HRegion) this.env.getRegion()).get(get, false);
    if (cells.isEmpty()) {
        if (skipFirstOp) {
            if (operations.size() <= 1 && repeat <= 1) {
                return convertIncrementToPutInSingletonList(inc);
            }
            repeat--; // Skip first operation (if first wasn't ON DUPLICATE KEY IGNORE)
        }
        // Base current state off of new row
        flattenedCells = flattenCells(inc, estimatedSize);
        tuple = new MultiKeyValueTuple(flattenedCells);
    } else {
        // Base current state off of existing row
        tuple = new MultiKeyValueTuple(cells);
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (int opIndex = 0; opIndex < operations.size(); opIndex++) {
        Pair<PTable, List<Expression>> operation = operations.get(opIndex);
        PTable table = operation.getFirst();
        List<Expression> expressions = operation.getSecond();
        for (int j = 0; j < repeat; j++) { // repeater loop
            ptr.set(rowKey);
            // Sort the list of cells (if they've been flattened, in which case they're
            // not necessarily ordered correctly). We only need the list sorted if the
            // expressions are going to be executed, not when the outer loop is exited.
            // Hence we do it here, at the top of the loop.
            if (flattenedCells != null) {
                Collections.sort(flattenedCells, KeyValue.COMPARATOR);
            }
            PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false);
            int adjust = table.getBucketNum() == null ? 1 : 2;
            for (int i = 0; i < expressions.size(); i++) {
                Expression expression = expressions.get(i);
                ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
                expression.evaluate(tuple, ptr);
                PColumn column = table.getColumns().get(i + adjust);
                Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                // We are guaranteed that the two columns will have the
                // same type.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                        expression.getSortOrder(), expression.getMaxLength(), expression.getScale(),
                        column.getMaxLength(), column.getScale())) {
                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(),
                            column.getScale());
                }
                column.getDataType().coerceBytes(ptr, value, expression.getDataType(),
                        expression.getMaxLength(), expression.getScale(), expression.getSortOrder(),
                        column.getMaxLength(), column.getScale(), column.getSortOrder(),
                        table.rowKeyOrderOptimizable());
                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                row.setValue(column, bytes);
            }
            flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize);
            List<Mutation> mutations = row.toRowMutations();
            for (Mutation source : mutations) {
                flattenCells(source, flattenedCells);
            }
            tuple.setKeyValues(flattenedCells);
        }
        // Repeat only applies to first statement
        repeat = 1;
    }

    List<Mutation> mutations = Lists.newArrayListWithExpectedSize(2);
    for (int i = 0; i < tuple.size(); i++) {
        Cell cell = tuple.getValue(i);
        if (Type.codeToType(cell.getTypeByte()) == Type.Put) {
            if (put == null) {
                put = new Put(rowKey);
                transferAttributes(inc, put);
                mutations.add(put);
            }
            put.add(cell);
        } else {
            if (delete == null) {
                delete = new Delete(rowKey);
                transferAttributes(inc, delete);
                mutations.add(delete);
            }
            delete.addDeleteMarker(cell);
        }
    }
    return mutations;
}

From source file:org.apache.phoenix.iterate.NonAggregateRegionScannerFactory.java

License:Apache License

private static OrderedResultIterator deserializeFromScan(Scan scan, RegionScanner s) {
    byte[] topN = scan.getAttribute(BaseScannerRegionObserver.TOPN);
    if (topN == null) {
        return null;
    }
    ByteArrayInputStream stream = new ByteArrayInputStream(topN); // TODO: size?
    try {
        DataInputStream input = new DataInputStream(stream);
        int thresholdBytes = WritableUtils.readVInt(input);
        int limit = WritableUtils.readVInt(input);
        int estimatedRowSize = WritableUtils.readVInt(input);
        int size = WritableUtils.readVInt(input);
        List<OrderByExpression> orderByExpressions = Lists.newArrayListWithExpectedSize(size);
        for (int i = 0; i < size; i++) {
            OrderByExpression orderByExpression = new OrderByExpression();
            orderByExpression.readFields(input);
            orderByExpressions.add(orderByExpression);
        }
        PTable.QualifierEncodingScheme encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan);
        ResultIterator inner = new RegionScannerResultIterator(s,
                EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan), encodingScheme);
        return new OrderedResultIterator(inner, orderByExpressions, thresholdBytes, limit >= 0 ? limit : null,
                null, estimatedRowSize);
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
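
The reader above pins down the layout of the TOPN scan attribute: four leading vints (threshold bytes, limit, estimated row size, expression count) followed by the serialized OrderByExpressions. A sketch of a matching producer, assuming OrderByExpression exposes the usual Writable-style write(DataOutput) alongside the readFields used above (the standalone method is illustrative, not Phoenix's actual serialization code):

// Sketch: build a TOPN attribute in the layout deserializeFromScan expects.
static byte[] serializeTopN(int thresholdBytes, int limit, int estimatedRowSize,
        List<OrderByExpression> orderByExpressions) throws IOException {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    DataOutputStream output = new DataOutputStream(stream);
    WritableUtils.writeVInt(output, thresholdBytes);
    WritableUtils.writeVInt(output, limit);
    WritableUtils.writeVInt(output, estimatedRowSize);
    WritableUtils.writeVInt(output, orderByExpressions.size());
    for (OrderByExpression orderByExpression : orderByExpressions) {
        orderByExpression.write(output);
    }
    return stream.toByteArray();
}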

From source file:org.apache.phoenix.iterate.NonAggregateRegionScannerFactory.java

License:Apache License

private Expression[] deserializeArrayPostionalExpressionInfoFromScan(Scan scan, RegionScanner s,
        Set<KeyValueColumnExpression> arrayKVRefs) {
    byte[] specificArrayIdx = scan.getAttribute(BaseScannerRegionObserver.SPECIFIC_ARRAY_INDEX);
    if (specificArrayIdx == null) {
        return null;
    }
    KeyValueSchema.KeyValueSchemaBuilder builder = new KeyValueSchema.KeyValueSchemaBuilder(0);
    ByteArrayInputStream stream = new ByteArrayInputStream(specificArrayIdx);
    try {
        DataInputStream input = new DataInputStream(stream);
        int arrayKVRefSize = WritableUtils.readVInt(input);
        for (int i = 0; i < arrayKVRefSize; i++) {
            PTable.ImmutableStorageScheme scheme = EncodedColumnsUtil.getImmutableStorageScheme(scan);
            KeyValueColumnExpression kvExp = scheme != PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN
                    ? new SingleCellColumnExpression()
                    : new KeyValueColumnExpression();
            kvExp.readFields(input);
            arrayKVRefs.add(kvExp);
        }
        int arrayKVFuncSize = WritableUtils.readVInt(input);
        Expression[] arrayFuncRefs = new Expression[arrayKVFuncSize];
        for (int i = 0; i < arrayKVFuncSize; i++) {
            ArrayIndexFunction arrayIdxFunc = new ArrayIndexFunction();
            arrayIdxFunc.readFields(input);
            arrayFuncRefs[i] = arrayIdxFunc;
            builder.addField(arrayIdxFunc);
        }
        kvSchema = builder.build();
        kvSchemaBitSet = ValueBitSet.newInstance(kvSchema);
        return arrayFuncRefs;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}