Example usage for org.apache.spark.sql.catalyst.expressions UnsafeRow getBaseOffset

List of usage examples for org.apache.spark.sql.catalyst.expressions UnsafeRow getBaseOffset

Introduction

In this page you can find the example usage for org.apache.spark.sql.catalyst.expressions UnsafeRow getBaseOffset.

Prototype

public long getBaseOffset() 

Source Link

Usage

From source file: com.splicemachine.db.iapi.types.SQLTime.java

License: Apache License

/**
 *
 * Read data into a Project Tungsten Format (UnsafeRow).  We read
 * data as two ints./*from   w w  w  .j  a  v a  2  s  . co m*/
 *
 * @param unsafeRow
 * @param ordinal
 * @throws StandardException
  */
@Override
public void read(UnsafeRow unsafeRow, int ordinal) throws StandardException {
    if (unsafeRow.isNullAt(ordinal))
        setToNull();
    else {
        long offsetAndSize = unsafeRow.getLong(ordinal);
        int offset = (int) (offsetAndSize >> 32);
        encodedTime = Platform.getInt(unsafeRow.getBaseObject(), unsafeRow.getBaseOffset() + (long) offset);
        encodedTimeFraction = Platform.getInt(unsafeRow.getBaseObject(),
                unsafeRow.getBaseOffset() + (long) offset + 4L);
        isNull = false;
    }
}

From source file: com.splicemachine.db.iapi.types.SQLTimestamp.java

License: Apache License

/**
 *
 * Read from Project Tungsten format (UnsafeRow).  Timestamp is
 * read as 3 ints./*from w  ww  .j  av a 2s  . co  m*/
 *
 *
 * @param unsafeRow
 * @param ordinal
 */
@Override
public void read(UnsafeRow unsafeRow, int ordinal) throws StandardException {
    if (unsafeRow.isNullAt(ordinal))
        setToNull();
    else {
        long offsetAndSize = unsafeRow.getLong(ordinal);
        int offset = (int) (offsetAndSize >> 32);
        encodedDate = Platform.getInt(unsafeRow.getBaseObject(), unsafeRow.getBaseOffset() + (long) offset);
        encodedTime = Platform.getInt(unsafeRow.getBaseObject(),
                unsafeRow.getBaseOffset() + (long) offset + 4L);
        nanos = Platform.getInt(unsafeRow.getBaseObject(), unsafeRow.getBaseOffset() + (long) offset + 8L);
        isNull = false;
    }
}

From source file: edu.ucla.cs.wis.bigdatalog.spark.storage.map.UnsafeFixedWidthMonotonicAggregationMap.java

License: Apache License

/**
 * Returns the aggregation buffer for the given grouping key, inserting a
 * copy of the empty aggregation buffer the first time the key is seen.
 * The returned row is a reusable pointer into the map's value storage.
 *
 * @param unsafeGroupingKeyRow the serialized grouping key
 * @return the buffer row pointed at this key's value, or {@code null} if
 *         the map could not acquire memory for a new entry
 */
public UnsafeRow getAggregationBufferFromUnsafeRow(UnsafeRow unsafeGroupingKeyRow) {
    Object keyBase = unsafeGroupingKeyRow.getBaseObject();
    long keyOffset = unsafeGroupingKeyRow.getBaseOffset();
    int keyLength = unsafeGroupingKeyRow.getSizeInBytes();

    // Probe the map using the key's raw bytes.
    final edu.ucla.cs.wis.bigdatalog.spark.storage.map.BytesToBytesMap.Location loc =
            map.lookup(keyBase, keyOffset, keyLength);

    if (!loc.isDefined()) {
        // First occurrence of this grouping key: store a copy of the
        // empty aggregation buffer as its initial value.
        boolean putSucceeded = loc.putNewKey(keyBase, keyOffset, keyLength,
                emptyAggregationBuffer, Platform.BYTE_ARRAY_OFFSET, emptyAggregationBuffer.length);
        if (!putSucceeded) {
            return null; // out of memory for a new map entry
        }
    }

    // Re-point the shared buffer row at the stored (or just-inserted) value.
    final MemoryLocation address = loc.getValueAddress();
    currentAggregationBuffer.pointTo(address.getBaseObject(), address.getBaseOffset(),
            aggregationBufferSchema.length(), loc.getValueLength());
    return currentAggregationBuffer;
}

From source file: edu.ucla.cs.wis.bigdatalog.spark.storage.set.hashset.UnsafeFixedWidthSet.java

License: Apache License

/**
 * Inserts the given key row into the set if it is not already present.
 *
 * @param keyRow the key to insert; must be an {@link UnsafeRow}
 * @throws RuntimeException if the set cannot acquire memory to copy the key
 */
public void insert(InternalRow keyRow) {
    UnsafeRow unsafeGroupingKeyRow = (UnsafeRow) keyRow;
    Object keyBase = unsafeGroupingKeyRow.getBaseObject();
    long keyOffset = unsafeGroupingKeyRow.getBaseOffset();
    int keyLength = unsafeGroupingKeyRow.getSizeInBytes();

    // Probe the set using the key's raw bytes.
    final edu.ucla.cs.wis.bigdatalog.spark.storage.set.hashset.BytesSet.Location loc =
            set.lookup(keyBase, keyOffset, keyLength);

    if (!loc.isDefined()) {
        // Key not present yet: copy its bytes into the set's storage.
        boolean putSucceeded = loc.putNewKey(keyBase, keyOffset, keyLength);
        if (!putSucceeded) {
            throw new RuntimeException(
                    "Not enough memory to copy value " + unsafeGroupingKeyRow.toString() + " into set.");
        }
    }
}

From source file: edu.ucla.cs.wis.bigdatalog.spark.storage.set.hashset.UnsafeFixedWidthSet.java

License: Apache License

/**
 * Adds the key row to {@code diffSet} if and only if it is absent from
 * this set. This set itself is not modified.
 *
 * @param keyRow  the key to probe for; must be an {@link UnsafeRow}
 * @param diffSet the set that collects keys not yet present here
 */
public void ifNotExistsInsert(InternalRow keyRow, HashSet diffSet) {
    UnsafeRow unsafeGroupingKeyRow = (UnsafeRow) keyRow;

    // Probe this set using the key's raw bytes.
    final edu.ucla.cs.wis.bigdatalog.spark.storage.set.hashset.BytesSet.Location loc =
            set.lookup(unsafeGroupingKeyRow.getBaseObject(), unsafeGroupingKeyRow.getBaseOffset(),
                    unsafeGroupingKeyRow.getSizeInBytes());

    if (!loc.isDefined()) {
        diffSet.insert(keyRow);
    }
}