Example usage for org.apache.spark.unsafe Platform BYTE_ARRAY_OFFSET

List of usage examples for org.apache.spark.unsafe Platform BYTE_ARRAY_OFFSET

Introduction

In this page you can find the example usage for org.apache.spark.unsafe Platform BYTE_ARRAY_OFFSET.

Prototype

int BYTE_ARRAY_OFFSET

To view the source code for org.apache.spark.unsafe Platform BYTE_ARRAY_OFFSET, click the Source Link below.

Click Source Link

Usage

From source file:edu.ucla.cs.wis.bigdatalog.spark.storage.map.UnsafeFixedWidthMonotonicAggregationMap.java

License:Apache License

/**
 * Returns the aggregation buffer associated with the given serialized grouping key,
 * inserting a copy of the empty aggregation buffer on first sight of the key.
 *
 * @param unsafeGroupingKeyRow the serialized grouping key to probe with
 * @return the aggregation buffer row for this key, or {@code null} if the map
 *         could not allocate memory for a new entry
 */
public UnsafeRow getAggregationBufferFromUnsafeRow(UnsafeRow unsafeGroupingKeyRow) {
    final Object keyBase = unsafeGroupingKeyRow.getBaseObject();
    final long keyOffset = unsafeGroupingKeyRow.getBaseOffset();
    final int keyLength = unsafeGroupingKeyRow.getSizeInBytes();

    // Probe the map with the raw bytes of the grouping key.
    final edu.ucla.cs.wis.bigdatalog.spark.storage.map.BytesToBytesMap.Location loc = map
            .lookup(keyBase, keyOffset, keyLength);
    if (!loc.isDefined()) {
        // First time we see this key: seed it with a copy of the empty aggregation buffer.
        final boolean inserted = loc.putNewKey(keyBase, keyOffset, keyLength, emptyAggregationBuffer,
                Platform.BYTE_ARRAY_OFFSET, emptyAggregationBuffer.length);
        if (!inserted) {
            // Out of memory in the map: signal failure to the caller.
            return null;
        }
    }

    // Point the shared buffer row at the value we just stored or found.
    final MemoryLocation valueAddress = loc.getValueAddress();
    currentAggregationBuffer.pointTo(valueAddress.getBaseObject(), valueAddress.getBaseOffset(),
            aggregationBufferSchema.length(), loc.getValueLength());
    return currentAggregationBuffer;
}

From source file:edu.ucla.cs.wis.bigdatalog.spark.storage.map.UnsafeFixedWidthMonotonicAggregationMap.java

License:Apache License

// NOTE(review): this annotation now attaches to readExternal() below, because the
// method it presumably annotated originally (printPerfMetrics) is commented out.
@SuppressWarnings("UseOfSystemOutOrSystemErr")
/*public void printPerfMetrics() {
if (!enablePerfMetrics) {
    throw new IllegalStateException("Perf metrics not enabled");
}
System.out.println("Average probes per lookup: " + map.getAverageProbesPerLookup());
System.out.println("Number of hash collisions: " + map.getNumHashCollisions());
System.out.println("Time spent resizing (ns): " + map.getTimeSpentResizingNs());
System.out.println("Total memory consumption (bytes): " + map.getTotalMemoryConsumption());
}*/

// Deserializes this map's state from the stream. The wire format read here is, in order:
//   1. int   nKeys                     - number of key/value entries that follow
//   2. int   enablePerfMetrics flag    - 1 means enabled
//   3. int   size + bytes              - serialized aggregationBufferSchema (StructType)
//   4. int   size + bytes              - serialized groupingKeySchema (StructType)
//   5. nKeys entries, each: int keySize, int valuesSize, key bytes, value bytes
// This must mirror the corresponding writeExternal (not visible in this chunk) — verify.
public void readExternal(ObjectInput in) throws java.io.IOException {
    // Only consumed by the commented-out timing printout at the bottom of this method.
    long start = System.currentTimeMillis();
    int nKeys = in.readInt();
    int initialCapacity = nKeys;
    if (initialCapacity == 0)
        initialCapacity = 1024 * 16; // default capacity when the serialized map was empty

    // Page size comes from the running SparkEnv when available; otherwise fall back
    // to the configured (or default 16m) buffer page size.
    long pageSizeBytes;
    if (SparkEnv.get() != null)
        pageSizeBytes = SparkEnv.get().memoryManager().pageSizeBytes();
    else
        pageSizeBytes = new SparkConf().getSizeAsBytes("spark.buffer.pageSize", "16m");

    enablePerfMetrics = (in.readInt() == 1);

    // Schemas were serialized with SparkSqlSerializer as length-prefixed byte blobs.
    int serializedSize = in.readInt();
    byte[] bytes = new byte[serializedSize];
    in.readFully(bytes);
    aggregationBufferSchema = (StructType) SparkSqlSerializer.deserialize(bytes,
            ClassTag$.MODULE$.apply(StructType.class));

    serializedSize = in.readInt();
    bytes = new byte[serializedSize];
    in.readFully(bytes);
    groupingKeySchema = (StructType) SparkSqlSerializer.deserialize(bytes,
            ClassTag$.MODULE$.apply(StructType.class));

    groupingKeyProjection = UnsafeProjection.create(groupingKeySchema);

    currentAggregationBuffer = new UnsafeRow();

    map = new edu.ucla.cs.wis.bigdatalog.spark.storage.map.BytesToBytesMap(initialCapacity, pageSizeBytes,
            enablePerfMetrics);

    int i = 0;
    // Scratch buffers, grown on demand so large entries don't fail and small
    // entries don't reallocate per iteration.
    byte[] keyBuffer = new byte[1024];
    byte[] valuesBuffer = new byte[1024];
    while (i < nKeys) {
        int keySize = in.readInt();
        int valuesSize = in.readInt();
        if (keySize > keyBuffer.length)
            keyBuffer = new byte[keySize];

        in.readFully(keyBuffer, 0, keySize);
        if (valuesSize > valuesBuffer.length)
            valuesBuffer = new byte[valuesSize];

        in.readFully(valuesBuffer, 0, valuesSize);

        // Re-insert the entry into the freshly built binary map.
        BytesToBytesMap.Location loc = map.lookup(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize);

        // Serialized data should never contain the same key twice.
        assert (!loc.isDefined()) : "Duplicated key found!";

        boolean putSucceeded = loc.putNewKey(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize, valuesBuffer,
                Platform.BYTE_ARRAY_OFFSET, valuesSize);

        if (!putSucceeded)
            throw new IOException("Could not allocate memory to deserialize BytesToBytesMap");

        i += 1;
    }

    //System.out.println("readExternal took " + (System.currentTimeMillis() - start) + " ms");
}

From source file:edu.ucla.cs.wis.bigdatalog.spark.storage.map.UnsafeFixedWidthMonotonicAggregationMap.java

License:Apache License

/**
 * Copies {@code length} bytes starting at the given memory location into an on-heap
 * array and writes them to the output stream.
 *
 * @param buffer scratch array to stage the bytes in; a larger array is allocated
 *               locally when it is too small
 * @param addr   base object / offset pair identifying the source region
 * @param length number of bytes to copy and write
 * @param out    destination stream
 * @throws java.io.IOException if writing to the stream fails
 */
private void write(byte[] buffer, MemoryLocation addr, int length, ObjectOutput out)
        throws java.io.IOException {
    // Use the caller's scratch buffer when it is big enough, otherwise allocate one.
    final byte[] scratch = buffer.length < length ? new byte[length] : buffer;
    Platform.copyMemory(addr.getBaseObject(), addr.getBaseOffset(), scratch, Platform.BYTE_ARRAY_OFFSET,
            length);
    out.write(scratch, 0, length);
}