Example usage for java.nio ByteBuffer arrayOffset

Introduction

On this page you can find example usages of java.nio.ByteBuffer.arrayOffset().

Prototype

public final int arrayOffset() 

Document

Returns the offset within this buffer's backing array of the first element of the buffer, if the buffer is backed by an accessible array.
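
Before the project examples, here is a minimal self-contained sketch (the class name ArrayOffsetDemo is purely illustrative, not from any project below) of why the offset matters: a slice shares its parent's backing array, so index 0 of the slice maps to a non-zero index in that array.

import java.nio.ByteBuffer;

public class ArrayOffsetDemo {
    public static void main(String[] args) {
        ByteBuffer parent = ByteBuffer.allocate(16);
        parent.position(4);
        ByteBuffer slice = parent.slice(); // shares parent's backing array
        System.out.println(slice.arrayOffset()); // prints 4
        // Direct access to the backing array must add the offset:
        byte first = slice.array()[slice.arrayOffset() + slice.position()];
        System.out.println(first); // prints 0 (newly allocated arrays are zeroed)
    }
}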

Usage

From source file:de.dfki.kiara.jsonrpc.JsonRpcMessage.java

private static JsonNode readFromBuffer(JsonRpcProtocol protocol, ByteBuffer data) throws IOException {
    byte[] array;
    int arrayOffset;
    int arrayLength;
    int oldPos = data.position();
    if (data.hasArray()) {
        array = data.array();
        arrayOffset = data.arrayOffset() + data.position(); // start at the current position within the backing array
        arrayLength = data.remaining();
    } else {
        array = new byte[data.remaining()];
        data.get(array);
        arrayOffset = 0;
        arrayLength = array.length;
    }
    data.position(oldPos);

    JsonNode node;
    try (JsonParser parser = protocol.getObjectReader().getFactory().createParser(array, arrayOffset,
            arrayLength)) {
        node = parser.readValueAsTree();
    }
    return node;
}
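
This hasArray() check with a copying fallback is the canonical pattern for handing a ByteBuffer to an array-based API: when an accessible backing array exists, the parser reads it in place starting at arrayOffset() plus the current position; otherwise the remaining bytes are copied out and the offset is simply 0. Saving and restoring the position leaves the buffer unchanged for the caller.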

From source file:org.usergrid.utils.JsonUtils.java

public static JsonNode nodeFromByteBuffer(ByteBuffer byteBuffer) {
    if ((byteBuffer == null) || !byteBuffer.hasRemaining()) {
        return null;
    }

    JsonNode obj = null;
    try {
        obj = smileMapper.readValue(byteBuffer.array(), byteBuffer.arrayOffset() + byteBuffer.position(),
                byteBuffer.remaining(), JsonNode.class);
    } catch (Exception e) {
        logger.error("Error parsing SMILE bytes", e);
    }
    return obj;
}
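
Here the offset passed to the mapper is arrayOffset() + position(): arrayOffset() translates buffer index 0 into an index in the backing array, and position() skips ahead to the first unread byte, so sliced or partially consumed buffers are handled correctly.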

From source file:org.apache.hadoop.hbase.util.UnsafeAccess.java

/**
 * Reads bytes at the given offset as an int value.
 * @param buf the ByteBuffer to read from
 * @param offset position in the buffer to read at
 * @return int value at offset
 */
static int getAsInt(ByteBuffer buf, int offset) {
    if (buf.isDirect()) {
        return theUnsafe.getInt(((DirectBuffer) buf).address() + offset);
    }
    return theUnsafe.getInt(buf.array(), BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset);
}
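
In the heap-backed branch, theUnsafe addresses the backing array directly: BYTE_ARRAY_BASE_OFFSET is the JVM-internal offset of the first element of a byte[], to which arrayOffset() (where the buffer's content starts within that array) and the caller's offset are added. The getAsLong and getAsShort variants on this page use the same arithmetic.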

From source file:org.apache.hadoop.hbase.util.UnsafeAccess.java

/**
 * Reads bytes at the given offset as a long value.
 * @param buf the ByteBuffer to read from
 * @param offset position in the buffer to read at
 * @return long value at offset
 */
static long getAsLong(ByteBuffer buf, int offset) {
    if (buf.isDirect()) {
        return theUnsafe.getLong(((DirectBuffer) buf).address() + offset);
    }
    return theUnsafe.getLong(buf.array(), BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset);
}

From source file:org.apache.hadoop.hbase.io.hfile.FixedFileTrailer.java

/**
 * Reads a file trailer from the given file.
 *
 * @param istream the input stream with the ability to seek. Does not have to
 *          be buffered, as only one read operation is made.
 * @param fileSize the file size. Can be obtained using
 *          {@link org.apache.hadoop.fs.FileSystem#getFileStatus(
 *          org.apache.hadoop.fs.Path)}.
 * @return the fixed file trailer read
 * @throws IOException if failed to read from the underlying stream, or the
 *           trailer is corrupted, or the version of the trailer is
 *           unsupported
 */
public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fileSize) throws IOException {
    int bufferSize = MAX_TRAILER_SIZE;
    long seekPoint = fileSize - bufferSize;
    if (seekPoint < 0) {
        // It is hard to imagine such a small HFile.
        seekPoint = 0;
        bufferSize = (int) fileSize;
    }

    istream.seek(seekPoint);
    ByteBuffer buf = ByteBuffer.allocate(bufferSize);
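    // For a heap buffer from ByteBuffer.allocate(), arrayOffset() is 0,
    // so the length argument below equals buf.limit().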
    istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit());

    // Read the version from the last int of the file.
    buf.position(buf.limit() - Bytes.SIZEOF_INT);
    int version = buf.getInt();

    // Extract the major and minor versions.
    int majorVersion = extractMajorVersion(version);
    int minorVersion = extractMinorVersion(version);

    HFile.checkFormatVersion(majorVersion); // throws IAE if invalid

    int trailerSize = getTrailerSize(majorVersion);

    FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
    fft.deserialize(new DataInputStream(
            new ByteArrayInputStream(buf.array(), buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
    return fft;
}

From source file:org.apache.hadoop.hbase.util.UnsafeAccess.java

/**
 * Reads bytes at the given offset as a short value.
 * @param buf the ByteBuffer to read from
 * @param offset position in the buffer to read at
 * @return short value at offset
 */
static short getAsShort(ByteBuffer buf, int offset) {
    if (buf.isDirect()) {
        return theUnsafe.getShort(((DirectBuffer) buf).address() + offset);
    }
    return theUnsafe.getShort(buf.array(), BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset);
}

From source file:org.apache.cassandra.utils.ByteBufferUtil.java

/**
 * You should almost never use this.  Instead, use the write* methods to avoid copies.
 */
public static byte[] getArray(ByteBuffer buffer) {
    int length = buffer.remaining();

    if (buffer.hasArray()) {
        int start = buffer.position();
        if (buffer.arrayOffset() == 0 && start == 0 && length == buffer.array().length)
            return buffer.array();
        else
            return Arrays.copyOfRange(buffer.array(), start + buffer.arrayOffset(),
                    start + length + buffer.arrayOffset());
    }
    // else, DirectByteBuffer.get() is the fastest route
    byte[] bytes = new byte[length];
    buffer.duplicate().get(bytes);

    return bytes;
}
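
getArray returns the backing array itself only when it coincides exactly with the buffer's contents (arrayOffset() == 0, position 0, and matching length); any other alignment would expose bytes outside the buffer's view, so a ranged copy starting at position + arrayOffset() is made instead. For non-array-backed buffers, duplicate() is used so the bulk get() does not disturb the original buffer's position.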

From source file:org.apache.cassandra.utils.ByteBufferUtil.java

/**
 * Transfer bytes from one ByteBuffer to another.
 * This function acts as System.arraycopy() but for ByteBuffers.
 *
 * @param src the source ByteBuffer
 * @param srcPos starting position in the source ByteBuffer
 * @param dst the destination ByteBuffer
 * @param dstPos starting position in the destination ByteBuffer
 * @param length the number of bytes to copy
 */
public static void arrayCopy(ByteBuffer src, int srcPos, ByteBuffer dst, int dstPos, int length) {
    if (src.hasArray() && dst.hasArray()) {
        System.arraycopy(src.array(), src.arrayOffset() + srcPos, dst.array(), dst.arrayOffset() + dstPos,
                length);
    } else {
        if (src.limit() - srcPos < length || dst.limit() - dstPos < length)
            throw new IndexOutOfBoundsException();

        for (int i = 0; i < length; i++) {
            dst.put(dstPos++, src.get(srcPos++));
        }
    }
}
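
Both branches use absolute indexes: srcPos and dstPos are buffer positions, which the array branch translates into array indexes by adding each buffer's arrayOffset(). Like System.arraycopy(), the method leaves the position and limit of both buffers unchanged.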

From source file:org.apache.tajo.storage.orc.OrcScanner.java

private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, Path path, long maxFileLength)
        throws IOException {
    FSDataInputStream file = fs.open(path);

    // figure out the size of the file using the option or filesystem
    long size;
    if (maxFileLength == Long.MAX_VALUE) {
        size = fs.getFileStatus(path).getLen();
    } else {
        size = maxFileLength;
    }

    //read last bytes into buffer to get PostScript
    int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
    ByteBuffer buffer = ByteBuffer.allocate(readSize);
    assert buffer.position() == 0;
    file.readFully((size - readSize), buffer.array(), buffer.arrayOffset(), readSize);
    buffer.position(0);

    //read the PostScript
    //get length of PostScript
    int psLen = buffer.get(readSize - 1) & 0xff;
    ensureOrcFooter(file, path, psLen, buffer);
    int psOffset = readSize - 1 - psLen;
    OrcProto.PostScript ps = extractPostScript(buffer, path, psLen, psOffset);

    int footerSize = (int) ps.getFooterLength();
    int metadataSize = (int) ps.getMetadataLength();

    //check if extra bytes need to be read
    ByteBuffer fullFooterBuffer = null;
    int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
    if (extra > 0) {
        //more bytes need to be read, seek back to the right place and read extra bytes
        ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
        file.readFully((size - readSize - extra), extraBuf.array(),
                extraBuf.arrayOffset() + extraBuf.position(), extra);
        extraBuf.position(extra);
        //append with already read bytes
        extraBuf.put(buffer);
        buffer = extraBuf;
        buffer.position(0);
        fullFooterBuffer = buffer.slice();
        buffer.limit(footerSize + metadataSize);
    } else {
        //footer is already in the bytes in buffer, just adjust position, length
        buffer.position(psOffset - footerSize - metadataSize);
        fullFooterBuffer = buffer.slice();
        buffer.limit(psOffset);
    }

    // remember position for later
    buffer.mark();

    file.close();

    return new FileMetaInfo(ps.getCompression().toString(), (int) ps.getCompressionBlockSize(),
            (int) ps.getMetadataLength(), buffer, ps.getVersionList(),
            org.apache.orc.OrcFile.WriterVersion.FUTURE, fullFooterBuffer);
}
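
Both readFully calls write straight into a buffer's backing array, so the destination index must include arrayOffset() (the second call also adds the buffer's position). For buffers created with ByteBuffer.allocate() the offset is 0, but including it keeps the code correct if the allocation strategy ever changes.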

From source file:org.apache.hadoop.hbase.util.UnsafeAccess.java

/**
 * Put a byte value at the specified offset in the buffer.
 * @param buf the byte buffer
 * @param offset position in the buffer
 * @param b byte to write out
 * @return incremented offset
 */
public static int putByte(ByteBuffer buf, int offset, byte b) {
    if (buf.isDirect()) {
        theUnsafe.putByte(((DirectBuffer) buf).address() + offset, b);
    } else {
        theUnsafe.putByte(buf.array(), BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset, b);
    }
    return offset + 1;
}
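
Returning offset + 1 lets callers chain successive writes without tracking the cursor themselves; a hypothetical usage sketch (the buffer and values are illustrative, not from the HBase source):

ByteBuffer buf = ByteBuffer.allocate(8);
int off = 0;
off = UnsafeAccess.putByte(buf, off, (byte) 0x01); // off becomes 1
off = UnsafeAccess.putByte(buf, off, (byte) 0x02); // off becomes 2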