Example usage for java.nio ByteBuffer arrayOffset

List of usage examples for java.nio ByteBuffer arrayOffset

Introduction

On this page you can find example usage for java.nio ByteBuffer arrayOffset.

Prototype

public final int arrayOffset() 

Source Link

Document

Returns the offset of the byte array which this buffer is based on, if there is one.

Usage

From source file:com.blm.orc.ReaderImpl.java

/**
 * Reads the tail of an ORC file and extracts its PostScript plus the raw
 * footer/metadata bytes, without parsing the footer itself.
 *
 * @param fs the filesystem holding the file
 * @param path the ORC file to read
 * @param maxFileLength the file length to assume, or Long.MAX_VALUE to ask
 *        the filesystem for the real length
 * @return the file's compression kind, section sizes, and a buffer
 *         positioned over the metadata + footer bytes
 * @throws IOException if the file cannot be read or is not a valid ORC file
 */
private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, Path path, long maxFileLength)
        throws IOException {
    FSDataInputStream file = fs.open(path);
    // Close the stream on every exit path; the previous version leaked the
    // stream whenever any of the reads or validations below threw.
    try {
        // figure out the size of the file using the option or filesystem
        long size;
        if (maxFileLength == Long.MAX_VALUE) {
            size = fs.getFileStatus(path).getLen();
        } else {
            size = maxFileLength;
        }

        // read last bytes into buffer to get PostScript
        int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
        file.seek(size - readSize);
        ByteBuffer buffer = ByteBuffer.allocate(readSize);
        file.readFully(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());

        // the very last byte of the file is the PostScript's length
        int psLen = buffer.get(readSize - 1) & 0xff;
        ensureOrcFooter(file, path, psLen, buffer);
        int psOffset = readSize - 1 - psLen;
        CodedInputStream in = CodedInputStream.newInstance(buffer.array(), buffer.arrayOffset() + psOffset,
                psLen);
        OrcProto.PostScript ps = OrcProto.PostScript.parseFrom(in);

        checkOrcVersion(LOG, path, ps.getVersionList());

        int footerSize = (int) ps.getFooterLength();
        int metadataSize = (int) ps.getMetadataLength();
        OrcFile.WriterVersion writerVersion;
        if (ps.hasWriterVersion()) {
            writerVersion = getWriterVersion(ps.getWriterVersion());
        } else {
            writerVersion = OrcFile.WriterVersion.ORIGINAL;
        }

        // reject files compressed with a codec this reader does not know
        switch (ps.getCompression()) {
        case NONE:
        case ZLIB:
        case SNAPPY:
        case LZO:
            break;
        default:
            throw new IllegalArgumentException("Unknown compression");
        }

        // check if extra bytes need to be read
        int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
        if (extra > 0) {
            // more bytes need to be read, seek back to the right place and read extra bytes
            file.seek(size - readSize - extra);
            ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
            file.readFully(extraBuf.array(), extraBuf.arrayOffset() + extraBuf.position(), extra);
            extraBuf.position(extra);
            // append with already read bytes
            extraBuf.put(buffer);
            buffer = extraBuf;
            buffer.position(0);
            buffer.limit(footerSize + metadataSize);
        } else {
            // footer is already in the bytes in buffer, just adjust position, length
            buffer.position(psOffset - footerSize - metadataSize);
            buffer.limit(psOffset);
        }

        // remember position for later
        buffer.mark();

        return new FileMetaInfo(ps.getCompression().toString(), (int) ps.getCompressionBlockSize(),
                (int) ps.getMetadataLength(), buffer, ps.getVersionList(), writerVersion);
    } finally {
        file.close();
    }
}

From source file:com.glaf.core.util.ByteBufferUtils.java

/**
 * Returns a fresh copy of the readable bytes of {@code buffer}. In most
 * cases {@link ByteBuffer#duplicate()} is the better choice: it yields an
 * independent position/limit without copying the backing array.
 *
 * @param buffer source buffer; must not be null (asserted)
 * @return a new buffer holding a copy of {@code buffer}'s remaining bytes,
 *         or the shared empty buffer when nothing remains
 */
public static ByteBuffer clone(ByteBuffer buffer) {
    assert buffer != null;

    int length = buffer.remaining();
    if (length == 0)
        return EMPTY_BYTE_BUFFER;

    ByteBuffer copy = ByteBuffer.allocate(length);
    if (buffer.hasArray()) {
        // Heap buffer: copy straight out of the backing array.
        System.arraycopy(buffer.array(), buffer.arrayOffset() + buffer.position(), copy.array(), 0, length);
    } else {
        // Direct or read-only buffer: bulk-put through a duplicate so the
        // source buffer's position is left untouched.
        copy.put(buffer.duplicate());
        copy.flip();
    }

    return copy;
}

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java

/**
 * Renders up to 16 bytes of {@code buf}, starting at {@code pos}, as a
 * binary-escaped string. Used for error messages; appends "..." when more
 * bytes were available than shown.
 */
private static String nextBytesToStr(ByteBuffer buf, int pos) {
    int available = buf.limit() - pos;
    int shown = Math.min(16, available);
    String suffix = shown < available ? "..." : "";
    return Bytes.toStringBinary(buf.array(), buf.arrayOffset() + pos, shown) + suffix;
}

From source file:net.darkmist.alib.io.BufferUtil.java

/**
 * Returns the contents of {@code buf} from index 0 up to its limit as a
 * byte array. The caller's buffer is never modified (a duplicate is used),
 * but the fast path below returns the buffer's own backing array, so the
 * result MAY alias the buffer's storage.
 *
 * @param buf the buffer to read; not modified
 * @return a byte array of length {@code buf.limit()} holding its contents
 */
public static byte[] asBytes(ByteBuffer buf) {
    buf = buf.duplicate();
    /* To use buf.array() the buffer must:
     *    be writable as the array will be writable
     *    have arrayOffset() == 0 or the array will not start at the right location
     *    the returned array must be the same length as the buffer's limit or it will be the wrong size.
     */
    if (!buf.isReadOnly() && buf.hasArray() && buf.arrayOffset() == 0) {
        logger.debug("!read-only, hasArray && offset is 0");
        byte[] ret = buf.array();

        if (ret.length == buf.limit())
            return ret;
        logger.debug("length of array !=limit, doing copy...");
    }

    // Copy path. Rewind the duplicate first: the fast path above ignores
    // position, and without the rewind buf.get(bytes, 0, limit) would throw
    // BufferUnderflowException whenever position > 0 (remaining < limit).
    buf.rewind();
    byte[] bytes = new byte[buf.limit()];
    buf.get(bytes, 0, buf.limit());
    return bytes;
}

From source file:com.blm.orc.ReaderImpl.java

/**
 * Ensure this is an ORC file to prevent users from trying to read text
 * files or RC files as ORC files.
 * @param in the file being read
 * @param path the filename for error messages
 * @param psLen the postscript length
 * @param buffer the tail of the file
 * @throws IOException if the file is not a valid ORC file or cannot be read
 */
static void ensureOrcFooter(FSDataInputStream in, Path path, int psLen, ByteBuffer buffer) throws IOException {
    int len = OrcFile.MAGIC.length();
    // The postscript must hold at least the magic string plus the one-byte
    // postscript-length field itself.
    if (psLen < len + 1) {
        throw new IOException("Malformed ORC file " + path + ". Invalid postscript length " + psLen);
    }
    // Backing-array index of the magic string: it sits just before the
    // trailing postscript-length byte.
    // NOTE(review): using position() + limit() assumes the buffer's limit
    // marks the end of the file tail — confirm against the caller's layout.
    int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - 1 - len;
    byte[] array = buffer.array();
    // now look for the magic string at the end of the postscript.
    if (!Text.decode(array, offset, len).equals(OrcFile.MAGIC)) {
        // If it isn't there, this may be the 0.11.0 version of ORC.
        // Read the first 3 bytes of the file to check for the header
        in.seek(0);
        byte[] header = new byte[len];
        in.readFully(header, 0, len);
        // if it isn't there, this isn't an ORC file
        if (!Text.decode(header, 0, len).equals(OrcFile.MAGIC)) {
            throw new IOException("Malformed ORC file " + path + ". Invalid postscript.");
        }
    }
}

From source file:android.framework.util.jar.Manifest.java

/**
 * Writes one "Name: value" manifest entry to {@code os}, encoding the value
 * with {@code encoder} and wrapping it across physical lines so each stays
 * within LINE_LENGTH_LIMIT bytes; continuation lines begin with a space.
 *
 * @param os destination stream
 * @param name attribute name, written as US-ASCII
 * @param value attribute value, encoded chunk by chunk
 * @param encoder charset encoder; reset here before use
 * @param bBuf scratch buffer for encoded chunks; assumed to be
 *        array-backed since bBuf.array() is read below — TODO confirm
 * @throws IOException if writing to {@code os} fails
 */
private static void writeEntry(OutputStream os, Attributes.Name name, String value, CharsetEncoder encoder,
        ByteBuffer bBuf) throws IOException {
    String nameString = name.getName();
    os.write(nameString.getBytes(Charsets.US_ASCII));
    os.write(VALUE_SEPARATOR);

    encoder.reset();
    // First line budget: limit minus the name and the ": " separator.
    bBuf.clear().limit(LINE_LENGTH_LIMIT - nameString.length() - 2);

    CharBuffer cBuf = CharBuffer.wrap(value);

    while (true) {
        CoderResult r = encoder.encode(cBuf, bBuf, true);
        if (CoderResult.UNDERFLOW == r) {
            // All input consumed; flush any bytes the encoder buffered.
            r = encoder.flush(bBuf);
        }
        // Emit whatever was encoded into bBuf this round.
        os.write(bBuf.array(), bBuf.arrayOffset(), bBuf.position());
        os.write(LINE_SEPARATOR);
        if (CoderResult.UNDERFLOW == r) {
            break;
        }
        // NOTE(review): only OVERFLOW is expected here; a malformed/unmappable
        // CoderResult is never checked and would keep this loop spinning —
        // presumably the encoder is configured to REPLACE; verify.
        os.write(' ');
        // Continuation lines lose one byte to the leading space.
        bBuf.clear().limit(LINE_LENGTH_LIMIT - 1);
    }
}

From source file:org.apache.hadoop.hive.ql.io.orc.ExternalCache.java

/**
 * Builds an OrcTail from footer bytes cached in the metastore, validating
 * them by forcing the lazy stripe-statistics read. Returns null (after
 * logging the raw bytes) when the cached data is absent or unreadable.
 */
private static OrcTail createOrcTailFromMs(HdfsFileStatusWithId file, ByteBuffer bb) throws IOException {
    if (bb == null) {
        return null;
    }
    FileStatus fs = file.getFileStatus();
    ByteBuffer copy = bb.duplicate();
    try {
        OrcTail tail = ReaderImpl.extractFileTail(copy, fs.getLen(), fs.getModificationTime());
        // trigger lazy read of metadata to make sure serialized data is not corrupted and readable
        tail.getStripeStatistics();
        return tail;
    } catch (Exception ex) {
        // Dump the cached bytes so the bad cache entry can be diagnosed.
        byte[] cached = new byte[bb.remaining()];
        System.arraycopy(bb.array(), bb.arrayOffset() + bb.position(), cached, 0, cached.length);
        String msg = "Failed to parse the footer stored in cache for file ID " + file.getFileId() + " " + bb
                + " [ " + Hex.encodeHexString(cached) + " ]";
        LOG.error(msg, ex);
        return null;
    }
}

From source file:org.apache.hadoop.hive.ql.io.orc.ReaderImpl.java

/**
 * Ensure this is an ORC file to prevent users from trying to read text
 * files or RC files as ORC files.
 * @param in the file being read
 * @param path the filename for error messages
 * @param psLen the postscript length
 * @param buffer the tail of the file
 * @throws IOException if the file cannot be read
 * @throws FileFormatException if the file is not a valid ORC file
 */
static void ensureOrcFooter(FSDataInputStream in, Path path, int psLen, ByteBuffer buffer) throws IOException {
    int len = OrcFile.MAGIC.length();
    // The postscript must hold at least the magic string plus the one-byte
    // postscript-length field itself.
    if (psLen < len + 1) {
        throw new FileFormatException("Malformed ORC file " + path + ". Invalid postscript length " + psLen);
    }
    // Backing-array index of the magic string: it sits just before the
    // trailing postscript-length byte.
    // NOTE(review): using position() + limit() assumes the buffer's limit
    // marks the end of the file tail — confirm against the caller's layout.
    int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - 1 - len;
    byte[] array = buffer.array();
    // now look for the magic string at the end of the postscript.
    if (!Text.decode(array, offset, len).equals(OrcFile.MAGIC)) {
        // If it isn't there, this may be the 0.11.0 version of ORC.
        // Read the first 3 bytes of the file to check for the header
        in.seek(0);
        byte[] header = new byte[len];
        in.readFully(header, 0, len);
        // if it isn't there, this isn't an ORC file
        if (!Text.decode(header, 0, len).equals(OrcFile.MAGIC)) {
            throw new FileFormatException("Malformed ORC file " + path + ". Invalid postscript.");
        }
    }
}

From source file:org.apache.nutch.store.readable.StoreReadable.java

/**
 * Given a <code>ByteBuffer</code> representing an html file of an
 * <em>unknown</em> encoding,  read out 'charset' parameter in the meta tag
 * from the first <code>CHUNK_SIZE</code> bytes.
 * If there's no meta tag for Content-Type or no charset is specified,
 * the content is checked for a Unicode Byte Order Mark (BOM).
 * This will also cover non-byte oriented character encodings (UTF-16 only).
 * If no character set can be determined,
 * <code>null</code> is returned.  <br />
 * See also http://www.w3.org/International/questions/qa-html-encoding-declarations,
 * http://www.w3.org/TR/2011/WD-html5-diff-20110405/#character-encoding, and
 * http://www.w3.org/TR/REC-xml/#sec-guessing
 * <br />/*from   w  ww  . j  av a  2s  . c o m*/
 *
 * @param content <code>ByteBuffer</code> representation of an html file
 */

private static String sniffCharacterEncoding(ByteBuffer content) {
    System.out.println(
            "[STORE-READABLE]sniffCharacterEncoding----------------------------------------------------------");
    int length = Math.min(content.remaining(), CHUNK_SIZE);

    // We don't care about non-ASCII parts so that it's sufficient
    // to just inflate each byte to a 16-bit value by padding.
    // For instance, the sequence {0x41, 0x82, 0xb7} will be turned into
    // {U+0041, U+0082, U+00B7}.
    String str = "";
    try {
        str = new String(content.array(), content.arrayOffset() + content.position(), length,
                Charset.forName("ASCII").toString());
    } catch (UnsupportedEncodingException e) {
        // code should never come here, but just in case...
        return null;
    }

    Matcher metaMatcher = metaPattern.matcher(str);
    String encoding = null;
    if (metaMatcher.find()) {
        Matcher charsetMatcher = charsetPattern.matcher(metaMatcher.group(1));
        if (charsetMatcher.find())
            encoding = new String(charsetMatcher.group(1));
    }
    if (encoding == null) {
        // check for HTML5 meta charset
        metaMatcher = charsetPatternHTML5.matcher(str);
        if (metaMatcher.find()) {
            encoding = new String(metaMatcher.group(1));
        }
    }
    if (encoding == null) {
        // check for BOM
        if (length >= 3 && content.get(0) == (byte) 0xEF && content.get(1) == (byte) 0xBB
                && content.get(2) == (byte) 0xBF) {
            encoding = "UTF-8";
        } else if (length >= 2) {
            if (content.get(0) == (byte) 0xFF && content.get(1) == (byte) 0xFE) {
                encoding = "UTF-16LE";
            } else if (content.get(0) == (byte) 0xFE && content.get(1) == (byte) 0xFF) {
                encoding = "UTF-16BE";
            }
        }
    }

    return encoding;
}

From source file:org.apache.hadoop.hbase.KeyValueUtil.java

/**
 * Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
 * position to the start of the next KeyValue. Does not allocate a new array or copy data.
 * @param bb heap-backed buffer positioned at the start of a serialized KeyValue
 * @param includesMvccVersion whether a vlong mvcc/sequence id trails the cell bytes
 * @param includesTags whether a 2-byte tags length plus tag bytes follow the value
 * @return the shallow KeyValue, or null when the buffer has no bytes remaining
 */
public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
        boolean includesTags) {
    if (bb.isDirect()) {
        throw new IllegalArgumentException("only supports heap buffers");
    }
    if (bb.remaining() < 1) {
        return null;
    }
    KeyValue keyValue = null;
    // Array index where this cell starts; the KeyValue built below aliases
    // bb's backing array from this offset.
    int underlyingArrayOffset = bb.arrayOffset() + bb.position();
    int keyLength = bb.getInt();
    int valueLength = bb.getInt();
    // Advance past key and value bytes; data stays in place, only the
    // position moves.
    ByteBufferUtils.skip(bb, keyLength + valueLength);
    int tagsLength = 0;
    if (includesTags) {
        // Read short as unsigned, high byte first
        tagsLength = ((bb.get() & 0xff) << 8) ^ (bb.get() & 0xff);
        ByteBufferUtils.skip(bb, tagsLength);
    }
    int kvLength = (int) KeyValue.getKeyValueDataStructureSize(keyLength, valueLength, tagsLength);
    keyValue = new KeyValue(bb.array(), underlyingArrayOffset, kvLength);
    if (includesMvccVersion) {
        // Consume the trailing vlong so bb ends up positioned at the next cell.
        long mvccVersion = ByteBufferUtils.readVLong(bb);
        keyValue.setSequenceId(mvccVersion);
    }
    return keyValue;
}