Example usage for java.nio ByteBuffer position

List of usage examples for java.nio ByteBuffer position

Introduction

On this page you can find example usages of java.nio.ByteBuffer.position.

Prototype

public final int position() 

Source Link

Document

Returns the position of this buffer.

Usage

From source file:com.blm.orc.ReaderImpl.java

/**
 * Reads the tail of an ORC file and extracts the information needed to open
 * it: the PostScript plus the (still serialized) footer and metadata sections.
 *
 * @param fs filesystem used to open the file
 * @param path the file to read
 * @param maxFileLength length to assume for the file, or Long.MAX_VALUE to
 *                      use the filesystem's reported length
 * @return the extracted file metadata; the returned buffer spans the
 *         metadata + footer bytes with its mark set at the buffer start
 * @throws IOException if the file cannot be read or is not a valid ORC file
 */
private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, Path path, long maxFileLength)
        throws IOException {
    FSDataInputStream file = fs.open(path);
    // close the stream even when parsing fails; the previous version leaked
    // the handle on any exception thrown below
    try {
        // figure out the size of the file using the option or filesystem
        long size;
        if (maxFileLength == Long.MAX_VALUE) {
            size = fs.getFileStatus(path).getLen();
        } else {
            size = maxFileLength;
        }

        //read last bytes into buffer to get PostScript
        int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
        file.seek(size - readSize);
        ByteBuffer buffer = ByteBuffer.allocate(readSize);
        file.readFully(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());

        //read the PostScript
        //the very last byte of the file is the (unsigned) PostScript length
        int psLen = buffer.get(readSize - 1) & 0xff;
        ensureOrcFooter(file, path, psLen, buffer);
        int psOffset = readSize - 1 - psLen;
        CodedInputStream in = CodedInputStream.newInstance(buffer.array(), buffer.arrayOffset() + psOffset, psLen);
        OrcProto.PostScript ps = OrcProto.PostScript.parseFrom(in);

        checkOrcVersion(LOG, path, ps.getVersionList());

        int footerSize = (int) ps.getFooterLength();
        int metadataSize = (int) ps.getMetadataLength();
        OrcFile.WriterVersion writerVersion;
        if (ps.hasWriterVersion()) {
            writerVersion = getWriterVersion(ps.getWriterVersion());
        } else {
            // files written before the writer-version field existed
            writerVersion = OrcFile.WriterVersion.ORIGINAL;
        }

        //check compression codec
        switch (ps.getCompression()) {
        case NONE:
            break;
        case ZLIB:
            break;
        case SNAPPY:
            break;
        case LZO:
            break;
        default:
            throw new IllegalArgumentException("Unknown compression");
        }

        //check if extra bytes need to be read
        int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
        if (extra > 0) {
            //more bytes need to be read, seek back to the right place and read extra bytes
            file.seek(size - readSize - extra);
            ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
            file.readFully(extraBuf.array(), extraBuf.arrayOffset() + extraBuf.position(), extra);
            extraBuf.position(extra);
            //append with already read bytes
            extraBuf.put(buffer);
            buffer = extraBuf;
            buffer.position(0);
            buffer.limit(footerSize + metadataSize);
        } else {
            //footer is already in the bytes in buffer, just adjust position, length
            buffer.position(psOffset - footerSize - metadataSize);
            buffer.limit(psOffset);
        }

        // remember position for later
        buffer.mark();

        return new FileMetaInfo(ps.getCompression().toString(), (int) ps.getCompressionBlockSize(),
                (int) ps.getMetadataLength(), buffer, ps.getVersionList(), writerVersion);
    } finally {
        file.close();
    }
}

From source file:com.microsoft.tfs.core.util.FileEncodingDetector.java

/**
 * Tests whether the given byte array looks like an ANSI text file with the
 * default text encoding, i.e. can be decoded with the current ANSI
 * character set. In multi-byte character sets (like Japanese, for example)
 * the entire byte array might not be convertible, because the end of the
 * array may hold a broken multi-byte character. Such files are still
 * accepted as ANSI if the undecoded remainder of the array is short enough.
 *
 * @param bytes
 *        the bytes to check for ANSI-ness (must not be <code>null</code>)
 * @param limit
 *        the maximum number of bytes to read.
 * @return true if the given bytes look like part of an ANSI text file,
 *         false if they do not (because they contain control characters or
 *         other patterns).
 */
protected static boolean looksLikeANSI(final byte[] bytes, final int limit) {
    final Charset ansiCharset =
            CodePageMapping.getCharset(FileEncoding.getDefaultTextEncoding().getCodePage());

    // Strict decoder: any malformed or unmappable input is reported as an
    // error instead of being replaced.
    final CharsetDecoder strictDecoder = ansiCharset.newDecoder();
    strictDecoder.onMalformedInput(CodingErrorAction.REPORT);
    strictDecoder.onUnmappableCharacter(CodingErrorAction.REPORT);

    final ByteBuffer input = ByteBuffer.wrap(bytes, 0, limit);
    final CharBuffer output = CharBuffer.allocate(limit);
    final CoderResult result = strictDecoder.decode(input, output, true);

    if (result.isError()) {
        // Tolerate an error only very near the end of the data, where it is
        // likely just a truncated multi-byte character.
        return input.position() > limit - 5;
    }
    return true;
}

From source file:edu.umass.cs.utils.Util.java

/**
 * Transfers bytes from {@code src} to {@code dst} without ever throwing a
 * {@link java.nio.BufferOverflowException}: when {@code src} holds more
 * bytes than {@code dst} can accept, exactly {@code dst.remaining()} bytes
 * are copied and {@code src}'s limit is restored afterwards.
 *
 * @param dst the destination buffer
 * @param src the source buffer
 * @return {@code dst}, for call chaining
 */
public static ByteBuffer put(ByteBuffer dst, ByteBuffer src) {
    final int room = dst.remaining();
    if (src.remaining() < room) {
        // Everything fits; delegate to the plain bulk put.
        return dst.put(src);
    }
    // Temporarily cap src so the bulk put copies exactly `room` bytes,
    // then restore the caller's limit.
    final int savedLimit = src.limit();
    src.limit(src.position() + room);
    dst.put(src);
    src.limit(savedLimit);
    return dst;
}

From source file:com.healthmarketscience.jackcess.impl.OleUtil.java

/**
 * Parses an OLE "Package" stream out of the blob's data block and wraps it
 * in the matching content implementation: an embedded file, a linked file,
 * or opaque "other" content when the stream is unrecognized.
 *
 * @param blob the OLE blob being parsed
 * @param prettyName display name read from the OLE header
 * @param className OLE class name read from the OLE header
 * @param typeName type name read from the OLE header
 * @param blobBb buffer positioned at the start of the data block
 * @param dataBlockLen length of the data block in bytes
 * @return the parsed content wrapper (never {@code null})
 */
private static ContentImpl createSimplePackageContent(OleBlobImpl blob, String prettyName, String className,
        String typeName, ByteBuffer blobBb, int dataBlockLen) {

    int dataBlockPos = blobBb.position();
    // restrict the working view to just this data block
    ByteBuffer bb = PageChannel.narrowBuffer(blobBb, dataBlockPos, dataBlockPos + dataBlockLen);

    int packageSig = bb.getShort();
    if (packageSig != PACKAGE_STREAM_SIGNATURE) {
        // not a package stream; treat the whole block as opaque content
        return new OtherContentImpl(blob, prettyName, className, typeName, dataBlockPos, dataBlockLen);
    }

    String fileName = readZeroTermStr(bb);
    String filePath = readZeroTermStr(bb);
    int packageType = bb.getInt();

    if (packageType == PS_EMBEDDED_FILE) {

        int localFilePathLen = bb.getInt();
        String localFilePath = readStr(bb, bb.position(), localFilePathLen);
        int dataLen = bb.getInt();
        int dataPos = bb.position();
        // skip past the embedded file data itself; only its position/length
        // are recorded in the returned content
        bb.position(dataLen + dataPos);

        // remaining strings are in "reverse" order (local file path, file name,
        // file path).  these strings use a real utf charset, and therefore can
        // "fix" problems with ascii based names (so we prefer these strings to
        // the original strings we found)
        int strNum = 0;
        while (true) {

            int rem = bb.remaining();
            if (rem < 4) {
                break;
            }

            int strLen = bb.getInt();
            String remStr = readStr(bb, bb.position(), strLen * 2, OLE_UTF_CHARSET);

            switch (strNum) {
            case 0:
                localFilePath = remStr;
                break;
            case 1:
                fileName = remStr;
                break;
            case 2:
                filePath = remStr;
                break;
            default:
                // ignore any unexpected extra strings
            }

            ++strNum;
        }

        return new SimplePackageContentImpl(blob, prettyName, className, typeName, dataPos, dataLen, fileName,
                filePath, localFilePath);
    }

    if (packageType == PS_LINKED_FILE) {

        bb.getShort(); // unknown/unused field
        String linkStr = readZeroTermStr(bb);

        return new LinkContentImpl(blob, prettyName, className, typeName, fileName, linkStr, filePath);
    }

    // unknown package type; fall back to opaque content
    return new OtherContentImpl(blob, prettyName, className, typeName, dataBlockPos, dataBlockLen);
}

From source file:com.glaf.core.util.FileUtils.java

/**
 * Drains the given input stream into a byte array using NIO channels.
 *
 * @param inputStream the stream to drain; may be {@code null}
 * @return all bytes read from the stream, or {@code null} when
 *         {@code inputStream} is {@code null}
 * @throws RuntimeException wrapping any {@link IOException} raised while
 *         reading
 */
public static byte[] getBytes(InputStream inputStream) {
    if (inputStream == null) {
        return null;
    }
    ByteArrayOutputStream sink = null;
    try {
        sink = new ByteArrayOutputStream(32 * 1024);
        final WritableByteChannel out = Channels.newChannel(sink);
        final ReadableByteChannel in = Channels.newChannel(inputStream);
        final ByteBuffer chunk = ByteBuffer.allocate(8192);
        // Keep pumping while the source still has data or the chunk still
        // holds bytes that have not been flushed to the sink.
        while (in.read(chunk) > 0 || chunk.position() != 0) {
            chunk.flip();
            out.write(chunk);
            chunk.compact();
        }
        return sink.toByteArray();
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    } finally {
        if (sink != null) {
            try {
                sink.close();
                sink = null;
            } catch (IOException ignored) {
                // ByteArrayOutputStream.close() is a no-op; nothing to do.
            }
        }
    }
}

From source file:Main.java

/**
 * Decodes the specified URL as per RFC 3986, i.e. transforms
 * percent-encoded octets to characters by decoding with the UTF-8 character
 * set. This function is primarily intended for usage with
 * {@link URL} which unfortunately does not enforce proper URLs. As
 * such, this method will leniently accept invalid characters or malformed
 * percent-encoded octets and simply pass them literally through to the
 * result string. Except for rare edge cases, this will make unencoded URLs
 * pass through unaltered./*from w w w .ja v  a2 s  . com*/
 *
 * @param url The URL to decode, may be {@code null}.
 * @return The decoded URL or {@code null} if the input was
 * {@code null}.
 */
static String decodeUrl(String url) {
    String decoded = url;
    if (url != null && url.indexOf('%') >= 0) {
        int n = url.length();
        StringBuffer buffer = new StringBuffer();
        ByteBuffer bytes = ByteBuffer.allocate(n);
        for (int i = 0; i < n;) {
            if (url.charAt(i) == '%') {
                try {
                    do {
                        byte octet = (byte) Integer.parseInt(url.substring(i + 1, i + 3), 16);
                        bytes.put(octet);
                        i += 3;
                    } while (i < n && url.charAt(i) == '%');
                    continue;
                } catch (RuntimeException e) {
                    // malformed percent-encoded octet, fall through and
                    // append characters literally
                } finally {
                    if (bytes.position() > 0) {
                        bytes.flip();
                        buffer.append(UTF8.decode(bytes).toString());
                        bytes.clear();
                    }
                }
            }
            buffer.append(url.charAt(i++));
        }
        decoded = buffer.toString();
    }
    return decoded;
}

From source file:com.glaf.core.util.ByteBufferUtils.java

/**
 * @return a new copy of the readable bytes of @param buffer. USUALLY YOU
 *         SHOULD USE ByteBuffer.duplicate() INSTEAD, which creates a new
 *         Buffer (so you can mutate its position without affecting the
 *         original) without copying the underlying array.
 */
public static ByteBuffer clone(ByteBuffer buffer) {
    assert buffer != null;

    final int length = buffer.remaining();
    if (length == 0) {
        return EMPTY_BYTE_BUFFER;
    }

    final ByteBuffer copy = ByteBuffer.allocate(length);
    if (buffer.hasArray()) {
        // Backed by an accessible array: copy directly, leaving the source
        // buffer's position untouched.
        System.arraycopy(buffer.array(), buffer.arrayOffset() + buffer.position(), copy.array(), 0, length);
    } else {
        // Direct or read-only buffer: bulk-put via a duplicate so the
        // original's position is not advanced.
        copy.put(buffer.duplicate());
        copy.flip();
    }
    return copy;
}

From source file:com.wandrell.example.swss.test.util.factory.SecureSoapMessages.java

/**
 * Generates the digest value for the SOAP secure header.
 * <p>/*from   w  w w  .ja  v  a  2 s.c  om*/
 * This is a codified password, with the help of the date and nonce values.
 * Both of these values should be found on the SOAP secure header.
 *
 * @param password
 *            password to digest
 * @param date
 *            date used on the SOAP header
 * @param nonce
 *            nonce used on the SOAP header
 * @return the digested password
 * @throws UnsupportedEncodingException
 *             if the UTF-8 encoding is not supported
 */
private static final String generateDigest(final String password, final String date, final String nonce)
        throws UnsupportedEncodingException {
    final ByteBuffer buf; // Buffers storing the data to digest
    byte[] toHash; // Bytes to generate the hash

    // Fills buffer with data to digest
    buf = ByteBuffer.allocate(1000);
    buf.put(Base64.decodeBase64(nonce));
    buf.put(date.getBytes("UTF-8"));
    buf.put(password.getBytes("UTF-8"));

    // Initializes hash bytes to the correct size
    toHash = new byte[buf.position()];
    buf.rewind();

    // Copies bytes from the buffer to the hash bytes
    buf.get(toHash);

    return Base64.encodeBase64String(DigestUtils.sha1(toHash));
}

From source file:de.csdev.ebus.command.EBusCommandUtils.java

/**
 * Builds an escaped eBUS master telegram part from the given header bytes
 * and payload: QQ ZZ PB SB NN &lt;escaped payload&gt; CRC. The returned
 * buffer has its limit set to the telegram length and its position reset
 * to 0, ready for reading.
 *
 * @param source the source address (QQ)
 * @param target the target address (ZZ)
 * @param command the command bytes (PB SB)
 * @param masterData the unescaped payload bytes
 * @return a buffer containing the escaped master telegram part
 * @throws EBusTypeException
 */
public static ByteBuffer buildPartMasterTelegram(byte source, byte target, byte[] command, byte[] masterData)
        throws EBusTypeException {

    ByteBuffer buf = ByteBuffer.allocate(50);

    buf.put(source); // QQ - Source
    buf.put(target); // ZZ - Target
    buf.put(command); // PB SB - Command
    buf.put((byte) masterData.length); // NN - unescaped payload length

    // add the escaped bytes
    for (byte b : masterData) {
        buf.put(escapeSymbol(b));
    }

    // crc is computed over everything written so far (header + escaped payload)
    byte crc8 = EBusUtils.crc8(buf.array(), buf.position());

    buf.put(escapeSymbol(crc8));

    // set limit and reset position so the caller can read the telegram
    buf.limit(buf.position());
    buf.position(0);

    return buf;
}

From source file:com.blm.orc.ReaderImpl.java

/**
 * Ensure this is an ORC file to prevent users from trying to read text
 * files or RC files as ORC files.
 *
 * @param in the file being read
 * @param path the filename for error messages
 * @param psLen the postscript length
 * @param buffer the tail of the file
 * @throws IOException if the file is malformed or not an ORC file
 */
static void ensureOrcFooter(FSDataInputStream in, Path path, int psLen, ByteBuffer buffer) throws IOException {
    int len = OrcFile.MAGIC.length();
    // the postscript must at least hold the magic plus one byte
    if (psLen < len + 1) {
        throw new IOException("Malformed ORC file " + path + ". Invalid postscript length " + psLen);
    }
    // offset of the magic string: it ends immediately before the final
    // postscript-length byte at the end of the buffer
    int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - 1 - len;
    byte[] array = buffer.array();
    // now look for the magic string at the end of the postscript.
    if (!Text.decode(array, offset, len).equals(OrcFile.MAGIC)) {
        // If it isn't there, this may be the 0.11.0 version of ORC.
        // Read the first 3 bytes of the file to check for the header
        in.seek(0);
        byte[] header = new byte[len];
        in.readFully(header, 0, len);
        // if it isn't there, this isn't an ORC file
        if (!Text.decode(header, 0, len).equals(OrcFile.MAGIC)) {
            throw new IOException("Malformed ORC file " + path + ". Invalid postscript.");
        }
    }
}