Example usage for java.nio ByteBuffer remaining

Introduction

On this page you can find example usages of java.nio.ByteBuffer.remaining().

Prototype

public final int remaining() 

Document

Returns the number of remaining elements in this buffer, that is, limit - position.
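
A minimal standalone sketch (the class name RemainingDemo is illustrative) showing that remaining() tracks limit - position across a write and a flip():

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16); // position = 0, limit = 16
        System.out.println(buf.remaining());      // 16
        buf.putInt(42);                           // position advances to 4
        System.out.println(buf.remaining());      // 12, i.e. limit - position
        buf.flip();                               // limit = 4, position = 0
        System.out.println(buf.remaining());      // 4 bytes available to read
    }
}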

Usage

From source file:org.apache.hadoop.hbase.ipc.IPCUtil.java

/**
 * Write out header, param, and cell block if there is one.
 * @param dos
 * @param header
 * @param param
 * @param cellBlock
 * @return Total number of bytes written.
 * @throws IOException
 */
static int write(final OutputStream dos, final Message header, final Message param, final ByteBuffer cellBlock)
        throws IOException {
    // Must calculate the total size and write it first so the other side can read it all in
    // one swoop. This is dictated by how the server is currently written; the server needs to
    // change if we are to be able to write without the length prefixing.
    int totalSize = IPCUtil.getTotalSizeWhenWrittenDelimited(header, param);
    if (cellBlock != null)
        totalSize += cellBlock.remaining();
    return write(dos, header, param, cellBlock, totalSize);
}

From source file:org.apache.hadoop.hbase.client.Mutation.java

static void checkRow(ByteBuffer row) {
    if (row == null) {
        throw new IllegalArgumentException("Row buffer is null");
    }
    if (row.remaining() == 0) {
        throw new IllegalArgumentException("Row length is 0");
    }
    if (row.remaining() > HConstants.MAX_ROW_LENGTH) {
        throw new IllegalArgumentException(
                "Row length " + row.remaining() + " is > " + HConstants.MAX_ROW_LENGTH);
    }
}

From source file:com.easemob.dataexport.utils.ConversionUtils.java

public static UUID uuid(ByteBuffer bb) {
    if (bb == null) {
        return null;
    }
    if (bb.remaining() < 16) {
        return null;
    }
    bb = bb.slice();
    return new UUID(bb.getLong(), bb.getLong());
}
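
For context, a hedged round-trip sketch for the helper above; it assumes a static import of ConversionUtils.uuid and otherwise uses only JDK types:

import static com.easemob.dataexport.utils.ConversionUtils.uuid;

import java.nio.ByteBuffer;
import java.util.UUID;

public class UuidRoundTrip {
    public static void main(String[] args) {
        UUID original = UUID.randomUUID();
        ByteBuffer bb = ByteBuffer.allocate(16);
        bb.putLong(original.getMostSignificantBits());
        bb.putLong(original.getLeastSignificantBits());
        bb.flip(); // remaining() is now 16, so the guard in uuid() passes
        // uuid() reads through a slice, so bb's position is left untouched
        System.out.println(original.equals(uuid(bb))); // true
    }
}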

From source file:io.blobkeeper.file.util.FileUtils.java

public static long getCrc(@NotNull File file) {
    CRC32 crc = new CRC32();

    while (true) {
        ByteBuffer buffer = ByteBuffer.allocate(CHUNK_SIZE);
        while (buffer.hasRemaining()) {
            int bytes = 0;
            try {
                bytes = file.getFileChannel().read(buffer);
            } catch (IOException e) {
                log.error("Can't read blob file " + file, e);
                throw new IllegalArgumentException(e);
            }
            if (bytes < 0) {
                break;
            }
        }
        buffer.flip();
        if (buffer.remaining() == 0) {
            break;
        } else {
            crc.update(buffer.array(), 0, buffer.remaining()); // only the bytes actually read
        }
    }

    return crc.getValue();
}

From source file:com.easemob.dataexport.utils.ConversionUtils.java

public static String string(ByteBuffer bytes) {
    if (bytes == null) {
        return null;
    }
    return string(bytes.array(), bytes.arrayOffset() + bytes.position(), bytes.remaining(), UTF8_ENCODING);
}

From source file:com.healthmarketscience.jackcess.impl.OleUtil.java

/**
 * creates the appropriate ContentImpl for the given blob.
 */
private static ContentImpl parseContent(OleBlobImpl blob) throws IOException {
    ByteBuffer bb = PageChannel.wrap(blob.getBytes());

    if ((bb.remaining() < 2) || (bb.getShort() != PACKAGE_SIGNATURE)) {
        return new UnknownContentImpl(blob);
    }

    // read outer package header
    int headerSize = bb.getShort();
    int objType = bb.getInt();
    int prettyNameLen = bb.getShort();
    int classNameLen = bb.getShort();
    int prettyNameOff = bb.getShort();
    int classNameOff = bb.getShort();
    int objSize = bb.getInt();
    String prettyName = readStr(bb, prettyNameOff, prettyNameLen);
    String className = readStr(bb, classNameOff, classNameLen);
    bb.position(headerSize);

    // read ole header
    int oleVer = bb.getInt();
    int format = bb.getInt();

    if (oleVer != OLE_VERSION) {
        return new UnknownContentImpl(blob);
    }

    int typeNameLen = bb.getInt();
    String typeName = readStr(bb, bb.position(), typeNameLen);
    bb.getLong(); // unused
    int dataBlockLen = bb.getInt();
    int dataBlockPos = bb.position();

    if (SIMPLE_PACKAGE_TYPE.equalsIgnoreCase(typeName)) {
        return createSimplePackageContent(blob, prettyName, className, typeName, bb, dataBlockLen);
    }

    // if COMPOUND_FACTORY is null, the poi library isn't available, so just
    // load compound data as "other"
    if ((COMPOUND_FACTORY != null) && (bb.remaining() >= COMPOUND_STORAGE_SIGNATURE.length)
            && ByteUtil.matchesRange(bb, bb.position(), COMPOUND_STORAGE_SIGNATURE)) {
        return COMPOUND_FACTORY.createCompoundPackageContent(blob, prettyName, className, typeName, bb,
                dataBlockLen);
    }

    // this is either some other "special" (as yet unhandled) format, or it is
    // simply an embedded file (or it is compound data and poi isn't available)
    return new OtherContentImpl(blob, prettyName, className, typeName, dataBlockPos, dataBlockLen);
}

From source file:com.glaf.core.util.ByteBufferUtils.java

/**
 * Copies the remaining bytes of the given buffer into a new byte array,
 * leaving the buffer's position unchanged.
 * 
 * @param buffer the buffer to copy from
 * @return the remaining bytes as a byte array
 */
public static byte[] toArray(ByteBuffer buffer) {
    // heap buffer: copy straight from the backing array
    if (buffer.hasArray()) {
        byte[] array = buffer.array();
        int from = buffer.arrayOffset() + buffer.position();
        return Arrays.copyOfRange(array, from, from + buffer.remaining());
    }
    // direct buffer: copy the remaining bytes through a slice
    else {
        byte[] to = new byte[buffer.remaining()];
        buffer.slice().get(to);
        return to;
    }
}
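
A short usage sketch for toArray (assuming com.glaf.core.util.ByteBufferUtils is on the classpath) exercising both the heap and the direct branch:

import java.nio.ByteBuffer;

import com.glaf.core.util.ByteBufferUtils;

public class ToArrayDemo {
    public static void main(String[] args) {
        // heap branch: the position is honored, so only "ello" is copied
        ByteBuffer heap = ByteBuffer.wrap("hello".getBytes());
        heap.get(); // consume 'h'
        System.out.println(new String(ByteBufferUtils.toArray(heap))); // ello

        // direct branch: remaining() bytes are copied through a slice
        ByteBuffer direct = ByteBuffer.allocateDirect(4);
        direct.putInt(42);
        direct.flip();
        System.out.println(ByteBufferUtils.toArray(direct).length); // 4
    }
}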

From source file:org.apache.hadoop.fs.TestEnhancedByteBufferAccess.java

private static byte[] byteBufferToArray(ByteBuffer buf) {
    byte resultArray[] = new byte[buf.remaining()];
    buf.get(resultArray);
    buf.flip(); // rewind so the buffer can be read again
    return resultArray;
}

From source file:com.blm.orc.ReaderImpl.java

private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, Path path, long maxFileLength)
        throws IOException {
    FSDataInputStream file = fs.open(path);

    // figure out the size of the file using the option or filesystem
    long size;
    if (maxFileLength == Long.MAX_VALUE) {
        size = fs.getFileStatus(path).getLen();
    } else {
        size = maxFileLength;
    }

    //read last bytes into buffer to get PostScript
    int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
    file.seek(size - readSize);
    ByteBuffer buffer = ByteBuffer.allocate(readSize);
    file.readFully(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());

    //read the PostScript
    //get length of PostScript
    int psLen = buffer.get(readSize - 1) & 0xff;
    ensureOrcFooter(file, path, psLen, buffer);
    int psOffset = readSize - 1 - psLen;
    CodedInputStream in = CodedInputStream.newInstance(buffer.array(), buffer.arrayOffset() + psOffset, psLen);
    OrcProto.PostScript ps = OrcProto.PostScript.parseFrom(in);

    checkOrcVersion(LOG, path, ps.getVersionList());

    int footerSize = (int) ps.getFooterLength();
    int metadataSize = (int) ps.getMetadataLength();
    OrcFile.WriterVersion writerVersion;
    if (ps.hasWriterVersion()) {
        writerVersion = getWriterVersion(ps.getWriterVersion());
    } else {
        writerVersion = OrcFile.WriterVersion.ORIGINAL;
    }

    //check compression codec
    switch (ps.getCompression()) {
    case NONE:
        break;
    case ZLIB:
        break;
    case SNAPPY:
        break;
    case LZO:
        break;
    default:
        throw new IllegalArgumentException("Unknown compression");
    }

    //check if extra bytes need to be read
    int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
    if (extra > 0) {
        //more bytes need to be read, seek back to the right place and read extra bytes
        file.seek(size - readSize - extra);
        ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
        file.readFully(extraBuf.array(), extraBuf.arrayOffset() + extraBuf.position(), extra);
        extraBuf.position(extra);
        //append with already read bytes
        extraBuf.put(buffer);
        buffer = extraBuf;
        buffer.position(0);
        buffer.limit(footerSize + metadataSize);
    } else {
        //footer is already in the bytes in buffer, just adjust position, length
        buffer.position(psOffset - footerSize - metadataSize);
        buffer.limit(psOffset);
    }

    // remember position for later
    buffer.mark();

    file.close();

    return new FileMetaInfo(ps.getCompression().toString(), (int) ps.getCompressionBlockSize(),
            (int) ps.getMetadataLength(), buffer, ps.getVersionList(), writerVersion);
}

From source file:org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils.java

/**
 * Read the list of ranges from the file.
 * @param file the file to read
 * @param zcr zero-copy reader shim, or null to fall back to regular reads
 * @param base the base offset of the stripe
 * @param range the disk ranges within the stripe to read
 * @param doForceDirect whether to allocate direct buffers for the data
 * @return the bytes read for each disk range, which is the same length as
 *    ranges
 * @throws IOException
 */
static DiskRangeList readDiskRanges(FSDataInputStream file, ZeroCopyReaderShim zcr, long base,
        DiskRangeList range, boolean doForceDirect) throws IOException {
    if (range == null)
        return null;
    DiskRangeList prev = range.prev;
    if (prev == null) {
        prev = new DiskRangeListMutateHelper(range);
    }
    while (range != null) {
        if (range.hasData()) {
            range = range.next;
            continue;
        }
        int len = (int) (range.getEnd() - range.getOffset());
        long off = range.getOffset();
        file.seek(base + off);
        if (zcr != null) {
            boolean hasReplaced = false;
            while (len > 0) {
                ByteBuffer partial = zcr.readBuffer(len, false);
                BufferChunk bc = new BufferChunk(partial, off);
                if (!hasReplaced) {
                    range.replaceSelfWith(bc);
                    hasReplaced = true;
                } else {
                    range.insertAfter(bc);
                }
                range = bc;
                int read = partial.remaining();
                len -= read;
                off += read;
            }
        } else if (doForceDirect) {
            ByteBuffer directBuf = ByteBuffer.allocateDirect(len);
            readDirect(file, len, directBuf);
            range = range.replaceSelfWith(new BufferChunk(directBuf, range.getOffset()));
        } else {
            byte[] buffer = new byte[len];
            file.readFully(buffer, 0, buffer.length);
            range = range.replaceSelfWith(new BufferChunk(ByteBuffer.wrap(buffer), range.getOffset()));
        }
        range = range.next;
    }
    return prev.next;
}