Example usage for java.nio ByteBuffer clear

Introduction

This page collects usage examples for java.nio.ByteBuffer.clear() drawn from open-source projects.

Prototype

public final Buffer clear() 

Document

Clears this buffer: the position is set to zero, the limit is set to the capacity, and the mark is discarded.
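
A minimal sketch of what this means in practice, with an illustrative buffer: clear() resets the position and limit so the buffer can be refilled, but the bytes already stored are not erased.

import java.nio.ByteBuffer;

public class ClearDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.put((byte) 1).put((byte) 2).put((byte) 3);

        buf.clear(); // position = 0, limit = capacity (8), mark discarded

        System.out.println(buf.position()); // 0
        System.out.println(buf.limit());    // 8
        System.out.println(buf.get(0));     // 1 -- clear() did not erase the data
    }
}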

Usage

From source file:net.cellcloud.talk.stuff.PrimitiveSerializer.java

/** Deserializes a primitive from the given input stream. */
public static void read(Primitive primitive, InputStream stream) {
    /*
    Wire format:
    [version]{stuff}...{stuff}[dialect@tracker]

    Example:
    [01.00]{sub=cloud:string}{pre=add:string}[Action@Ambrose]
    */

    try {
        byte phase = PARSE_PHASE_UNKNOWN;
        int read = 0;

        ByteBuffer buf = ByteBuffer.allocate(BLOCK);
        byte[] type = new byte[3];
        byte[] value = null;
        byte[] literal = null;
        int length = 0;

        while ((read = stream.read()) >= 0) {

            // dispatch on the current parse phase
            switch (phase) {

            case PARSE_PHASE_VALUE:
                // a backslash escapes structural tokens inside a value
                if (read == '\\') {
                    // read the escaped byte
                    int next = stream.read();
                    if (next == TOKEN_OPEN_BRACE || next == TOKEN_CLOSE_BRACE || next == TOKEN_OPERATE_ASSIGN
                            || next == TOKEN_OPERATE_DECLARE) {
                        buf.put((byte) next);
                        ++length;
                    } else {
                        buf.put((byte) read);
                        buf.put((byte) next);
                        length += 2;
                    }

                    // escape handled; continue with the next byte
                    continue;
                }

                if (read == TOKEN_OPERATE_DECLARE) {
                    // declare token ends the value: capture it, then read the literal
                    buf.flip();
                    value = new byte[length];
                    buf.get(value, 0, length);
                    buf.clear();

                    phase = PARSE_PHASE_LITERAL;
                    length = 0;
                    continue;
                }

                buf.put((byte) read);
                ++length;
                break;

            case PARSE_PHASE_TYPE:
                if (read == TOKEN_OPERATE_ASSIGN) {
                    // assign token ends the type: capture it, then read the value
                    buf.flip();
                    buf.get(type);
                    buf.clear();

                    phase = PARSE_PHASE_VALUE;
                    length = 0;
                    continue;
                }
                // accumulate type bytes
                buf.put((byte) read);
                break;

            case PARSE_PHASE_LITERAL:
                if (read == TOKEN_CLOSE_BRACE) {
                    // closing brace ends the stuff: capture the literal
                    buf.flip();
                    literal = new byte[length];
                    buf.get(literal, 0, length);
                    buf.clear();

                    // inject the parsed stuff into the primitive
                    injectStuff(primitive, type, value, literal);

                    phase = PARSE_PHASE_DIALECT;
                    length = 0;
                    continue;
                }
                buf.put((byte) read);
                ++length;
                break;

            case PARSE_PHASE_STUFF:
                if (read == TOKEN_OPEN_BRACE) {
                    // opening brace starts the next stuff's type
                    phase = PARSE_PHASE_TYPE;
                    buf.clear();
                }
                break;

            case PARSE_PHASE_VERSION:
                if (read == TOKEN_CLOSE_BRACKET) {
                    // closing bracket ends the version section
                    phase = PARSE_PHASE_STUFF;
                    continue;
                }
                buf.put((byte) read);
                break;

            case PARSE_PHASE_DIALECT:
                if (read == TOKEN_OPEN_BRACE) {
                    phase = PARSE_PHASE_TYPE;
                    buf.clear();
                } else if (read == TOKEN_OPEN_BRACKET) {
                    // opening bracket starts the dialect payload
                    buf.clear();
                } else if (read == TOKEN_CLOSE_BRACKET) {
                    // closing bracket ends the dialect; deserialize it
                    deserializeDialect(primitive, new String(buf.array(), 0, length, Charset.forName("UTF-8")));
                } else {
                    // accumulate dialect bytes
                    buf.put((byte) read);
                    ++length;
                }
                break;

            default:
                if (read == TOKEN_OPEN_BRACE) {
                    phase = PARSE_PHASE_TYPE;
                    buf.clear();
                } else if (read == TOKEN_OPEN_BRACKET) {
                    phase = PARSE_PHASE_VERSION;
                    buf.clear();
                }
                break;
            }
        }

        buf.clear();

    } catch (IOException e) {
        Logger.log(PrimitiveSerializer.class, e, LogLevel.ERROR);
    }
}
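
The parser above cycles a single buffer through put(), flip(), get(), and clear() for every token it extracts. A stripped-down sketch of that accumulate-drain-reset idiom (class name and token bytes are illustrative):

import java.nio.ByteBuffer;

public class TokenBufferSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);

        // accumulate: put() advances the position
        buf.put((byte) 'a').put((byte) 'b');

        // drain: flip() sets limit = position and position = 0
        buf.flip();
        byte[] token = new byte[buf.remaining()];
        buf.get(token);

        // reuse: clear() restores position = 0 and limit = capacity
        buf.clear();
        System.out.println(new String(token)); // "ab"
    }
}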

From source file:com.serenegiant.media.TLMediaEncoder.java

/**
 * Feeds input data to the MediaCodec encoder.
 * If you use a Surface to supply input to the encoder, you should not call this method.
 * @param buffer the data to encode
 * @param length length of the data in bytes; zero means EOS
 * @param presentationTimeUs presentation timestamp in microseconds
 */
//   protected void encode(final byte[] buffer, final int length, final long presentationTimeUs) {
protected void encode(final ByteBuffer buffer, int length, long presentationTimeUs) {
    if (!mIsRunning || !isRecording())
        return;
    while (mIsRunning) {
        final int inputBufferIndex = mMediaCodec.dequeueInputBuffer(TIMEOUT_USEC);
        if (inputBufferIndex >= 0) {
            final ByteBuffer inputBuffer = encoderInputBuffers[inputBufferIndex];
            inputBuffer.clear();
            if (buffer != null) {
                inputBuffer.put(buffer);
            }
            if (length <= 0) {
                // send EOS
                mIsEOS = true;
                if (DEBUG)
                    Log.i(TAG, "send BUFFER_FLAG_END_OF_STREAM");
                mMediaCodec.queueInputBuffer(inputBufferIndex, 0, 0, presentationTimeUs,
                        MediaCodec.BUFFER_FLAG_END_OF_STREAM);
            } else {
                mMediaCodec.queueInputBuffer(inputBufferIndex, 0, length, presentationTimeUs, 0);
            }
            break;
        } else if (inputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
            // wait until the MediaCodec encoder is ready to accept input
            // nothing to do here because MediaCodec#dequeueInputBuffer(TIMEOUT_USEC)
            // will wait for maximum TIMEOUT_USEC(10msec) on each call
        }
    }
}

From source file:org.alfresco.contentstore.ChecksumTest.java

private void assertEqual(InputStream expected, InputStream actual) throws IOException {
    ByteBuffer bb1 = ByteBuffer.allocate(1024);
    ByteBuffer bb2 = ByteBuffer.allocate(1024);

    try (ReadableByteChannel expectedChannel = Channels.newChannel(expected);
            ReadableByteChannel actualChannel = Channels.newChannel(actual)) {
        State state = new State();
        for (;;) {
            int numRead1 = expectedChannel.read(bb1);
            bb1.flip();

            int numRead2 = actualChannel.read(bb2);
            bb2.flip();

            assertEqual(bb1, bb2, state);

            if (numRead1 < 1) {
                break;
            }

            bb1.clear();
            bb2.clear();
        }
    }
}
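
The read/flip/compare/clear cycle above is the canonical loop for reusing a buffer with a channel: fill from the channel, flip to drain, then clear before the next read. A minimal sketch of the same pattern, assuming a simple in-memory stream:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class ReadLoopSketch {
    public static void main(String[] args) throws IOException {
        ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream("hello".getBytes()));
        ByteBuffer buf = ByteBuffer.allocate(4);
        while (ch.read(buf) != -1) {
            buf.flip();  // switch from filling to draining
            while (buf.hasRemaining()) {
                System.out.print((char) buf.get());
            }
            buf.clear(); // make the whole buffer writable again
        }
        ch.close();
    }
}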

From source file:org.apache.hadoop.hdfs.server.datanode.IABlockSender.java

/**
 * Sends up to maxChunks chunks of data. Used by encoded read.
 *
 * When blockInPosition is >= 0, assumes 'out' is a
 * {@link SocketOutputStream} and tries
 * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
 * send data (and updates blockInPosition).
 */
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out, BlockingQueue<ByteBuffer> q)
        throws IOException {
    //LOG.info("anchor Send_packet "+seqno);
    // Sends multiple chunks in one packet with a single write().

    int len = (int) Math.min((endOffset - offset), (((long) bytesPerChecksum) * ((long) maxChunks)));
    int numChunks = (len + bytesPerChecksum - 1) / bytesPerChecksum;
    //boolean lastDataPacket = offset + len == endOffset && len > 0;
    int packetLen = len + numChunks * checksumSize + 4;
    //initial packet
    pkt.clear();

    //header
    PacketHeader header = new PacketHeader(packetLen, offset, seqno, (len == 0), len);
    header.putInBuffer(pkt);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();

    int dataOff = checksumOff + checksumLen;
    /*
    LOG.info("real length of the packet " + (dataOff + len) + " maxchunks " + maxChunks
        + " num chunks " + numChunks);
    */
    //read data from the ring buffer. Due to some padding problems, we need a global cache.
    //may have a better design
    if (cache == null)
        try {
            cache = q.take();
        } catch (InterruptedException e) {
        }

    int r = cache.remaining();
    int taken = 0;
    while (r < len) {
        cache.get(buf, dataOff + taken, r - taken);
        try {
            LOG.info("before taken new package with remaining:" + r);
            cache = q.take();
        } catch (InterruptedException e) {
        }
        taken = r;
        r += cache.remaining();
    }

    //LOG.info("dataOff: "+dataOff+" taken: "+taken+" len:"+len);
    cache.get(buf, dataOff + taken, len - taken);

    //create checksum
    for (int i = checksumOff; i < checksumOff + checksumLen; i += checksumSize) {
        checksum.reset();
        int bufOff = (i - checksumOff) / checksumSize * bytesPerChecksum + dataOff;
        checksum.update(buf, bufOff, bytesPerChecksum);
        checksum.writeValue(buf, i, true);
    }
    //LOG.info("anchor Send_packet "+seqno+" Checksum_generated");

    try {
        if (blockInPosition >= 0) {
            //should not be used.
            LOG.warn("encoded read should not used transferTo().");
            //use transferTo(). Checks on out and blockIn are already done. 

            //SocketOutputStream sockOut = (SocketOutputStream)out;
            //first write the packet
            //sockOut.write(buf, 0, dataOff);
            // no need to flush. since we know out is not a buffered stream. 

            //sockOut.transferToFully(((FileInputStream)blockIn).getChannel(), 
            //                        blockInPosition, len);

            //blockInPosition += len;
        } else {
            // normal transfer
            /* LOG.info("send packet with Length: "+len+" Offset: "+offset); */
            out.write(buf, 0, dataOff + len);
        }
        //LOG.info("anchor Send_packet "+seqno+" Sent");

    } catch (IOException e) {
        /* Exception while writing to the client. Connection closure from
         * the other end is mostly the case and we do not care much about
         * it. But other things can go wrong, especially in transferTo(),
         * which we do not want to ignore.
         *
         * The message parsing below should not be considered as a good
         * coding example. NEVER do it to drive a program logic. NEVER.
         * It was done here because the NIO throws an IOException for EPIPE.
         */
        String ioem = e.getMessage();
        if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
            LOG.error("BlockSender.sendChunks() exception: ", e);
        }
        throw ioeToSocketException(e);
    }

    if (throttler != null) { // rebalancing so throttle
        throttler.throttle(packetLen);
    }

    return len;
}

From source file:org.apache.hadoop.hdfs.server.datanode.PMBlockSender.java

/**
 * Sends up to maxChunks chunks of data. Used by encoded read.
 *
 * When blockInPosition is >= 0, assumes 'out' is a
 * {@link SocketOutputStream} and tries
 * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
 * send data (and updates blockInPosition).
 */
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out, BlockingQueue<ByteBuffer> q)
        throws IOException {
    //LOG.info("anchor Send_packet "+seqno);
    // Sends multiple chunks in one packet with a single write().

    int len = (int) Math.min((endOffset - offset), (((long) bytesPerChecksum) * ((long) maxChunks)));
    int numChunks = (len + bytesPerChecksum - 1) / bytesPerChecksum;
    //boolean lastDataPacket = offset + len == endOffset && len > 0;
    int packetLen = len + numChunks * checksumSize + 4;
    //initial packet
    pkt.clear();

    //header
    PacketHeader header = new PacketHeader(packetLen, offset, seqno, (len == 0), len);
    header.putInBuffer(pkt);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();

    int dataOff = checksumOff + checksumLen;
    /*
    LOG.info("real length of the packet " + (dataOff + len) + " maxchunks " + maxChunks
        + " num chunks " + numChunks);
    */
    //read data from the ring buffer. Due to some padding problems, we need a global cache.
    //may have a better design

    if (cache == null)
        try {
            cache = q.take();
        } catch (InterruptedException e) {
        }

    int r = cache.remaining();
    int taken = 0;
    while (r < len) {
        cache.get(buf, dataOff + taken, r - taken);
        try {
            //LOG.info("before taken new package with remaining:"+r);
            cache = q.take();
        } catch (InterruptedException e) {
        }
        taken = r;
        r += cache.remaining();
    }

    //LOG.info("dataOff: "+dataOff+" taken: "+taken+" len:"+len);
    cache.get(buf, dataOff + taken, len - taken);

    //create checksum
    for (int i = checksumOff; i < checksumOff + checksumLen; i += checksumSize) {
        checksum.reset();
        int bufOff = (i - checksumOff) / checksumSize * bytesPerChecksum + dataOff;
        checksum.update(buf, bufOff, bytesPerChecksum);
        checksum.writeValue(buf, i, true);
    }
    //LOG.info("anchor Send_packet "+seqno+" Checksum_generated");

    try {
        if (blockInPosition >= 0) {
            //should not be used.
            LOG.warn("encoded read should not used transferTo().");
            //use transferTo(). Checks on out and blockIn are already done. 

            //SocketOutputStream sockOut = (SocketOutputStream)out;
            //first write the packet
            //sockOut.write(buf, 0, dataOff);
            // no need to flush. since we know out is not a buffered stream. 

            //sockOut.transferToFully(((FileInputStream)blockIn).getChannel(), 
            //                        blockInPosition, len);

            //blockInPosition += len;
        } else {
            // normal transfer
            /* LOG.info("send packet with Length: "+len+" Offset: "+offset); */
            out.write(buf, 0, dataOff + len);
        }
        //LOG.info("anchor Send_packet "+seqno+" Sent");

    } catch (IOException e) {
        /* Exception while writing to the client. Connection closure from
         * the other end is mostly the case and we do not care much about
         * it. But other things can go wrong, especially in transferTo(),
         * which we do not want to ignore.
         *
         * The message parsing below should not be considered as a good
         * coding example. NEVER do it to drive a program logic. NEVER.
         * It was done here because the NIO throws an IOException for EPIPE.
         */
        String ioem = e.getMessage();
        if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
            LOG.error("BlockSender.sendChunks() exception: ", e);
        }
        throw ioeToSocketException(e);
    }

    if (throttler != null) { // rebalancing so throttle
        throttler.throttle(packetLen);
    }

    return len;
}

From source file:org.apache.kylin.engine.mr.steps.FactDistinctColumnsReducer.java

private void outputStatistics(List<Long> allCuboids) throws IOException, InterruptedException {
    // output written to baseDir/statistics/statistics-r-00000 (etc)
    String statisticsFileName = BatchConstants.CFG_OUTPUT_STATISTICS + "/"
            + BatchConstants.CFG_OUTPUT_STATISTICS;

    ByteBuffer valueBuf = ByteBuffer.allocate(BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);

    // mapper overlap ratio at key -1
    long grandTotal = 0;
    for (HLLCounter hll : cuboidHLLMap.values()) {
        grandTotal += hll.getCountEstimate();
    }
    double mapperOverlapRatio = grandTotal == 0 ? 0 : (double) totalRowsBeforeMerge / grandTotal;
    mos.write(BatchConstants.CFG_OUTPUT_STATISTICS, new LongWritable(-1),
            new BytesWritable(Bytes.toBytes(mapperOverlapRatio)), statisticsFileName);

    // mapper number at key -2
    mos.write(BatchConstants.CFG_OUTPUT_STATISTICS, new LongWritable(-2),
            new BytesWritable(Bytes.toBytes(baseCuboidRowCountInMappers.size())), statisticsFileName);

    // sampling percentage at key 0
    mos.write(BatchConstants.CFG_OUTPUT_STATISTICS, new LongWritable(0L),
            new BytesWritable(Bytes.toBytes(samplingPercentage)), statisticsFileName);

    for (long i : allCuboids) {
        valueBuf.clear();
        cuboidHLLMap.get(i).writeRegisters(valueBuf);
        valueBuf.flip();
        mos.write(BatchConstants.CFG_OUTPUT_STATISTICS, new LongWritable(i),
                new BytesWritable(valueBuf.array(), valueBuf.limit()), statisticsFileName);
    }
}
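
Each iteration above reuses valueBuf as scratch space: clear() readies it for writing, writeRegisters() fills it, and flip() sets the limit so array() can be consumed up to limit(). A minimal sketch of that pattern; the emit helper is a hypothetical stand-in for mos.write:

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;

public class ScratchBufferSketch {
    // hypothetical sink, standing in for mos.write(...) in the example above
    static void emit(byte[] data, int length) {
        System.out.println(length + " bytes");
    }

    public static void main(String[] args) {
        List<byte[]> records = Arrays.asList("one".getBytes(), "two".getBytes());
        ByteBuffer scratch = ByteBuffer.allocate(1024);
        for (byte[] record : records) {
            scratch.clear();     // reset position and limit for a fresh write
            scratch.put(record); // serialize into the reused buffer
            scratch.flip();      // limit now marks the end of the valid bytes
            emit(scratch.array(), scratch.limit());
        }
    }
}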

From source file:org.apache.hadoop.hdfs.server.datanode.BlockSender.java

/**
 * Sends up to maxChunks chunks of data.
 * 
 * When blockInPosition is >= 0, assumes 'out' is a 
 * {@link SocketOutputStream} and tries 
 * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
 * send data (and updates blockInPosition).
 */
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out) throws IOException {
    // Sends multiple chunks in one packet with a single write().

    int len = Math.min((int) (endOffset - offset), bytesPerChecksum * maxChunks);

    // truncate len so that any partial chunks will be sent as a final packet.
    // this is not necessary for correctness, but partial chunks are 
    // ones that may be recomputed and sent via buffer copy, so try to minimize
    // those bytes
    if (len > bytesPerChecksum && len % bytesPerChecksum != 0) {
        len -= len % bytesPerChecksum;
    }

    if (len == 0) {
        return 0;
    }

    int numChunks = (len + bytesPerChecksum - 1) / bytesPerChecksum;
    int packetLen = len + numChunks * checksumSize + 4;
    pkt.clear();

    // write packet header
    pkt.putInt(packetLen);
    pkt.putLong(offset);
    pkt.putLong(seqno);
    pkt.put((byte) ((offset + len >= endOffset) ? 1 : 0));
    //why no ByteBuf.putBoolean()?
    pkt.putInt(len);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();

    if (checksumSize > 0 && checksumIn != null) {
        try {
            checksumIn.readFully(buf, checksumOff, checksumLen);
        } catch (IOException e) {
            LOG.warn(" Could not read or failed to veirfy checksum for data" + " at offset " + offset
                    + " for block " + block + " got : " + StringUtils.stringifyException(e));
            IOUtils.closeStream(checksumIn);
            checksumIn = null;
            if (corruptChecksumOk) {
                if (checksumLen > 0) {
                    // Just fill the checksum region with zeros.
                    Arrays.fill(buf, checksumOff, checksumOff + checksumLen, (byte) 0);
                }
            } else {
                throw e;
            }
        }
    }

    int dataOff = checksumOff + checksumLen;

    if (blockInPosition < 0) {
        //normal transfer
        IOUtils.readFully(blockIn, buf, dataOff, len);

        if (verifyChecksum) {
            int dOff = dataOff;
            int cOff = checksumOff;
            int dLeft = len;

            for (int i = 0; i < numChunks; i++) {
                checksum.reset();
                int dLen = Math.min(dLeft, bytesPerChecksum);
                checksum.update(buf, dOff, dLen);
                if (!checksum.compare(buf, cOff)) {
                    throw new ChecksumException("Checksum failed at " + (offset + len - dLeft), len);
                }
                dLeft -= dLen;
                dOff += dLen;
                cOff += checksumSize;
            }
        }

        // only recompute checksum if we can't trust the meta data due to 
        // concurrent writes
        if (memoizedBlock.hasBlockChanged(len)) {
            ChecksumUtil.updateChunkChecksum(buf, checksumOff, dataOff, len, checksum);
        }

        try {
            out.write(buf, 0, dataOff + len);
        } catch (IOException e) {
            throw ioeToSocketException(e);
        }
    } else {
        try {
            //use transferTo(). Checks on out and blockIn are already done. 
            SocketOutputStream sockOut = (SocketOutputStream) out;
            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            if (memoizedBlock.hasBlockChanged(len)) {
                fileChannel.position(blockInPosition);
                IOUtils.readFileChannelFully(fileChannel, buf, dataOff, len);

                ChecksumUtil.updateChunkChecksum(buf, checksumOff, dataOff, len, checksum);
                sockOut.write(buf, 0, dataOff + len);
            } else {
                //first write the packet
                sockOut.write(buf, 0, dataOff);
                // no need to flush. since we know out is not a buffered stream.
                sockOut.transferToFully(fileChannel, blockInPosition, len);
            }

            blockInPosition += len;

        } catch (IOException e) {
            /* exception while writing to the client (well, with transferTo(),
             * it could also be while reading from the local file).
             */
            throw ioeToSocketException(e);
        }
    }

    if (throttler != null) { // rebalancing so throttle
        throttler.throttle(packetLen);
    }

    return len;
}

From source file:org.apache.hadoop.hdfs.tools.offlineEditsViewer.TestOfflineEditsViewer.java

/**
 * Compares two files, ignoring trailing zeros at the end; for an edits log
 * the trailing zeros do not make any difference. Returns false if the files
 * are not the same.
 *
 * @param filenameSmall first file to compare (doesn't have to be smaller)
 * @param filenameLarge second file to compare (doesn't have to be larger)
 */
private boolean filesEqualIgnoreTrailingZeros(String filenameSmall, String filenameLarge) throws IOException {

    ByteBuffer small = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameSmall));
    ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge));
    // OEV outputs with the latest layout version, so tweak the old file's
    // contents to have latest version so checkedin binary files don't
    // require frequent updates
    small.put(3, (byte) NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);

    // now correct if it's otherwise
    if (small.capacity() > large.capacity()) {
        ByteBuffer tmpByteBuffer = small;
        small = large;
        large = tmpByteBuffer;
        String tmpFilename = filenameSmall;
        filenameSmall = filenameLarge;
        filenameLarge = tmpFilename;
    }

    // compare from 0 to capacity of small
    // the rest of the large should be only padding (OP_INVALID bytes)
    small.position(0);
    small.limit(small.capacity());
    large.position(0);
    large.limit(small.capacity());

    // compares position to limit
    if (!small.equals(large)) {
        return false;
    }

    // everything after limit should be 0xFF
    int i = large.limit();
    large.clear();
    for (; i < large.capacity(); i++) {
        if (large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
            return false;
        }
    }

    return true;
}
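
Note the role of large.clear() above: after the equals() comparison the limit is still narrowed, and clear() pushes it back to the capacity so the absolute large.get(i) calls can legally scan the tail, with the contents untouched. A minimal demonstration of that limit reset:

import java.nio.ByteBuffer;

public class LimitResetSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
        buf.limit(2);                   // narrow the window to [0, 2)
        // buf.get(3) here would throw IndexOutOfBoundsException
        buf.clear();                    // limit back to capacity; contents untouched
        System.out.println(buf.get(3)); // 4 -- absolute read past the old limit
    }
}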

From source file:com.tinspx.util.io.ChannelSourceTest.java

@Test
public void testByteBufferSource() throws IOException {
    int off = 443, len = 17167;
    ByteBuffer buf, direct;
    direct = ByteBuffer.allocateDirect(INPUT.length);
    assertTrue(direct.isDirect());
    direct.put(INPUT);
    byte[] sub = Arrays.copyOfRange(INPUT, off, off + len);

    //full input
    buf = ByteBuffer.wrap(INPUT);
    ByteSourceTests.testByteSource(ChannelSource.of(buf), INPUT);
    assertEquals(0, buf.position());
    assertEquals(INPUT.length, buf.limit());

    buf = ByteBuffer.wrap(INPUT).asReadOnlyBuffer();
    ByteSourceTests.testByteSource(ChannelSource.of(buf), INPUT);
    assertEquals(0, buf.position());
    assertEquals(INPUT.length, buf.limit());

    direct.clear();
    buf = direct;
    ByteSourceTests.testByteSource(ChannelSource.of(buf), INPUT);
    assertEquals(0, buf.position());
    assertEquals(INPUT.length, buf.limit());

    //sub range of input
    buf = ByteBuffer.wrap(INPUT);
    buf.clear().position(off).limit(off + len);
    ByteSourceTests.testByteSource(ChannelSource.of(buf), sub);
    assertEquals(off, buf.position());
    assertEquals(off + len, buf.limit());

    buf = ByteBuffer.wrap(INPUT).asReadOnlyBuffer();
    buf.clear().position(off).limit(off + len);
    ByteSourceTests.testByteSource(ChannelSource.of(buf), sub);
    assertEquals(off, buf.position());
    assertEquals(off + len, buf.limit());

    direct.clear();
    buf = direct;
    buf.clear().position(off).limit(off + len);
    ByteSourceTests.testByteSource(ChannelSource.of(buf), sub);
    assertEquals(off, buf.position());
    assertEquals(off + len, buf.limit());
}
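
The buf.clear().position(off).limit(off + len) chains above work because each of these methods returns the buffer, so the calls compose into "reset, then select a sub-range". A minimal sketch of carving such a window (offsets are illustrative):

import java.nio.ByteBuffer;

public class WindowSketch {
    public static void main(String[] args) {
        byte[] data = "0123456789".getBytes();
        ByteBuffer buf = ByteBuffer.wrap(data);

        int off = 2, len = 5;
        buf.clear().position(off).limit(off + len); // window over data[2..6]

        byte[] window = new byte[buf.remaining()];
        buf.get(window);
        System.out.println(new String(window)); // "23456"
    }
}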