Example usage for java.nio ByteBuffer limit

List of usage examples for java.nio ByteBuffer limit

Introduction

On this page you can find example usage for java.nio ByteBuffer limit().

Prototype

public final int limit() 

Document

Returns the limit of this buffer.
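
Before the usage examples, here is a minimal, self-contained sketch (not taken from any of the source files below) showing how limit() relates to capacity(), position() and remaining(); the class name LimitDemo is just an illustrative placeholder.

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);   // capacity 16, limit 16, position 0
        buf.put(new byte[] { 1, 2, 3, 4 });         // position 4, limit still 16
        System.out.println(buf.limit());            // 16

        buf.flip();                                 // limit = old position (4), position = 0
        System.out.println(buf.limit());            // 4
        System.out.println(buf.remaining());        // 4, i.e. limit() - position()
    }
}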

Usage

From source file:jext2.DataInode.java

/**
 * Write data in buffer to disk. This works best when whole blocks, i.e.
 * writes whose size is a multiple of the blocksize, are written. Partial
 * blocks are written by first reading the block, writing the new data
 * into that buffer, and then writing the buffer back to disk.
 * @throws NoSpaceLeftOnDevice
 * @throws FileTooLarge
 */
public int writeData(ByteBuffer buf, long offset) throws JExt2Exception, NoSpaceLeftOnDevice, FileTooLarge {
    /*
     * Note on sparse file support:
     * getBlocksAllocate does not care if there are holes. Just write as many
     * blocks as the buffer requires at the desired location and set inode.size
     * accordingly.
     */

    int blocksize = superblock.getBlocksize();
    long start = offset / blocksize;
    long end = (buf.capacity() + blocksize) / blocksize + start;
    int startOff = (int) (offset % blocksize);

    if (startOff > 0)
        end += 1;

    buf.rewind();

    while (start < end) {
        LinkedList<Long> blockNrs = accessData().getBlocksAllocate(start, 1);
        int bytesLeft = buf.capacity() - buf.position();

        if (bytesLeft < blocksize || startOff > 0) { /* write partial block */
            ByteBuffer onDisk = blockAccess.read(blockNrs.getFirst());

            onDisk.position(startOff);

            assert onDisk.limit() == blocksize;

            buf.limit(buf.position() + Math.min(bytesLeft, onDisk.remaining()));

            onDisk.put(buf);

            onDisk.position(startOff);
            blockAccess.writeFromBufferUnsynchronized((blockNrs.getFirst() & 0xffffffff) * blocksize, onDisk);
        } else { /* write whole block */
            buf.limit(buf.position() + blocksize);

            blockAccess.writeFromBufferUnsynchronized((blockNrs.getFirst() & 0xffffffff) * blocksize, buf);
        }

        start += 1;
        startOff = 0;
        accessData().unlockHierarchyChanges();

    }
    int written = buf.position();
    assert written == buf.capacity();

    /* increase inode.size if we grew the file */
    if (offset + written > getSize()) { /* file grew */
        setStatusChangeTime(new Date());
        setSize(offset + written);
    }

    return written;
}
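
The partial-block branch above relies on a common pattern: temporarily capping the source buffer's limit so that a bulk put() copies only one block's worth of data. The following stand-alone sketch isolates that pattern; the names PartialCopyDemo and copyChunk are hypothetical and not part of jext2.

import java.nio.ByteBuffer;

public class PartialCopyDemo {
    // Copy at most chunkSize bytes from src into dst by narrowing src's limit.
    static void copyChunk(ByteBuffer src, ByteBuffer dst, int chunkSize) {
        int bytesLeft = src.capacity() - src.position();
        src.limit(src.position() + Math.min(bytesLeft, Math.min(chunkSize, dst.remaining())));
        dst.put(src);   // copies exactly the bytes between src.position() and src.limit()
    }

    public static void main(String[] args) {
        ByteBuffer src = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 });
        ByteBuffer dst = ByteBuffer.allocate(4);
        copyChunk(src, dst, 4);
        System.out.println(src.position());   // 4: only the first chunk was consumed
        System.out.println(src.limit());      // 4: restore via src.limit(src.capacity()) before the next chunk
    }
}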

From source file:org.apache.hadoop.hbase.io.HalfHFileReader.java

@Override
public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread) {
    final HFileScanner s = super.getScanner(cacheBlocks, pread);
    return new HFileScanner() {
        final HFileScanner delegate = s;
        public boolean atEnd = false;

        public ByteBuffer getKey() {
            if (atEnd)
                return null;
            return delegate.getKey();
        }

        public String getKeyString() {
            if (atEnd)
                return null;

            return delegate.getKeyString();
        }

        public ByteBuffer getValue() {
            if (atEnd)
                return null;

            return delegate.getValue();
        }

        public String getValueString() {
            if (atEnd)
                return null;

            return delegate.getValueString();
        }

        public KeyValue getKeyValue() {
            if (atEnd)
                return null;

            return delegate.getKeyValue();
        }

        public boolean next() throws IOException {
            if (atEnd)
                return false;

            boolean b = delegate.next();
            if (!b) {
                return b;
            }
            // constrain the bottom.
            if (!top) {
                ByteBuffer bb = getKey();
                if (getComparator().compare(bb.array(), bb.arrayOffset(), bb.limit(), splitkey, 0,
                        splitkey.length) >= 0) {
                    atEnd = true;
                    return false;
                }
            }
            return true;
        }

        public boolean seekBefore(byte[] key) throws IOException {
            return seekBefore(key, 0, key.length);
        }

        public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
            if (top) {
                if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) < 0) {
                    return false;
                }
            } else {
                if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) >= 0) {
                    return seekBefore(splitkey, 0, splitkey.length);
                }
            }
            return this.delegate.seekBefore(key, offset, length);
        }

        public boolean seekTo() throws IOException {
            if (top) {
                int r = this.delegate.seekTo(splitkey);
                if (r < 0) {
                    // midkey is < first key in file
                    return this.delegate.seekTo();
                }
                if (r > 0) {
                    return this.delegate.next();
                }
                return true;
            }

            boolean b = delegate.seekTo();
            if (!b) {
                return b;
            }
            // Check key.
            ByteBuffer k = this.delegate.getKey();
            return this.delegate.getReader().getComparator().compare(k.array(), k.arrayOffset(), k.limit(),
                    splitkey, 0, splitkey.length) < 0;
        }

        public int seekTo(byte[] key) throws IOException {
            return seekTo(key, 0, key.length);
        }

        public int seekTo(byte[] key, int offset, int length) throws IOException {
            if (top) {
                if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) < 0) {
                    return -1;
                }
            } else {
                if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) >= 0) {
                    // we would place the scanner in the second half.
                    // it might be an error to return false here ever...
                    boolean res = delegate.seekBefore(splitkey, 0, splitkey.length);
                    if (!res) {
                        throw new IOException(
                                "Seeking for a key in bottom of file, but key exists in top of file, failed on seekBefore(midkey)");
                    }
                    return 1;
                }
            }
            return delegate.seekTo(key, offset, length);
        }

        public Reader getReader() {
            return this.delegate.getReader();
        }

        public boolean isSeeked() {
            return this.delegate.isSeeked();
        }
    };
}

From source file:org.apache.hadoop.ipc.RpcSSLEngineAbstr.java

protected ByteBuffer handleBufferUnderflow(ByteBuffer buffer) {
    // If there is no size issue, return the same buffer and let the
    // peer read more data
    if (sslEngine.getSession().getPacketBufferSize() < buffer.limit()) {
        return buffer;
    } else {
        ByteBuffer newBuffer = enlargePacketBuffer(buffer);
        buffer.flip();
        newBuffer.put(buffer);
        return newBuffer;
    }
}

From source file:com.joyent.manta.http.entity.DigestedEntity.java

@Override
public void writeTo(final OutputStream out) throws IOException {
    digest.reset(); // reset the digest state in case we're in a retry

    // If our wrapped entity is backed by a buffer of some form,
    // we can easily read the whole buffer into our message digest.
    if (wrapped instanceof MemoryBackedEntity) {
        final MemoryBackedEntity entity = (MemoryBackedEntity) wrapped;
        final ByteBuffer backingBuffer = entity.getBackingBuffer();

        if (backingBuffer.hasArray()) {
            final byte[] bytes = backingBuffer.array();
            final int offset = backingBuffer.arrayOffset();
            final int position = backingBuffer.position();
            final int limit = backingBuffer.limit();

            digest.update(bytes, offset + position, limit - position);
            backingBuffer.position(limit);

            wrapped.writeTo(out);
        }
    } else {
        try (DigestOutputStream dout = new DigestOutputStream(digest);
                TeeOutputStream teeOut = new TeeOutputStream(out, dout)) {
            wrapped.writeTo(teeOut);
            teeOut.flush();
        }
    }
}

From source file:tanks10.SendAndReceive.java

/**
 * Place the buffer in the send queue.
 */
public void addToSendBuffers(ByteBuffer in) {
    if (closed) {
        return;
    }
    if (kill) {
        closeSession();
    } else {

        // the lock only applies to operations on the buffer's pointers
        synchronized (sendBuffers) {
            final TextMessage message;
            try {
                byte[] arrayContent = new byte[in.limit()];
                in.get(arrayContent);
                String content = new String(arrayContent, "UTF-8");
                message = new TextMessage(content);
            } catch (UnsupportedEncodingException e) {
                throw new RuntimeException(e);
            }

            try {
                session.sendMessage(message);
            } catch (Exception e) {
                LOG.log(Level.WARNING, "send message failed to " + session.getRemoteAddress().toString(), e);
                closeSession();
            }
        }
    }

}

From source file:com.buaa.cfs.nfs3.OpenFileCtx.java

@VisibleForTesting
public static void alterWriteRequest(WRITE3Request request, long cachedOffset) {
    long offset = request.getOffset();
    int count = request.getCount();
    long smallerCount = offset + count - cachedOffset;
    if (LOG.isDebugEnabled()) {
        LOG.debug(String.format(
                "Got overwrite with appended data (%d-%d)," + " current offset %d,"
                        + " drop the overlapped section (%d-%d)" + " and append new data (%d-%d).",
                offset, (offset + count - 1), cachedOffset, offset, (cachedOffset - 1), cachedOffset,
                (offset + count - 1)));
    }

    ByteBuffer data = request.getData();
    Preconditions.checkState(data.position() == 0, "The write request data has non-zero position");
    data.position((int) (cachedOffset - offset));
    Preconditions.checkState(data.limit() - data.position() == smallerCount,
            "The write request buffer has wrong limit/position regarding count");

    request.setOffset(cachedOffset);
    request.setCount((int) smallerCount);
}

From source file:com.navercorp.pinpoint.common.server.bo.serializer.trace.v1.SpanBoTest.java

@Test
public void serialize2_V1() {
    SpanBo spanBo = new SpanBo();
    spanBo.setAgentId("agent");
    String service = createString(5);
    spanBo.setApplicationId(service);
    String endPoint = createString(127);
    spanBo.setEndPoint(endPoint);
    String rpc = createString(255);
    spanBo.setRpc(rpc);

    spanBo.setServiceType(ServiceType.STAND_ALONE.getCode());
    spanBo.setApplicationServiceType(ServiceType.UNKNOWN.getCode());

    final ByteBuffer bytes = spanSerializer.writeColumnValue(spanBo);

    SpanBo newSpanBo = new SpanBo();
    Buffer valueBuffer = new OffsetFixedBuffer(bytes.array(), bytes.arrayOffset(), bytes.remaining());
    int i = spanDecoder.readSpan(newSpanBo, valueBuffer);
    logger.debug("length:{}", i);
    Assert.assertEquals(bytes.limit(), i);

    Assert.assertEquals(newSpanBo.getServiceType(), spanBo.getServiceType());
    Assert.assertEquals(newSpanBo.getApplicationServiceType(), spanBo.getApplicationServiceType());
}

From source file:org.cloudata.core.commitlog.pipe.CommitLogFileChannel.java

private int getTotalLengthOf(ByteBuffer[] bufferArray) {
    int totalLength = 0;

    for (ByteBuffer buf : bufferArray) {
        totalLength += buf.limit();
    }

    return totalLength;
}

From source file:com.navercorp.pinpoint.common.server.bo.serializer.trace.v1.SpanBoTest.java

@Test
public void serialize_V1() {
    final SpanBo spanBo = new SpanBo();
    spanBo.setAgentId("agentId");
    spanBo.setApplicationId("applicationId");
    spanBo.setEndPoint("end");
    spanBo.setRpc("rpc");

    spanBo.setParentSpanId(5);
    spanBo.setAgentStartTime(1);

    TransactionId transactionId = new TransactionId("agentId", 2, 3);
    spanBo.setTransactionId(transactionId);
    spanBo.setElapsed(4);
    spanBo.setStartTime(5);

    spanBo.setServiceType(ServiceType.STAND_ALONE.getCode());

    spanBo.setLoggingTransactionInfo(LoggingInfo.INFO.getCode());

    spanBo.setExceptionInfo(1000, "Exception");

    ByteBuffer bytes = spanSerializer.writeColumnValue(spanBo);

    SpanBo newSpanBo = new SpanBo();
    Buffer valueBuffer = new OffsetFixedBuffer(bytes.array(), bytes.arrayOffset(), bytes.remaining());
    int i = spanDecoder.readSpan(newSpanBo, valueBuffer);
    logger.debug("length:{}", i);
    Assert.assertEquals(bytes.limit(), i);
    Assert.assertEquals(newSpanBo.getAgentId(), spanBo.getAgentId());
    Assert.assertEquals(newSpanBo.getApplicationId(), spanBo.getApplicationId());
    Assert.assertEquals(newSpanBo.getAgentStartTime(), spanBo.getAgentStartTime());
    Assert.assertEquals(newSpanBo.getElapsed(), spanBo.getElapsed());
    Assert.assertEquals(newSpanBo.getEndPoint(), spanBo.getEndPoint());
    Assert.assertEquals(newSpanBo.getErrCode(), spanBo.getErrCode());
    Assert.assertEquals(newSpanBo.getFlag(), spanBo.getFlag());

    //        not included for serialization
    //        Assert.assertEquals(newSpanBo.getTraceAgentStartTime(), spanBo.getTraceAgentStartTime());
    //        Assert.assertEquals(newSpanBo.getTraceTransactionSequence(), spanBo.getTraceTransactionSequence());
    Assert.assertEquals(newSpanBo.getParentSpanId(), spanBo.getParentSpanId());

    Assert.assertEquals(newSpanBo.getServiceType(), spanBo.getServiceType());
    Assert.assertEquals(newSpanBo.getApplicationServiceType(), spanBo.getServiceType());

    Assert.assertEquals(newSpanBo.getVersion(), spanBo.getVersion());

    Assert.assertEquals(newSpanBo.getLoggingTransactionInfo(), spanBo.getLoggingTransactionInfo());

    Assert.assertEquals(newSpanBo.getExceptionId(), spanBo.getExceptionId());
    Assert.assertEquals(newSpanBo.getExceptionMessage(), spanBo.getExceptionMessage());

}

From source file:org.apache.hadoop.hbase.io.hfile.TestChecksum.java

protected void testChecksumCorruptionInternals(boolean useTags) throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            LOG.info("testChecksumCorruption: Compression algorithm: " + algo + ", pread=" + pread);
            Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo);
            FSDataOutputStream os = fs.create(path);
            HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true)
                    .withIncludesTags(useTags).withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
                    .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build();
            HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
            long totalSize = 0;
            for (int blockId = 0; blockId < 2; ++blockId) {
                DataOutputStream dos = hbw.startWriting(BlockType.DATA);
                for (int i = 0; i < 1234; ++i)
                    dos.writeInt(i);
                hbw.writeHeaderAndData(os);
                totalSize += hbw.getOnDiskSizeWithHeader();
            }
            os.close();

            // Use hbase checksums. 
            assertEquals(true, hfs.useHBaseChecksum());

            // Do a read that purposely introduces checksum verification failures.
            FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
            meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true)
                    .withIncludesTags(useTags).withHBaseCheckSum(true).build();
            HFileBlock.FSReader hbr = new FSReaderV2Test(is, totalSize, fs, path, meta);
            HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // read data back from the hfile, exclude header and checksum
            ByteBuffer bb = b.getBufferWithoutHeader(); // read back data
            DataInputStream in = new DataInputStream(
                    new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));

            // assert that we encountered hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(1, HFile.getChecksumFailuresCount());
            validateData(in);

            // A single instance of hbase checksum failure causes the reader to
            // switch off hbase checksum verification for the next 100 read
            // requests. Verify that this is correct.
            for (int i = 0; i < HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
                b = hbr.readBlockData(0, -1, -1, pread);
                assertEquals(0, HFile.getChecksumFailuresCount());
            }
            // The next read should have hbase checksum verification re-enabled;
            // we verify this by asserting that there was an hbase checksum failure.
            b = hbr.readBlockData(0, -1, -1, pread);
            assertEquals(1, HFile.getChecksumFailuresCount());

            // Since the above encountered a checksum failure, we switch
            // back to not checking hbase checksums.
            b = hbr.readBlockData(0, -1, -1, pread);
            assertEquals(0, HFile.getChecksumFailuresCount());
            is.close();

            // Now, use a completely new reader. Switch off hbase checksums in 
            // the configuration. In this case, we should not detect
            // any retries within hbase. 
            HFileSystem newfs = new HFileSystem(TEST_UTIL.getConfiguration(), false);
            assertEquals(false, newfs.useHBaseChecksum());
            is = new FSDataInputStreamWrapper(newfs, path);
            hbr = new FSReaderV2Test(is, totalSize, newfs, path, meta);
            b = hbr.readBlockData(0, -1, -1, pread);
            is.close();
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // read data back from the hfile, exclude header and checksum
            bb = b.getBufferWithoutHeader(); // read back data
            in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));

            // assert that we did not encounter hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(0, HFile.getChecksumFailuresCount());
            validateData(in);
        }
    }
}