Example usage for java.nio ByteBuffer capacity

List of usage examples for java.nio ByteBuffer capacity

Introduction

On this page you can find example usages of java.nio.ByteBuffer.capacity().

Prototype

public final int capacity() 

Document

Returns the capacity of this buffer.
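
For orientation, a minimal standalone sketch (not taken from any of the projects below) showing that capacity() is fixed at allocation time, while position and limit move as the buffer is used:

import java.nio.ByteBuffer;

public class CapacityDemo {
    public static void main(String[] args) {
        // capacity() is fixed when the buffer is allocated
        ByteBuffer buf = ByteBuffer.allocate(64);
        System.out.println(buf.capacity()); // 64

        // writing and flipping change position and limit, never capacity
        buf.put(new byte[16]);
        buf.flip();
        System.out.println(buf.position()); // 0
        System.out.println(buf.limit());    // 16
        System.out.println(buf.capacity()); // still 64
    }
}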

Usage

From source file:voldemort.common.nio.ByteBufferBackedInputStream.java

public void setBuffer(ByteBuffer newBuffer) {
    // update the size tracker with the new buffer size
    if (sizeTracker != null && this.buffer != null && newBuffer != null) {
        sizeTracker.add(newBuffer.capacity());
        sizeTracker.subtract(this.buffer.capacity());
    }
    this.buffer = newBuffer;
}
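
The pattern above uses capacity() to keep an external byte counter consistent when the backing buffer is swapped. Below is a minimal standalone sketch of the same idea, using AtomicLong in place of Voldemort's MutableLong tracker; the class and field names are illustrative, not Voldemort's API:

import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicLong;

class TrackedBuffer {
    private final AtomicLong sizeTracker; // total capacity of all tracked buffers
    private ByteBuffer buffer;

    TrackedBuffer(ByteBuffer buffer, AtomicLong sizeTracker) {
        this.buffer = buffer;
        this.sizeTracker = sizeTracker;
        if (buffer != null) {
            sizeTracker.addAndGet(buffer.capacity());
        }
    }

    void setBuffer(ByteBuffer newBuffer) {
        // account for the new capacity and release the old one
        if (buffer != null && newBuffer != null) {
            sizeTracker.addAndGet(newBuffer.capacity());
            sizeTracker.addAndGet(-buffer.capacity());
        }
        this.buffer = newBuffer;
    }
}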

From source file:org.apache.hadoop.ipc.ServerRpcSSLEngineImpl.java

@Override
public int write(WritableByteChannel channel, ByteBuffer buffer) throws IOException {
    serverAppBuffer.clear();
    if (serverAppBuffer.capacity() < buffer.capacity()) {
        LOG.debug("ServerAppBuffer capacity: " + serverAppBuffer.capacity() + " Buffer size: "
                + buffer.capacity());
        serverAppBuffer = ByteBuffer.allocate(Math.min(buffer.capacity(), MAX_BUFFER_SIZE));
    }
    serverAppBuffer.put(buffer);
    serverAppBuffer.flip();

    int bytesWritten = 0;
    while (serverAppBuffer.hasRemaining()) {
        serverNetBuffer.clear();
        SSLEngineResult result = sslEngine.wrap(serverAppBuffer, serverNetBuffer);
        switch (result.getStatus()) {
        case OK:
            serverNetBuffer.flip();
            while (serverNetBuffer.hasRemaining()) {
                bytesWritten += channel.write(serverNetBuffer);
            }
            //return bytesWritten;
            break;
        case BUFFER_OVERFLOW:
            serverNetBuffer = enlargePacketBuffer(serverNetBuffer);
            break;
        case BUFFER_UNDERFLOW:
            throw new SSLException("Buffer underflow should not happen after wrap");
        case CLOSED:
            sslEngine.closeOutbound();
            doHandshake();
            return -1;
        default:
            throw new IllegalStateException("Invalid SSL state: " + result.getStatus());
        }
    }
    return bytesWritten;
}
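
The enlargePacketBuffer helper is not shown in this excerpt. A common way to write such a helper (an assumption here, not necessarily Hadoop's implementation) is to grow the buffer to at least the SSL session's packet size, preserving any unread bytes:

private ByteBuffer enlargePacketBuffer(ByteBuffer buffer) {
    // grow to at least the engine's required packet size, keeping unread data
    int packetSize = sslEngine.getSession().getPacketBufferSize();
    int newCapacity = Math.max(packetSize, buffer.capacity() * 2);
    ByteBuffer enlarged = ByteBuffer.allocate(newCapacity);
    buffer.flip();
    enlarged.put(buffer);
    return enlarged;
}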

From source file:org.apache.myriad.state.utils.ByteBufferSupportTest.java

@Test
public void testGetBytes() throws Exception {
    ByteBuffer bb = getByteBuffer(BYTE_ARRAY);

    byte[] bytes = ByteBufferSupport.getBytes(bb, bb.capacity());

    assertEquals(BYTE_ARRAY.length, bytes.length);

    for (int i = 0, j = bytes.length; i < j; i++) {
        assertEquals(bytes[i], BYTE_ARRAY[i]);
    }
}

From source file:voldemort.common.nio.ByteBufferBackedOutputStream.java

public void setBuffer(ByteBuffer newBuffer) {
    // update the size tracker with the new buffer size
    if (sizeTracker != null && this.buffer != null && newBuffer != null) {
        sizeTracker.add(newBuffer.capacity());
        sizeTracker.subtract(this.buffer.capacity());
    }
    this.buffer = newBuffer;
    wasExpanded = false;
}

From source file:org.apache.hadoop.hbase.io.BoundedByteBufferPool.java

public ByteBuffer getBuffer() {
    ByteBuffer bb = null;
    lock.lock();
    try {
        bb = this.buffers.poll();
        if (bb != null) {
            this.totalReservoirCapacity -= bb.capacity();
        }
    } finally {
        lock.unlock();
    }
    if (bb != null) {
        // Clear sets limit == capacity and position == 0.
        bb.clear();
    } else {
        bb = ByteBuffer.allocate(this.runningAverage);
        this.allocations.incrementAndGet();
    }
    if (LOG.isTraceEnabled()) {
        LOG.trace("runningAverage=" + this.runningAverage + ", totalCapacity=" + this.totalReservoirCapacity
                + ", count=" + this.buffers.size() + ", allocations=" + this.allocations.get());
    }
    return bb;
}
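
getBuffer() subtracts the checked-out buffer's capacity() from the reservoir total; the counterpart when a buffer comes back is to add it again. A sketch of that accounting pattern only (not HBase's actual putBuffer; maxReservoirBytes is an assumed field used for illustration):

public void putBuffer(ByteBuffer bb) {
    lock.lock();
    try {
        // only keep the buffer if the reservoir has room for its capacity
        if (this.totalReservoirCapacity + bb.capacity() <= this.maxReservoirBytes) {
            this.buffers.add(bb);
            this.totalReservoirCapacity += bb.capacity();
        }
    } finally {
        lock.unlock();
    }
}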

From source file:org.apache.tajo.tuple.memory.ResizableMemoryBlock.java

public ResizableMemoryBlock(ByteBuffer buffer) {
    this(Unpooled.wrappedBuffer(buffer), new ResizableLimitSpec(buffer.capacity(), buffer.capacity()));
}

From source file:voldemort.common.nio.ByteBufferBackedOutputStream.java

public ByteBufferBackedOutputStream(ByteBuffer buffer, MutableLong sizeTracker) {
    this.buffer = buffer;
    wasExpanded = false;
    this.sizeTracker = sizeTracker;
    if (buffer != null)
        this.sizeTracker.add(buffer.capacity());
}

From source file:com.meidusa.venus.benchmark.FileLineRandomData.java

private final String readLine(ByteBuffer buffer) {
    if (closed)
        throw new IllegalStateException("file closed");
    ByteBuffer tempbuffer = localTempBuffer.get();
    tempbuffer.position(0);
    tempbuffer.limit(tempbuffer.capacity());
    byte c = -1;
    boolean eol = false;
    while (!eol) {
        switch (c = buffer.get()) {
        case -1:
        case '\n':
            eol = true;
            break;
        case '\r':
            eol = true;
            int cur = buffer.position();
            if ((buffer.get()) != '\n') {
                buffer.position(cur);
            }
            break;
        default:
            tempbuffer.put(c);
            break;
        }
    }

    if ((c == -1) && (tempbuffer.position() == 0)) {
        return null;
    }
    tempbuffer.flip();

    try {
        // only decode the bytes actually read (up to the flipped limit)
        return new String(tempbuffer.array(), 0, tempbuffer.limit(), encoding);
    } catch (UnsupportedEncodingException e) {
        return new String(tempbuffer.array(), 0, tempbuffer.limit());
    }

}

From source file:cn.ac.ncic.mastiff.io.coding.MVDecoder.java

public void ensureDecompress() throws IOException {
    if (compressAlgo != null && page == null) {
        org.apache.hadoop.io.compress.Decompressor decompressor = this.compressAlgo.getDecompressor();
        InputStream is = this.compressAlgo.createDecompressionStream(inBuf, decompressor, 0);
        ByteBuffer buf = ByteBuffer.allocate(decompressedSize);
        IOUtils.readFully(is, buf.array(), 3 * Bytes.SIZEOF_INT, buf.capacity() - 3 * Bytes.SIZEOF_INT);
        is.close();
        this.compressAlgo.returnDecompressor(decompressor);
        page = buf.array();
    }
}

From source file:org.apache.hadoop.mapred.nativetask.buffer.DirectBufferPool.java

public void returnBuffer(ByteBuffer buffer) throws IOException {
    if (null == buffer || !buffer.isDirect()) {
        throw new IOException("the buffer is null or the buffer returned is not a direct buffer");
    }

    buffer.clear();
    int capacity = buffer.capacity();
    Queue<WeakReference<ByteBuffer>> list = bufferMap.get(capacity);
    if (null == list) {
        list = new ConcurrentLinkedQueue<WeakReference<ByteBuffer>>();
        Queue<WeakReference<ByteBuffer>> prev = bufferMap.putIfAbsent(capacity, list);
        if (prev != null) {
            list = prev;
        }
    }
    list.add(new WeakReference<ByteBuffer>(buffer));
}