Example usage for java.nio ByteBuffer remaining

List of usage examples for java.nio ByteBuffer remaining

Introduction

On this page you can find example usages of the java.nio.ByteBuffer.remaining() method, collected from open-source projects.

Prototype

public final int remaining() 

Document

Returns the number of remaining elements in this buffer, that is, limit - position.
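
As a quick illustration of that definition, here is a minimal, self-contained sketch (not taken from any of the projects below):

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putInt(42);                       // position = 4, limit = 16
        System.out.println(buf.remaining());  // 12, i.e. limit - position
        buf.flip();                           // position = 0, limit = 4
        System.out.println(buf.remaining());  // 4: the bytes now readable
    }
}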

Usage

From source file:hivemall.topicmodel.ProbabilisticTopicModelBaseUDTF.java

protected void recordTrainSampleToTempFile(@Nonnull final String[] wordCounts) throws HiveException {
    if (iterations == 1) {
        return;
    }

    ByteBuffer buf = inputBuf;
    NioStatefulSegment dst = fileIO;

    if (buf == null) {
        final File file;
        try {
            file = File.createTempFile("hivemall_topicmodel", ".sgmt");
            file.deleteOnExit();
            if (!file.canWrite()) {
                throw new UDFArgumentException("Cannot write a temporary file: " + file.getAbsolutePath());
            }
            logger.info("Record training samples to a file: " + file.getAbsolutePath());
        } catch (IOException ioe) {
            throw new UDFArgumentException(ioe);
        } catch (Throwable e) {
            throw new UDFArgumentException(e);
        }
        this.inputBuf = buf = ByteBuffer.allocateDirect(1024 * 1024); // 1 MB
        this.fileIO = dst = new NioStatefulSegment(file, false);
    }

    // wordCounts length, wc1 length, wc1 string, wc2 length, wc2 string, ...
    int wcLengthTotal = 0;
    for (String wc : wordCounts) {
        if (wc == null) {
            continue;
        }
        wcLengthTotal += wc.length();
    }
    int recordBytes = SizeOf.INT + SizeOf.INT * wordCounts.length + wcLengthTotal * SizeOf.CHAR;
    int requiredBytes = SizeOf.INT + recordBytes; // need to allocate space for "recordBytes" itself

    int remain = buf.remaining();
    if (remain < requiredBytes) {
        writeBuffer(buf, dst);
    }

    buf.putInt(recordBytes);
    buf.putInt(wordCounts.length);
    for (String wc : wordCounts) {
        NIOUtils.putString(wc, buf);
    }
}
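
The method above compares buf.remaining() against the bytes the next record needs and flushes the buffer to the file segment when the record would not fit. Below is a minimal sketch of that flush-when-full pattern; the FileChannel destination and buffer size are illustrative assumptions standing in for Hivemall's NioStatefulSegment.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

final class BufferedRecordWriter {
    private final ByteBuffer buf = ByteBuffer.allocateDirect(1024 * 1024); // 1 MB

    /** Appends a length-prefixed record, draining to the channel first if it would not fit. */
    void append(byte[] record, FileChannel dst) throws IOException {
        int required = Integer.BYTES + record.length; // length header + payload
        if (buf.remaining() < required) {
            buf.flip();                  // switch the buffer to read mode
            while (buf.hasRemaining()) {
                dst.write(buf);          // drain buffered records to the file
            }
            buf.clear();                 // ready for new writes
        }
        buf.putInt(record.length);
        buf.put(record);
    }
}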

From source file:org.apache.bookkeeper.bookie.Bookie.java

/**
 * Add entry to a ledger, even if the ledger has previously been fenced. This should only
 * happen in bookie recovery or ledger recovery cases, where entries are being replicated
 * so that they exist on a quorum of bookies. The corresponding client-side call for this
 * is not exposed to users.
 */
public void recoveryAddEntry(ByteBuffer entry, WriteCallback cb, Object ctx, byte[] masterKey)
        throws IOException, BookieException {
    long requestNanos = MathUtils.nowInNano();
    boolean success = false;
    int entrySize = 0;
    try {
        LedgerDescriptor handle = getLedgerForEntry(entry, masterKey);
        synchronized (handle) {
            entrySize = entry.remaining();
            addEntryInternal(handle, entry, cb, ctx);
        }
        success = true;
    } catch (NoWritableLedgerDirException e) {
        transitionToReadOnlyMode();
        throw new IOException(e);
    } finally {
        long elapsedNanos = MathUtils.elapsedNanos(requestNanos);
        if (success) {
            recoveryAddEntryStats.registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            addBytesStats.registerSuccessfulValue(entrySize);
        } else {
            recoveryAddEntryStats.registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            addBytesStats.registerFailedValue(entrySize);
        }
    }
}
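
Note that entry.remaining() is sampled before addEntryInternal consumes the buffer: once a consumer advances the position, remaining() no longer reflects the entry size. A tiny sketch of why that ordering matters:

import java.nio.ByteBuffer;

final class EntrySize {
    /** Measure first, consume second: remaining() shrinks as the position advances. */
    static int consumeAndMeasure(ByteBuffer entry) {
        int size = entry.remaining();   // full payload size in bytes
        entry.position(entry.limit());  // simulate a consumer draining the buffer
        return size;                    // entry.remaining() is now 0, but size survives
    }
}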

From source file:org.apache.hadoop.hdfs.BlockReaderLocalLegacy.java

@Override
public synchronized int read(ByteBuffer buf) throws IOException {
    int nRead = 0;
    if (verifyChecksum) {
        // A 'direct' read actually has three phases. The first drains any
        // remaining bytes from the slow read buffer. After this the read is
        // guaranteed to be on a checksum chunk boundary. If there are still bytes
        // to read, the fast direct path is used for as many remaining bytes as
        // possible, up to a multiple of the checksum chunk size. Finally, any
        // 'odd' bytes remaining at the end of the read cause another slow read to
        // be issued, which involves an extra copy.

        // Every 'slow' read tries to fill the slow read buffer in one go for
        // efficiency's sake. As described above, all non-checksum-chunk-aligned
        // reads will be served from the slower read path.

        if (slowReadBuff.hasRemaining()) {
            // There are remaining bytes from a small read available. This usually
            // means this read is unaligned, which falls back to the slow path.
            int fromSlowReadBuff = Math.min(buf.remaining(), slowReadBuff.remaining());
            writeSlice(slowReadBuff, buf, fromSlowReadBuff);
            nRead += fromSlowReadBuff;
        }

        if (buf.remaining() >= bytesPerChecksum && offsetFromChunkBoundary == 0) {
            // Since we have drained the 'small read' buffer, we are guaranteed to
            // be chunk-aligned
            int len = buf.remaining() - (buf.remaining() % bytesPerChecksum);

            // There's only enough checksum buffer space available to checksum one
            // entire slow read buffer. This saves keeping the number of checksum
            // chunks around.
            len = Math.min(len, slowReadBuff.capacity());
            int oldlimit = buf.limit();
            buf.limit(buf.position() + len);
            int readResult = 0;
            try {
                readResult = doByteBufferRead(buf);
            } finally {
                buf.limit(oldlimit);
            }
            if (readResult == -1) {
                return nRead;
            } else {
                nRead += readResult;
                buf.position(buf.position() + readResult);
            }
        }

        // offsetFromChunkBoundary > 0 => unaligned read, use slow path to read
        // until chunk boundary
        if ((buf.remaining() > 0 && buf.remaining() < bytesPerChecksum) || offsetFromChunkBoundary > 0) {
            int toRead = Math.min(buf.remaining(), bytesPerChecksum - offsetFromChunkBoundary);
            int readResult = fillSlowReadBuffer(toRead);
            if (readResult == -1) {
                return nRead;
            } else {
                int fromSlowReadBuff = Math.min(readResult, buf.remaining());
                writeSlice(slowReadBuff, buf, fromSlowReadBuff);
                nRead += fromSlowReadBuff;
            }
        }
    } else {
        // Non-checksummed reads are much easier; we can just fill the buffer directly.
        nRead = doByteBufferRead(buf);
        if (nRead > 0) {
            buf.position(buf.position() + nRead);
        }
    }
    return nRead;
}
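
The chunk-aligned branch above temporarily shrinks the buffer's limit so the underlying read cannot overshoot, then restores the old limit in a finally block. Here is a minimal sketch of that save-and-restore-limit technique, with a generic ReadableByteChannel as an assumed stand-in for the reader's own data source:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

final class LimitedRead {
    /** Reads at most maxLen bytes into buf by temporarily capping its limit. */
    static int readAtMost(ReadableByteChannel ch, ByteBuffer buf, int maxLen) throws IOException {
        int len = Math.min(maxLen, buf.remaining());
        int oldLimit = buf.limit();
        buf.limit(buf.position() + len);   // expose only len bytes to the channel
        try {
            return ch.read(buf);
        } finally {
            buf.limit(oldLimit);           // always restore, even if the read throws
        }
    }
}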

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

private static short[] readJumpTableVarColOffsets(RowState rowState, ByteBuffer rowBuffer, int rowStart,
        NullMask nullMask) {
    short[] varColOffsets = rowState.getVarColOffsets();
    if (varColOffsets != null) {
        return varColOffsets;
    }

    // calculate offsets using jump-table info
    int nullMaskSize = nullMask.byteSize();
    int rowEnd = rowStart + rowBuffer.remaining() - 1;
    int numVarCols = ByteUtil.getUnsignedByte(rowBuffer, rowEnd - nullMaskSize);
    varColOffsets = new short[numVarCols + 1];

    int rowLen = rowEnd - rowStart + 1;
    int numJumps = (rowLen - 1) / MAX_BYTE;
    int colOffset = rowEnd - nullMaskSize - numJumps - 1;

    // If last jump is a dummy value, ignore it
    if (((colOffset - rowStart - numVarCols) / MAX_BYTE) < numJumps) {
        numJumps--;
    }

    int jumpsUsed = 0;
    for (int i = 0; i < numVarCols + 1; i++) {

        while ((jumpsUsed < numJumps)
                && (i == ByteUtil.getUnsignedByte(rowBuffer, rowEnd - nullMaskSize - jumpsUsed - 1))) {
            jumpsUsed++;
        }

        varColOffsets[i] = (short) (ByteUtil.getUnsignedByte(rowBuffer, colOffset - i)
                + (jumpsUsed * MAX_BYTE));
    }

    rowState.setVarColOffsets(varColOffsets);
    return varColOffsets;
}

From source file:org.commoncrawl.io.internal.NIOHttpConnection.java

public void Writeable(NIOClientSocket theSocket) throws IOException {

    if (!theSocket.isOpen()) {
        return;
    }

    int amountWritten = 0;

    try {

        boolean contentEOF = false;

        amountWritten = 0;

        if (_outBuf.available() == 0 && _dataSource != null) {
            // read some more data from the data source 
            contentEOF = _dataSource.read(_outBuf);
        }

        ByteBuffer bufferToWrite = _outBuf.read();

        if (bufferToWrite != null) {

            try {

                int amountToWrite = bufferToWrite.remaining();

                // if upload rate limiter is not null ... 
                if (_uploadRateLimiter != null) {
                    // apply rate limit policy to outbound data ... 
                    amountToWrite = _uploadRateLimiter.checkRateLimit(amountToWrite);
                }

                if (amountToWrite != 0) {
                    // if amount to write is less than remaining ... 
                    if (amountToWrite < bufferToWrite.remaining()) {
                        //slice the buffer ... 
                        ByteBuffer slicedBuffer = bufferToWrite.slice();
                        // limit to amount to write ... 
                        slicedBuffer.limit(amountToWrite);
                        // and write to socket ... 
                        amountWritten = _socket.write(slicedBuffer);
                        if (amountWritten >= 0) {
                            // advance source buffer manually...
                            bufferToWrite.position(bufferToWrite.position() + amountWritten);
                        }
                    } else {
                        amountWritten = _socket.write(bufferToWrite);
                    }

                    if (_uploadRateLimiter != null) {
                        _uploadRateLimiter.updateStats(amountWritten);

                        // debug output ... 
                        BandwidthUtils.BandwidthStats stats = new BandwidthUtils.BandwidthStats();
                        // collect stats 
                        _uploadRateLimiter.getStats(stats);
                        // dump stats ... 
                        // System.out.println("Connection: "+ this+"Upload Speed:" + stats.scaledBitsPerSecond + " " + stats.scaledBitsUnits + " TotalWritten:" + (_cumilativeWritten + amountWritten) );
                        // LOG.info("Connection:" + getId()+" BytesOut:" + amountWritten +" Upload Speed:" + stats.scaledBitsPerSecond + " " + stats.scaledBitsUnits + " TotalWritten:" + (_cumilativeWritten + amountWritten));
                    }
                }
            } catch (IOException exception) {
                // LOG.error(CCStringUtils.stringifyException(e));
                throw exception;
            }
            _totalWritten += amountWritten;
            _cumilativeWritten += amountWritten;

            // System.out.println("NIOHttpConnection->wrote:" + amountWritten + "Bytes TotalWritten:" + _cumilativeWritten);

            if (bufferToWrite.remaining() > 0) {
                _outBuf.putBack(bufferToWrite);
            }
        }

        if (_totalWritten > 0 && !_outBuf.isDataAvailable() && (_dataSource == null || contentEOF)) {

            _lastReadOrWriteTime = System.currentTimeMillis();

            // transition from sending to receiving ... 
            if (_state == State.SENDING_REQUEST) {
                // set up an initial last read time value here ... 
                setState(State.RECEIVING_HEADERS, null);
                _selector.registerForRead(theSocket);
            }
        }
    } catch (IOException e) {
        LOG.error("Writeable for url:" + getURL() + " threw Exception:" + e.getMessage());

        setErrorType(ErrorType.IOEXCEPTION);
        setErrorDesc(StringUtils.stringifyException(e));
        setState(State.ERROR, e);

        throw e;
    }

    if (_state == State.SENDING_REQUEST) {
        _selector.registerForReadAndWrite(theSocket);
    } else if (_state.ordinal() >= State.RECEIVING_HEADERS.ordinal()
            && _state.ordinal() < State.DONE.ordinal()) {
        _selector.registerForRead(theSocket);
    }
}
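
When the rate limiter grants fewer bytes than bufferToWrite.remaining(), the code above slices the buffer, caps the slice's limit, and then advances the source buffer's position by the amount actually written, since writing the slice does not move the original position. A minimal sketch of that slice-and-limit technique, assuming a plain WritableByteChannel:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

final class PartialWrite {
    /** Writes at most quota bytes of src, keeping src's position in sync. */
    static int writeUpTo(WritableByteChannel ch, ByteBuffer src, int quota) throws IOException {
        if (quota >= src.remaining()) {
            return ch.write(src);          // the whole buffer fits within the quota
        }
        ByteBuffer slice = src.slice();    // shares src's content, position 0
        slice.limit(quota);                // cap the view at the quota
        int written = ch.write(slice);
        if (written > 0) {
            src.position(src.position() + written); // advance src manually
        }
        return written;
    }
}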

From source file:org.commoncrawl.io.internal.NIOHttpConnection.java

public int Readable(NIOClientSocket theSocket) throws IOException {

    if (!theSocket.isOpen()) {
        LOG.error("Readable Called on Closed Socket");
        return -1;
    }

    int totalBytesRead = 0;
    int singleReadAmount = 0;
    boolean overflow = false;
    boolean disconnected = false;

    try {

        if (_downloadMax == -1 || _totalRead < _downloadMax) {

            do {

                ByteBuffer buffer = _inBuf.getWriteBuf();

                if (_downloadMax != -1) {
                    if (_totalRead + buffer.remaining() > _downloadMax) {
                        int overflowAmt = (_totalRead + buffer.remaining()) - _downloadMax;
                        buffer.limit(buffer.limit() - overflowAmt);
                    }
                }

                singleReadAmount = _socket.read(buffer);

                if (singleReadAmount > 0) {
                    _inBuf.write(buffer);
                    _totalRead += singleReadAmount;
                    _cumilativeRead += singleReadAmount;
                    totalBytesRead += singleReadAmount;
                }

            } while (singleReadAmount > 0 && (_downloadMax == -1 || _totalRead < _downloadMax));

            if (_downloadMax != -1 && _totalRead == _downloadMax) {
                overflow = true;
                _contentTruncated = true;
            }
        }

        if (totalBytesRead > 0) {
            // flush any written buffers .
            _inBuf.flush();
            // process incoming buffer 
            processIncomingData(totalBytesRead);
        }

        if (singleReadAmount == -1 || overflow) {

            disconnected = true;

            if (getState() == State.RECEIVING_CONTENT
                    && (overflow || _contentLength == -1 || _contentLength == _downloadedContentLength)) {

                // if we are still in the middle of processing chunked data ... 
                if (_chunked) {
                    // clear out existing input buffer ...
                    _inBuf.reset();
                    // and if a chunk buffer is available ... 
                    if (_chunkContentBuffer != null) {
                        // take what we can get ... 

                        // flush chunk buffer ...
                        _chunkContentBuffer.flush();
                        // and swap it with the real content buffer ... 
                        _inBuf = _chunkContentBuffer;
                        // reset chunk state ... 
                        _chunkContentBuffer = null;
                    }
                    // reset chunked flag ... 
                    _chunked = false;

                    // and now, if this is NOT an overflow condition ...
                    if (!overflow) {
                        // interpret this as an error ...
                        setErrorType(ErrorType.IOEXCEPTION);
                        setErrorDesc("Connection Closed Before Receiving Chunk Trailer");
                        setState(State.ERROR, new java.net.SocketException());
                    }
                }

                // now check one more time if we are in the proper state ...
                if (getState() == State.RECEIVING_CONTENT) {
                    setState(State.DONE, null);
                }
            } else if (getState() != State.DONE) {
                if (getState() == State.RECEIVING_CONTENT && _downloadedContentLength != 0) {
                    LOG.warn("URL:" + _url + " POSSIBLE TRUNCATION: Read returned -1 with ContentLength:"
                            + _contentLength + " BufferSize:" + _inBuf.available() + " DownloadSize:"
                            + _downloadedContentLength + " State:" + getState() + "Context:" + _context);
                    setState(State.DONE, null);
                } else {
                    LOG.error("URL:" + _url + " Read returned -1 with ContentLength:" + _contentLength
                            + " BufferSize:" + _inBuf.available() + " DownloadSize:" + _downloadedContentLength
                            + " State:" + getState() + "Context:" + _context);

                    setErrorType(ErrorType.IOEXCEPTION);
                    setErrorDesc("Read returned -1 with ContentLength:" + _contentLength + " BufferSize:"
                            + _inBuf.available() + " DownloadSize:" + _downloadedContentLength + " State:"
                            + getState());
                    setState(State.ERROR, new java.net.SocketException());
                }
            }
        }
    } catch (IOException e) {
        LOG.error("Readable for url:" + getURL() + " threw Exception:" + e.getMessage());

        setErrorType(ErrorType.IOEXCEPTION);
        setErrorDesc(StringUtils.stringifyException(e));

        setState(State.ERROR, e);

    }
    if (_socket.isOpen()) {
        // if we have data to write ...
        if (_outBuf.isDataAvailable()) {
            _selector.registerForReadAndWrite(theSocket);
        } else {
            _selector.registerForRead(theSocket);
        }
    }

    if (totalBytesRead > 0) {
        //update last read time ...
        _lastReadOrWriteTime = System.currentTimeMillis();
    }

    return (disconnected) ? -1 : totalBytesRead;
}
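
The download cap in Readable works by shrinking the write buffer's limit before each read, so that buffer.remaining() can never admit more than _downloadMax bytes in total. A minimal sketch of that guard, with illustrative names:

import java.nio.ByteBuffer;

final class ReadBudget {
    /** Caps buf so that filling it cannot push totalRead past downloadMax (-1 = unlimited). */
    static void capToBudget(ByteBuffer buf, long totalRead, long downloadMax) {
        if (downloadMax != -1 && totalRead + buf.remaining() > downloadMax) {
            long overflow = (totalRead + buf.remaining()) - downloadMax;
            buf.limit(buf.limit() - (int) overflow); // shrink the writable window
        }
    }
}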

From source file:com.yahoo.omid.tso.persistence.LoggerProtocol.java

/**
 * Execute a logged entry (several logged ops)
 * @param bb Serialized operations
 */
void execute(ByteBuffer bb) {
    boolean done = !bb.hasRemaining();
    while (!done) {
        byte op = bb.get();
        long timestamp, startTimestamp, commitTimestamp;
        if (LOG.isTraceEnabled()) {
            LOG.trace("Operation: " + op);
        }
        switch (op) {
        case TIMESTAMPORACLE:
            timestamp = bb.getLong();
            this.getSO().initialize(timestamp);
            this.initialize();
            oracle = true;
            break;
        case COMMIT:
            startTimestamp = bb.getLong();
            commitTimestamp = bb.getLong();
            processCommit(startTimestamp, commitTimestamp);
            if (commitTimestamp < largestDeletedTimestamp) {
                commits = true;
            }
            break;
        case LARGESTDELETEDTIMESTAMP:
            timestamp = bb.getLong();
            processLargestDeletedTimestamp(timestamp);

            break;
        case ABORT:
            timestamp = bb.getLong();
            processAbort(timestamp);

            break;
        case FULLABORT:
            timestamp = bb.getLong();
            processFullAbort(timestamp);

            break;
        case LOGSTART:
            consumed = true;
            break;
        case SNAPSHOT:
            int snapshot = (int) bb.getLong();
            if (snapshot > this.snapshot) {
                this.snapshot = snapshot;
                this.hasSnapshot = true;
            }
            if (hasSnapshot && snapshot < this.snapshot) {
                this.aborts = true;
            }
            break;
        }
        if (bb.remaining() == 0)
            done = true;
    }
}
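
The done flag combined with the trailing bb.remaining() == 0 check is equivalent to looping on bb.hasRemaining(), which is defined as remaining() > 0. A sketch of the same replay loop in that more compact form; the opcodes here are hypothetical placeholders, not the actual LoggerProtocol constants:

import java.nio.ByteBuffer;

final class LogReplay {
    static final byte COMMIT = 1; // hypothetical opcode
    static final byte ABORT = 2;  // hypothetical opcode

    /** Replays opcode records until the buffer is exhausted. */
    static void execute(ByteBuffer bb) {
        while (bb.hasRemaining()) {        // same as bb.remaining() > 0
            byte op = bb.get();
            switch (op) {
            case COMMIT:
                long startTimestamp = bb.getLong();
                long commitTimestamp = bb.getLong();
                // processCommit(startTimestamp, commitTimestamp) ...
                break;
            case ABORT:
                long timestamp = bb.getLong();
                // processAbort(timestamp) ...
                break;
            default:
                return;                    // unknown opcode: stop replaying
            }
        }
    }
}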

From source file:com.github.hrpc.rpc.Server.java

/**
 * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}.
 * If the amount of data is large, it writes to the channel in smaller chunks.
 * This is to avoid the JDK creating many direct buffers as the size of the
 * buffer increases. This also minimizes extra copies in the NIO layer
 * resulting from the multiple write operations required to write a large
 * buffer.
 *
 * @see WritableByteChannel#write(ByteBuffer)
 */
private int channelWrite(WritableByteChannel channel, ByteBuffer buffer) throws IOException {

    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.write(buffer)
            : channelIO(null, channel, buffer);
    if (count > 0) {
        rpcMetrics.incrSentBytes(count);
    }
    return count;
}
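
channelIO (not shown here) presumably walks the buffer in NIO_BUFFER_LIMIT-sized steps by moving the limit forward one chunk at a time; channelRead below applies the same idea on the read side. A minimal sketch of how such chunked writing could look; the chunk size and loop are assumptions, not the actual channelIO implementation:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

final class ChunkedWrite {
    private static final int NIO_BUFFER_LIMIT = 8 * 1024; // assumed chunk size

    /** Writes buffer in at most NIO_BUFFER_LIMIT-byte chunks. */
    static int writeChunked(WritableByteChannel ch, ByteBuffer buffer) throws IOException {
        int originalLimit = buffer.limit();
        int total = 0;
        while (buffer.position() < originalLimit) {
            int chunk = Math.min(originalLimit - buffer.position(), NIO_BUFFER_LIMIT);
            buffer.limit(buffer.position() + chunk);  // expose one chunk
            int n = ch.write(buffer);
            buffer.limit(originalLimit);              // restore the full view
            if (n <= 0) {
                break;                                // channel not accepting more
            }
            total += n;
        }
        return total;
    }
}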

From source file:com.github.hrpc.rpc.Server.java

/**
 * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}.
 * If the amount of data is large, it reads from the channel in smaller chunks.
 * This is to avoid the JDK creating many direct buffers as the size of the
 * ByteBuffer increases. There should not be any performance degradation.
 *
 * @see ReadableByteChannel#read(ByteBuffer)
 */
private int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {

    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer)
            : channelIO(channel, null, buffer);
    if (count > 0) {
        rpcMetrics.incrReceivedBytes(count);
    }
    return count;
}

From source file:com.codefollower.lealone.omid.tso.persistence.LoggerProtocol.java

/**
 * Execute a logged entry (several logged ops)
 * @param bb Serialized operations
 */
void execute(ByteBuffer bb) {
    boolean done = !bb.hasRemaining();
    while (!done) {
        byte op = bb.get();
        long timestamp, startTimestamp, commitTimestamp;
        if (LOG.isTraceEnabled()) {
            LOG.trace("Operation: " + op);
        }
        switch (op) {
        case TIMESTAMP_ORACLE:
            timestamp = bb.getLong();
            this.getTimestampOracle().initialize(timestamp);
            this.initialize();
            oracle = true;
            break;
        case COMMIT:
            startTimestamp = bb.getLong();
            commitTimestamp = bb.getLong();
            processCommit(startTimestamp, commitTimestamp);
            if (commitTimestamp < largestDeletedTimestamp) {
                commits = true;
            }
            break;
        case LARGEST_DELETED_TIMESTAMP:
            timestamp = bb.getLong();
            processLargestDeletedTimestamp(timestamp);

            break;
        case ABORT:
            timestamp = bb.getLong();
            processHalfAbort(timestamp);

            break;
        case FULL_ABORT:
            timestamp = bb.getLong();
            processFullAbort(timestamp);

            break;
        case LOG_START:
            consumed = true;
            break;
        case SNAPSHOT:
            int snapshot = (int) bb.getLong();
            if (snapshot > this.snapshot) {
                this.snapshot = snapshot;
                this.hasSnapshot = true;
            }
            if (hasSnapshot && snapshot < this.snapshot) {
                this.aborts = true;
            }
            break;
        }
        if (bb.remaining() == 0)
            done = true;
    }
}