Example usage for java.nio ByteBuffer limit

Introduction

This page collects example usages of java.nio.ByteBuffer.limit() drawn from open-source projects.

Prototype

public final int limit() 

Document

Returns the limit of this buffer.
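
A minimal, self-contained sketch (not taken from the projects below) illustrates what the limit is: it starts at the capacity, is unaffected by writes, and after flip() it marks the end of the data written so far.

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        System.out.println(buf.limit());     // 16: the limit starts at the capacity

        buf.putInt(42);                      // writing moves the position, not the limit
        System.out.println(buf.position()); // 4
        System.out.println(buf.limit());     // still 16

        buf.flip();                          // switch to reading
        System.out.println(buf.limit());     // 4: the limit now marks the end of the written data
    }
}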

Usage

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Reads the column data from the given row buffer.  Leaves limit unchanged.
 * Caches the returned value in the rowState.
 */
private static Object getRowColumn(JetFormat format, ByteBuffer rowBuffer, ColumnImpl column, RowState rowState,
        Map<ColumnImpl, byte[]> rawVarValues) throws IOException {
    byte[] columnData = null;
    try {

        NullMask nullMask = rowState.getNullMask(rowBuffer);
        boolean isNull = nullMask.isNull(column);
        if (column.storeInNullMask()) {
            // Boolean values are stored in the null mask.  see note about
            // caching below
            return rowState.setRowCacheValue(column.getColumnIndex(), column.readFromNullMask(isNull));
        } else if (isNull) {
            // well, that's easy! (no need to update cache w/ null)
            return null;
        }

        Object cachedValue = rowState.getRowCacheValue(column.getColumnIndex());
        if (cachedValue != null) {
            // we already have it, use it
            return cachedValue;
        }

        // reset position to row start
        rowBuffer.reset();

        // locate the column data bytes
        int rowStart = rowBuffer.position();
        int colDataPos = 0;
        int colDataLen = 0;
        if (!column.isVariableLength()) {

            // read fixed length value (non-boolean at this point)
            int dataStart = rowStart + format.OFFSET_COLUMN_FIXED_DATA_ROW_OFFSET;
            colDataPos = dataStart + column.getFixedDataOffset();
            colDataLen = column.getType().getFixedSize(column.getLength());

        } else {
            int varDataStart;
            int varDataEnd;

            if (format.SIZE_ROW_VAR_COL_OFFSET == 2) {

                // read simple var length value
                int varColumnOffsetPos = (rowBuffer.limit() - nullMask.byteSize() - 4)
                        - (column.getVarLenTableIndex() * 2);

                varDataStart = rowBuffer.getShort(varColumnOffsetPos);
                varDataEnd = rowBuffer.getShort(varColumnOffsetPos - 2);

            } else {

                // read jump-table based var length values
                short[] varColumnOffsets = readJumpTableVarColOffsets(rowState, rowBuffer, rowStart, nullMask);

                varDataStart = varColumnOffsets[column.getVarLenTableIndex()];
                varDataEnd = varColumnOffsets[column.getVarLenTableIndex() + 1];
            }

            colDataPos = rowStart + varDataStart;
            colDataLen = varDataEnd - varDataStart;
        }

        // grab the column data
        rowBuffer.position(colDataPos);
        columnData = ByteUtil.getBytes(rowBuffer, colDataLen);

        if ((rawVarValues != null) && column.isVariableLength()) {
            // caller wants raw value as well
            rawVarValues.put(column, columnData);
        }

        // parse the column data.  we cache the row values in order to be able
        // to update the index on row deletion.  note, most of the returned
        // values are immutable, except for binary data (returned as byte[]),
        // but binary data shouldn't be indexed anyway.
        return rowState.setRowCacheValue(column.getColumnIndex(), column.read(columnData));

    } catch (Exception e) {

        // cache "raw" row value.  see note about caching above
        rowState.setRowCacheValue(column.getColumnIndex(), ColumnImpl.rawDataWrapper(columnData));

        return rowState.handleRowError(column, columnData, e);
    }
}
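
The offset of the variable-length column table above is computed relative to rowBuffer.limit(), because that table sits at the end of the row. The standalone sketch below (a hypothetical record layout, not Jackcess code) shows the same idea of addressing a trailer from the limit rather than from the position.

import java.nio.ByteBuffer;

public class TrailerDemo {
    public static void main(String[] args) {
        // hypothetical record: 30 bytes of payload followed by a 2-byte trailer
        ByteBuffer row = ByteBuffer.allocate(32);
        row.putShort(30, (short) 4);          // trailer: offset of some field within the payload

        int trailerPos = row.limit() - 2;     // address the trailer from the end of the row
        short fieldOffset = row.getShort(trailerPos);
        System.out.println("field starts at " + fieldOffset);
    }
}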

From source file:com.linkedin.databus.core.DbusEventBuffer.java

private void saveDataBufferMetaInfo(boolean infoOnly) throws IOException {

    if (_allocationPolicy != AllocationPolicy.MMAPPED_MEMORY || !_bufferPersistenceEnabled) {
        _log.info("Not saving state metaInfoFile, because allocation policy is " + _allocationPolicy
                + "; bufferPersistenceEnabled:" + _bufferPersistenceEnabled);
        return;
    }

    String fileName = metaFileName() + (infoOnly ? MMAP_META_INFO_SUFFIX : "");
    DbusEventBufferMetaInfo mi = new DbusEventBufferMetaInfo(new File(_mmapDirectory, fileName));
    _log.info("about to save DbusEventBuffer for PP " + _physicalPartition + " state into " + mi.toString());

    // record session id - to figure out directory for the buffers
    mi.setSessionId(_sessionId);

    // write buffers specific info - num of buffers, pos and limit of each one
    mi.setVal(DbusEventBufferMetaInfo.NUM_BYTE_BUFFER, Integer.toString(_buffers.length));
    StringBuilder bufferInfo = new StringBuilder("");
    for (ByteBuffer b : _buffers) {
        DbusEventBufferMetaInfo.BufferInfo bi = new DbusEventBufferMetaInfo.BufferInfo(b.position(), b.limit(),
                b.capacity());
        bufferInfo.append(bi.toString());
        bufferInfo.append(" ");
    }
    mi.setVal(DbusEventBufferMetaInfo.BYTE_BUFFER_INFO, bufferInfo.toString());

    String currentWritePosition = Long.toString(_currentWritePosition.getPosition());
    mi.setVal(DbusEventBufferMetaInfo.CURRENT_WRITE_POSITION, currentWritePosition);

    // _maxBufferSize
    mi.setVal(DbusEventBufferMetaInfo.MAX_BUFFER_SIZE, Integer.toString(_maxBufferSize));

    //NOTE. no need to save readBuffer and rwChannel

    String head = Long.toString(_head.getPosition());
    mi.setVal(DbusEventBufferMetaInfo.BUFFER_HEAD, head);

    String tail = Long.toString(_tail.getPosition());
    mi.setVal(DbusEventBufferMetaInfo.BUFFER_TAIL, tail);

    String empty = Boolean.toString(_empty);
    mi.setVal(DbusEventBufferMetaInfo.BUFFER_EMPTY, empty);

    mi.setVal(DbusEventBufferMetaInfo.ALLOCATED_SIZE, Long.toString(_allocatedSize));

    mi.setVal(DbusEventBufferMetaInfo.EVENT_START_INDEX, Long.toString(_eventStartIndex.getPosition()));

    // _numEventsInWindow
    mi.setVal(DbusEventBufferMetaInfo.NUM_EVENTS_IN_WINDOW, Integer.toString(_numEventsInWindow));
    // _lastWrittenSequence
    mi.setVal(DbusEventBufferMetaInfo.LAST_WRITTEN_SEQUENCE, Long.toString(_lastWrittenSequence));

    mi.setVal(DbusEventBufferMetaInfo.SEEN_END_OF_PERIOD_SCN, Long.toString(_seenEndOfPeriodScn));
    // _prevScn
    mi.setVal(DbusEventBufferMetaInfo.PREV_SCN, Long.toString(_prevScn));
    // _timestampOfFirstEvent
    mi.setVal(DbusEventBufferMetaInfo.TIMESTAMP_OF_FIRST_EVENT, Long.toString(_timestampOfFirstEvent));
    // _timestampOfLatestDataEvent
    mi.setVal(DbusEventBufferMetaInfo.TIMESTAMP_OF_LATEST_DATA_EVENT,
            Long.toString(_timestampOfLatestDataEvent));
    // eventState
    mi.setVal(DbusEventBufferMetaInfo.EVENT_STATE, _eventState.toString());

    mi.saveAndClose();

}
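
The method above persists position(), limit(), and capacity() for each buffer so they can be reapplied after the files are re-mapped. A minimal sketch of the restore side (a hypothetical helper, not the DbusEventBuffer API): the limit is set before the position, since the position may not exceed the limit.

import java.nio.ByteBuffer;

public class RestoreDemo {
    // hypothetical helper: reapply previously saved meta info to a buffer
    static void restore(ByteBuffer buf, int savedPosition, int savedLimit) {
        buf.limit(savedLimit);        // set the limit first...
        buf.position(savedPosition);  // ...then the position, which must not exceed it
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        restore(buf, 10, 48);
        System.out.println(buf);      // java.nio.HeapByteBuffer[pos=10 lim=48 cap=64]
    }
}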

From source file:com.linkedin.databus.core.DbusEventBuffer.java

/**
 * Copies the current event bytes from the staging buffer to the main buffer. Previous calls must
 * ensure that the target write area determined by writePos is already free.
 * @param readPos         determines the region in the staging buffer to copy from
 * @param writePos        determines the region in the main buffer to write to
 */
private void copyReadEventToEventBuffer(ReadEventsReadPosition readPos, ReadEventsWritePosition writePos,
        Iterable<InternalDatabusEventsListener> eventListeners, DbusEventsStatisticsCollector statsCollector,
        boolean logDebugEnabled) {
    final ByteBuffer readBuffer = readPos.getReadBuffer();
    final int numBytesToWrite = readPos.bytesProcessed();
    final int writeStartOfs = writePos.getCurOfs();
    final ByteBuffer curBuf = writePos.getCurBuf();

    assert writePos.getNextFree().bufferGenId() - _head.bufferGenId() <= 1 : writePos.toString() + " buf:"
            + toString();

    assert curBuf.limit() >= writePos.getNextFreeOfs() : "curBuf:" + curBuf + "; " + writePos;

    final int oldLimit = readBuffer.limit();
    readBuffer.mark();
    readBuffer.position(readPos.getReadStart());
    readBuffer.limit(readPos.getPosition());

    // Set the limit/position
    curBuf.position(writeStartOfs);
    if (LOG.isDebugEnabled()) {
        LOG.debug("copying from " + readBuffer + " into " + writePos.getCurBuf() + "head:" + _head + " tail:"
                + _tail);
    }
    curBuf.put(readBuffer); // copy _readBuffer
    readBuffer.limit(oldLimit);
    readBuffer.reset();

    if (numBytesToWrite > 0) {
        // update index and call listeners on each event (may rewrite event)
        updateNewReadEvent(readPos, writePos, statsCollector, eventListeners, logDebugEnabled);
        if (readPos.getLastSeenStgWin() > _seenEndOfPeriodScn) {
            _seenEndOfPeriodScn = readPos.getLastSeenStgWin(); // this is end of period for this SCN
        }
    }
    if (logDebugEnabled)
        LOG.debug("Tail is set to :" + _tail + ", Head is at :" + _head);

    assert (_head.bufferIndex() != _tail.bufferIndex() || _head.getPosition() < _tail.getPosition()
            || _head.bufferOffset() < writePos.getCurBuf().limit());
}
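
copyReadEventToEventBuffer bounds the bulk put by saving the staging buffer's old limit, shrinking the limit to the end of the region to copy, and restoring it afterwards. A minimal sketch of the same save/bound/restore pattern in isolation:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class BoundedCopyDemo {
    public static void main(String[] args) {
        ByteBuffer src = ByteBuffer.wrap("hello world".getBytes(StandardCharsets.US_ASCII));
        ByteBuffer dst = ByteBuffer.allocate(16);

        int oldLimit = src.limit();   // remember the original limit
        src.position(0);
        src.limit(5);                 // temporarily bound the region to copy ("hello")

        dst.put(src);                 // the bulk put copies only up to src's limit

        src.limit(oldLimit);          // restore the saved limit
        System.out.println(new String(dst.array(), 0, dst.position(), StandardCharsets.US_ASCII));
    }
}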

From source file:com.linkedin.databus.core.DbusEventBuffer.java

private int readEventsInternal(ReadableByteChannel readChannel,
        Iterable<InternalDatabusEventsListener> eventListeners, DbusEventsStatisticsCollector statsCollector)
        throws InvalidEventException {
    final boolean logDebugEnabled = _log.isDebugEnabled();

    ReadEventsReadPosition readPos = new ReadEventsReadPosition();
    ReadEventsWritePosition writePos = new ReadEventsWritePosition();

    _readBufferLock.lock();
    try {
        _eventState = WindowState.IN_READ;

        boolean mightHaveMoreData = true;
        //ensuring index is updated correctly if a control event of preceding window doesn't appear
        //first (no start() called)
        if (_scnIndex.isEnabled() && _scnIndex.isEmpty()) {
            _scnIndex.setUpdateOnNext(true);
        }
        try {
            while (mightHaveMoreData) {
                final ByteBuffer readBuffer = readPos.getReadBuffer();
                boolean success = readEventsFromChannel(readChannel, readBuffer, logDebugEnabled);
                readPos.startIteration();

                final int numBytesRead = readPos.bytesRemaining();

                //if there is an error we'll try to process whatever was read but stop after that
                mightHaveMoreData = success && (numBytesRead > 0)
                        && (readBuffer.position() == readBuffer.limit());

                if (numBytesRead > 0) {
                    _queueLock.lock();
                    try {
                        if (isClosed()) {
                            LOG.warn(
                                    "stopping attempt to read more events into a buffer while it is closed. readPos="
                                            + readPos + "; buf=" + this.toString());
                            return 0;
                        }
                        try {
                            _scnIndex.assertHeadPosition(_head.getRealPosition());
                            _bufferPositionParser.assertSpan(_head.getPosition(),
                                    _currentWritePosition.getPosition(), logDebugEnabled);
                        } catch (RuntimeException re) {
                            _log.fatal("Got runtime Exception :", re);
                            _log.fatal("Event Buffer is :" + toString());
                            _scnIndex.printVerboseString(_log, Level.DEBUG);
                            throw re;
                        }

                        readBuffer.flip();
                        boolean hasMoreInStgBuffer = true;
                        while (hasMoreInStgBuffer && readPos.hasNext()) {
                            writePos.startNewIteration();

                            //figure out the boundary of events at which we can write
                            //leave one byte at the end, to distinguish between a finalized full ByteBuffer
                            //(limit <= capacity - 1) and a ByteBuffer that is still being written to
                            //(limit == capacity)
                            final int contiguousCapacity = writePos.getCurBuf().capacity()
                                    - writePos.getCurOfs() - 1;

                            final ReadEventsScanStatus eventScanStatus = readPos.startEventProcessing();
                            switch (eventScanStatus) {
                            case OK: {
                                final int curEventSize = readPos.getCurEvent().size();
                                if (readPos.bytesProcessed() + curEventSize > contiguousCapacity) {
                                    //not enough space to fit event in the target buffer
                                    if (0 == writePos.getCurOfs()) {
                                        //event bigger than the ByteBuffer capacity
                                        throw new InvalidEventException("event too big to fit into buffer"
                                                + "; size:" + curEventSize + "; event:" + readPos.getCurEvent()
                                                + "; " + readPos + "; buffer.capacity:"
                                                + writePos.getCurBuf().capacity());
                                    } else {
                                        if (logDebugEnabled)
                                            _log.debug("unable to fit event with size "
                                                    + readPos.getCurEvent().size());

                                        //if we could not fit all the data in the destination ByteBuffer,
                                        //we should ensure that we clear up any remaining data in the
                                        //ByteBuffer.
                                        long nextBufferPos = _bufferPositionParser
                                                .incrementIndex(writePos.getCurPos(), _buffers);
                                        boolean interrupted = ensureFreeSpace(writePos.getCurPos(),
                                                nextBufferPos, logDebugEnabled);
                                        if (interrupted) {
                                            _log.warn("ensureFree space interrupted: " + readPos + " "
                                                    + writePos);
                                            return readPos.getNumReadEvents();
                                        }
                                        assert assertBuffersLimits();

                                        writePos.moveToNextBuffer();
                                        _tail.copy(_currentWritePosition);
                                        assert assertBuffersLimits();
                                    }
                                } else {
                                    //we can fit the event in the target buffer
                                    readPos.eventAccepted(); //done with processing in the stg buffer

                                    //how are we on free space?
                                    boolean interrupted = ensureFreeSpace(writePos.getCurPos(),
                                            writePos.getCurPos() + curEventSize, logDebugEnabled);
                                    if (interrupted) {
                                        _log.warn("ensureFree space interrupted:" + readPos + " " + writePos);
                                        return readPos.getNumReadEvents();
                                    }

                                    writePos.determineWriteEnd(readPos);

                                    //we are good on free space, about time to copy the damn data
                                    copyReadEventToEventBuffer(readPos, writePos, eventListeners,
                                            statsCollector, logDebugEnabled);
                                }
                                break;
                            }
                            case PARTIAL_EVENT: {
                                final int curCapacity = readBuffer.capacity();
                                if (logDebugEnabled)
                                    _log.debug("partial event at " + readPos);
                                if (0 != readPos.getReadStart()) {
                                    //compact stg buffer and try to read more data from the network
                                    compactStgBuffer(readPos, logDebugEnabled);
                                    hasMoreInStgBuffer = false;
                                } else if (curCapacity >= getMaxReadBufferCapacity()) {
                                    //we couldn't read an entire event in the staging buffer and we are already
                                    //at max allowed size of the read buffer
                                    throw new InvalidEventException(
                                            "event too big to fit in staging buffer with capacity : "
                                                    + curCapacity + "; readPos:" + readPos
                                                    + "; consider increasing connectionDefaults.eventBuffer.maxSize"
                                                    + " or connectionDefaults.eventBuffer.maxEventSize if set explicitly.");
                                } else {
                                    //grow the staging buffer faster for small sizes and slower for big sizes
                                    //intuitively: <= 5K - 3x, 25K - 2x, 125K - 1.6x, 625K - 1.5x and so on
                                    final double growFactor = curCapacity <= 5 * 1024 ? 3.0
                                            : 1.0 + 2.0 * LN_5 / Math.log(curCapacity / 1024.0);
                                    final int newSize = Math.min(getMaxReadBufferCapacity(),
                                            (int) (growFactor * curCapacity));
                                    if (newSize < curCapacity) {
                                        throw new DatabusRuntimeException("unexpected readbuffer size: "
                                                + newSize + "; readBuffer=" + readBuffer
                                                + "; readBufferCapacity=" + getMaxReadBufferCapacity());
                                    }
                                    readPos.growReadBuffer(newSize);
                                    hasMoreInStgBuffer = false;
                                }
                                break;
                            }
                            case SCN_REGRESSION: {
                                // events should be monotonically increasing
                                // skipping the event and all the events before it (same buffer should have
                                // only increasing events)
                                String errMsg = logSequenceErrorPackets(readPos);
                                _log.warn("got an old event: seq=" + readPos.getSeq() + ", " + errMsg);
                                readPos.eventSkipped();
                                break;
                            }
                            case INVALID_EVENT: {
                                if (null != statsCollector)
                                    statsCollector
                                            .registerEventError(DbusEventInternalReadable.EventScanStatus.ERR);
                                throw new InvalidEventException();
                            }
                            case MISSING_EOP: {
                                String errMsg = logSequenceErrorPackets(readPos);
                                _log.error("detected missing EOP: " + errMsg);
                                throw new InvalidEventException(errMsg);
                            }
                            default:
                                throw new IllegalStateException("unknown scan status: " + eventScanStatus);
                            }
                        }

                        if (!readPos.hasNext()) {
                            readBuffer.clear();
                        }
                    } finally {
                        _queueLock.unlock();
                    }
                }
            }
        } finally {
            if (null != statsCollector) {
                statsCollector.registerBufferMetrics(getMinScn(), this.lastWrittenScn(), this.getPrevScn(),
                        this.getBufferFreeSpace());
                statsCollector.registerTimestampOfFirstEvent(_timestampOfFirstEvent);
            }
            _eventState = WindowState.ENDED;
        }
    } catch (RuntimeException re) {
        _log.error("Got runtime exception in readEvents: " + re.getMessage(), re);
        _log.error("Buffer State: " + toString());
        throw re;
    } finally {
        _readBufferLock.unlock();
        writePos.close();
    }

    if (logDebugEnabled)
        _log.debug("readEvents result: " + readPos + " " + writePos);

    return readPos.getNumReadEvents();
}
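
readEventsInternal treats position() == limit() after a channel read as a sign that the staging buffer was filled and the channel may still hold more data. A small standalone sketch of that check, using an in-memory channel for illustration:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class FullBufferDemo {
    public static void main(String[] args) throws IOException {
        ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(new byte[100]));
        ByteBuffer stg = ByteBuffer.allocate(64);

        int read = ch.read(stg);
        // position == limit means the staging buffer was filled to its limit,
        // so the channel may still have more data pending
        boolean mightHaveMoreData = read > 0 && stg.position() == stg.limit();
        System.out.println("read=" + read + ", mightHaveMoreData=" + mightHaveMoreData);
    }
}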

From source file:com.healthmarketscience.jackcess.Table.java

/**
 * Serialize a row of Objects into a byte buffer.
 *
 * @param rowArray row data, expected to be correct length for this table
 * @param buffer buffer to which to write the row data
 * @param minRowSize min size for result row
 * @param rawVarValues optional, pre-written values for var length columns
 *                     (enables re-use of previously written values).
 * @return the given buffer, filled with the row data
 */
private ByteBuffer createRow(Object[] rowArray, ByteBuffer buffer, int minRowSize,
        Map<Column, byte[]> rawVarValues) throws IOException {
    buffer.putShort(_maxColumnCount);
    NullMask nullMask = new NullMask(_maxColumnCount);

    //Fixed length column data comes first
    int fixedDataStart = buffer.position();
    int fixedDataEnd = fixedDataStart;
    for (Column col : _columns) {

        if (col.isVariableLength()) {
            continue;
        }

        Object rowValue = col.getRowValue(rowArray);

        if (col.getType() == DataType.BOOLEAN) {

            if (Column.toBooleanValue(rowValue)) {
                //Booleans are stored in the null mask
                nullMask.markNotNull(col);
            }
            rowValue = null;
        }

        if (rowValue != null) {

            // we have a value to write
            nullMask.markNotNull(col);

            // remainingRowLength is ignored when writing fixed length data
            buffer.position(fixedDataStart + col.getFixedDataOffset());
            buffer.put(col.write(rowValue, 0));
        }

        // always insert space for the entire fixed data column length
        // (including null values), access expects the row to always be at least
        // big enough to hold all fixed values
        buffer.position(fixedDataStart + col.getFixedDataOffset() + col.getLength());

        // keep track of the end of fixed data
        if (buffer.position() > fixedDataEnd) {
            fixedDataEnd = buffer.position();
        }

    }

    // reposition at end of fixed data
    buffer.position(fixedDataEnd);

    // only need this info if this table contains any var length data
    if (_maxVarColumnCount > 0) {

        int maxRowSize = getFormat().MAX_ROW_SIZE;

        // figure out how much space remains for var length data.  first,
        // account for already written space
        maxRowSize -= buffer.position();
        // now, account for trailer space
        int trailerSize = (nullMask.byteSize() + 4 + (_maxVarColumnCount * 2));
        maxRowSize -= trailerSize;

        // for each non-null long value column we need to reserve a small
        // amount of space so that we don't end up running out of row space
        // later by being too greedy
        for (Column varCol : _varColumns) {
            if ((varCol.getType().isLongValue()) && (varCol.getRowValue(rowArray) != null)) {
                maxRowSize -= getFormat().SIZE_LONG_VALUE_DEF;
            }
        }

        //Now write out variable length column data
        short[] varColumnOffsets = new short[_maxVarColumnCount];
        int varColumnOffsetsIndex = 0;
        for (Column varCol : _varColumns) {
            short offset = (short) buffer.position();
            Object rowValue = varCol.getRowValue(rowArray);
            if (rowValue != null) {
                // we have a value
                nullMask.markNotNull(varCol);

                byte[] rawValue = null;
                ByteBuffer varDataBuf = null;
                if (((rawValue = rawVarValues.get(varCol)) != null) && (rawValue.length <= maxRowSize)) {
                    // save time and potentially db space, re-use raw value
                    varDataBuf = ByteBuffer.wrap(rawValue);
                } else {
                    // write column value
                    varDataBuf = varCol.write(rowValue, maxRowSize);
                }

                maxRowSize -= varDataBuf.remaining();
                if (varCol.getType().isLongValue()) {
                    // we already accounted for some amount of the long value data
                    // above.  add that space back so we don't double count
                    maxRowSize += getFormat().SIZE_LONG_VALUE_DEF;
                }
                buffer.put(varDataBuf);
            }

            // we do a loop here so that we fill in offsets for deleted columns
            while (varColumnOffsetsIndex <= varCol.getVarLenTableIndex()) {
                varColumnOffsets[varColumnOffsetsIndex++] = offset;
            }
        }

        // fill in offsets for any remaining deleted columns
        while (varColumnOffsetsIndex < varColumnOffsets.length) {
            varColumnOffsets[varColumnOffsetsIndex++] = (short) buffer.position();
        }

        // record where we stopped writing
        int eod = buffer.position();

        // insert padding if necessary
        padRowBuffer(buffer, minRowSize, trailerSize);

        buffer.putShort((short) eod); //EOD marker

        //Now write out variable length offsets
        //Offsets are stored in reverse order
        for (int i = _maxVarColumnCount - 1; i >= 0; i--) {
            buffer.putShort(varColumnOffsets[i]);
        }
        buffer.putShort(_maxVarColumnCount); //Number of var length columns

    } else {

        // insert padding for row w/ no var cols
        padRowBuffer(buffer, minRowSize, nullMask.byteSize());
    }

    nullMask.write(buffer); //Null mask
    buffer.flip();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Creating new data block:\n" + ByteUtil.toHexString(buffer, buffer.limit()));
    }
    return buffer;
}
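
After the row is assembled, buffer.flip() sets the limit to the number of bytes written, which is why the debug statement can pass buffer.limit() as the length to dump. A tiny sketch of that flip-then-limit idiom:

import java.nio.ByteBuffer;

public class RowLengthDemo {
    public static void main(String[] args) {
        ByteBuffer row = ByteBuffer.allocate(128);
        row.putShort((short) 3);   // column count
        row.putInt(42);            // a fixed-length value
        row.flip();                // position -> 0, limit -> number of bytes written

        System.out.println("serialized row length: " + row.limit());  // 6
    }
}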