Example usage for java.nio ByteBuffer reset

Introduction

This page collects example usages of the java.nio ByteBuffer reset method, taken from real-world source files.

Prototype

public final Buffer reset() 

Document

Resets this buffer's position to the previously marked position; if no mark has been set, an InvalidMarkException is thrown.
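
For orientation before the real-world examples below, here is a minimal standalone sketch (not taken from any of the source files listed under Usage) that shows how mark() and reset() work together to re-read part of a buffer:

import java.nio.ByteBuffer;
import java.nio.InvalidMarkException;

public class ByteBufferResetExample {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.wrap(new byte[] { 10, 20, 30, 40 });

        buffer.get();   // read 10, position is now 1
        buffer.mark();  // remember position 1
        buffer.get();   // read 20, position is now 2
        buffer.get();   // read 30, position is now 3

        buffer.reset(); // position jumps back to the mark (1)
        System.out.println(buffer.get()); // prints 20 again

        // Calling reset() when no mark has been set (or after the mark has
        // been invalidated) throws InvalidMarkException.
        try {
            ByteBuffer unmarked = ByteBuffer.allocate(8);
            unmarked.reset();
        } catch (InvalidMarkException e) {
            System.out.println("reset() without a mark: " + e);
        }
    }
}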

Usage

From source file:com.offbynull.portmapper.pcp.PcpController.java

/**
 * Constructs a {@link PcpController} object.
 * @param random used to generate nonce values for requests
 * @param gatewayAddress address of router/gateway
 * @param selfAddress address of this machine on the interface that can talk to the router/gateway
 * @param listener a listener to listen for all PCP packets from this router
 * @throws NullPointerException if any argument is {@code null}
 * @throws IOException if there is a problem initializing the UDP channels
 */
public PcpController(Random random, InetAddress gatewayAddress, InetAddress selfAddress,
        final PcpControllerListener listener) throws IOException {
    Validate.notNull(random);
    Validate.notNull(gatewayAddress);
    Validate.notNull(selfAddress);

    this.gateway = new InetSocketAddress(gatewayAddress, 5351);

    List<DatagramChannel> channels = new ArrayList<>(3);

    try {
        unicastChannel = DatagramChannel.open();
        unicastChannel.configureBlocking(false);
        unicastChannel.socket().bind(new InetSocketAddress(0));

        ipv4MulticastChannel = DatagramChannel.open(StandardProtocolFamily.INET);
        ipv4MulticastChannel.configureBlocking(false);
        ipv4MulticastChannel.setOption(StandardSocketOptions.SO_REUSEADDR, true);
        ipv4MulticastChannel.socket().bind(new InetSocketAddress(5350));
        NetworkUtils.multicastListenOnAllIpv4InterfaceAddresses(ipv4MulticastChannel);

        ipv6MulticastChannel = DatagramChannel.open(StandardProtocolFamily.INET6);
        ipv6MulticastChannel.configureBlocking(false);
        ipv6MulticastChannel.setOption(StandardSocketOptions.SO_REUSEADDR, true);
        ipv6MulticastChannel.socket().bind(new InetSocketAddress(5350));
        NetworkUtils.multicastListenOnAllIpv6InterfaceAddresses(ipv6MulticastChannel);
    } catch (IOException ioe) {
        IOUtils.closeQuietly(unicastChannel);
        IOUtils.closeQuietly(ipv4MulticastChannel);
        IOUtils.closeQuietly(ipv6MulticastChannel);
        throw ioe;
    }

    channels.add(unicastChannel);
    channels.add(ipv4MulticastChannel);
    channels.add(ipv6MulticastChannel);

    this.communicator = new UdpCommunicator(channels);
    this.selfAddress = selfAddress;
    this.random = random;

    this.communicator.startAsync().awaitRunning();

    if (listener != null) {
        this.communicator.addListener(new UdpCommunicatorListener() {

            @Override
            public void incomingPacket(InetSocketAddress sourceAddress, DatagramChannel channel,
                    ByteBuffer packet) {
                CommunicationType type;
                if (channel == unicastChannel) {
                    type = CommunicationType.UNICAST;
                } else if (channel == ipv4MulticastChannel || channel == ipv6MulticastChannel) {
                    type = CommunicationType.MULTICAST;
                } else {
                    return; // unknown, do nothing
                }

                try {
                    packet.mark();
                    listener.incomingResponse(type, new AnnouncePcpResponse(packet));
                } catch (BufferUnderflowException | IllegalArgumentException e) { // NOPMD
                    // ignore
                } finally {
                    packet.reset();
                }

                try {
                    packet.mark();
                    listener.incomingResponse(type, new MapPcpResponse(packet));
                } catch (BufferUnderflowException | IllegalArgumentException e) { // NOPMD
                    // ignore
                } finally {
                    packet.reset();
                }

                try {
                    packet.mark();
                    listener.incomingResponse(type, new PeerPcpResponse(packet));
                } catch (BufferUnderflowException | IllegalArgumentException e) { // NOPMD
                    // ignore
                } finally {
                    packet.reset();
                }
            }
        });
    }
}

From source file:org.zuinnote.hadoop.bitcoin.format.BitcoinBlockReader.java

/**
* Reads an identifier from a raw Bitcoin block. Note: does not change the ByteBuffer position.
*
* @param rawByteBuffer ByteBuffer as read by readRawBlock
* @return byte array containing hashMerkleRoot followed by hashPrevBlock
*
*/
public byte[] getKeyFromRawBlock(ByteBuffer rawByteBuffer) {
    rawByteBuffer.mark();
    byte[] magicNo = new byte[4];
    byte[] hashMerkleRoot = new byte[32];
    byte[] hashPrevBlock = new byte[32];
    // magic no (skip)
    rawByteBuffer.get(magicNo, 0, 4);
    // blocksize (skip)
    int currentBlockSize = rawByteBuffer.getInt();
    // version (skip)
    int currentVersion = rawByteBuffer.getInt();
    // hashPrevBlock
    rawByteBuffer.get(hashPrevBlock, 0, 32);
    // hashMerkleRoot
    rawByteBuffer.get(hashMerkleRoot, 0, 32);
    byte[] result = new byte[hashMerkleRoot.length + hashPrevBlock.length];
    for (int i = 0; i < hashMerkleRoot.length; i++) {
        result[i] = hashMerkleRoot[i];
    }
    for (int j = 0; j < hashPrevBlock.length; j++) {
        result[j + hashMerkleRoot.length] = hashPrevBlock[j];
    }
    rawByteBuffer.reset();
    return result;
}

From source file:com.slytechs.capture.file.editor.AbstractRawIterator.java

public boolean verifyAdditionalRecords(final ByteBuffer buffer, final int count)
        throws EOFException, IOException {

    buffer.reset();

    final int MAX_HEADER_LENGTH = 24;
    final ByteBuffer view = BufferUtils.duplicate(buffer);
    final int capacity = view.capacity();
    boolean status = true;

    for (int i = 0; i < count && view.position() + MAX_HEADER_LENGTH < capacity; i++) {
        view.mark();
        long length = headerReader.readLength(view);
        int p = view.position() + (int) length;

        if (pattern.match(view) == false) {
            status = false;
            break;
        }
        view.reset();

        if (p + MAX_HEADER_LENGTH > view.capacity()) {
            break;
        }

        view.limit(p + MAX_HEADER_LENGTH);
        view.position(p);
    }

    return status;
}

From source file:org.apache.jackrabbit.oak.plugins.segment.file.TarReader.java

/**
 * Tries to read an existing index from the given tar file. The index is
 * returned if it is found and looks valid (correct checksum, passes
 * sanity checks).
 *
 * @param file tar file
 * @param name name of the tar file, for logging purposes
 * @return tar index, or {@code null} if not found or not valid
 * @throws IOException if the tar file could not be read
 */
private static ByteBuffer loadAndValidateIndex(RandomAccessFile file, String name) throws IOException {
    long length = file.length();
    if (length % BLOCK_SIZE != 0 || length < 6 * BLOCK_SIZE || length > Integer.MAX_VALUE) {
        log.warn("Unexpected size {} of tar file {}", length, name);
        return null; // unexpected file size
    }

    // read the index metadata just before the two final zero blocks
    ByteBuffer meta = ByteBuffer.allocate(16);
    file.seek(length - 2 * BLOCK_SIZE - 16);
    file.readFully(meta.array());
    int crc32 = meta.getInt();
    int count = meta.getInt();
    int bytes = meta.getInt();
    int magic = meta.getInt();

    if (magic != INDEX_MAGIC) {
        return null; // magic byte mismatch
    }

    if (count < 1 || bytes < count * 24 + 16 || bytes % BLOCK_SIZE != 0) {
        log.warn("Invalid index metadata in tar file {}", name);
        return null; // impossible entry and/or byte counts
    }

    // this involves seeking backwards in the file, which might not
    // perform well, but that's OK since we only do this once per file
    ByteBuffer index = ByteBuffer.allocate(count * 24);
    file.seek(length - 2 * BLOCK_SIZE - 16 - count * 24);
    file.readFully(index.array());
    index.mark();

    CRC32 checksum = new CRC32();
    long limit = length - 2 * BLOCK_SIZE - bytes - BLOCK_SIZE;
    long lastmsb = Long.MIN_VALUE;
    long lastlsb = Long.MIN_VALUE;
    byte[] entry = new byte[24];
    for (int i = 0; i < count; i++) {
        index.get(entry);
        checksum.update(entry);

        ByteBuffer buffer = ByteBuffer.wrap(entry);
        long msb = buffer.getLong();
        long lsb = buffer.getLong();
        int offset = buffer.getInt();
        int size = buffer.getInt();

        if (lastmsb > msb || (lastmsb == msb && lastlsb > lsb)) {
            log.warn("Incorrect index ordering in tar file {}", name);
            return null;
        } else if (lastmsb == msb && lastlsb == lsb && i > 0) {
            log.warn("Duplicate index entry in tar file {}", name);
            return null;
        } else if (offset < 0 || offset % BLOCK_SIZE != 0) {
            log.warn("Invalid index entry offset in tar file {}", name);
            return null;
        } else if (size < 1 || offset + size > limit) {
            log.warn("Invalid index entry size in tar file {}", name);
            return null;
        }

        lastmsb = msb;
        lastlsb = lsb;
    }

    if (crc32 != (int) checksum.getValue()) {
        log.warn("Invalid index checksum in tar file {}", name);
        return null; // checksum mismatch
    }

    index.reset();
    return index;
}

From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java

/**
 * Tries to read an existing index from the given tar file. The index is
 * returned if it is found and looks valid (correct checksum, passes
 * sanity checks).
 *
 * @param file tar file
 * @param name name of the tar file, for logging purposes
 * @return tar index, or {@code null} if not found or not valid
 * @throws IOException if the tar file could not be read
 */
private static ByteBuffer loadAndValidateIndex(RandomAccessFile file, String name) throws IOException {
    long length = file.length();
    if (length % BLOCK_SIZE != 0 || length < 6 * BLOCK_SIZE || length > Integer.MAX_VALUE) {
        log.warn("Unexpected size {} of tar file {}", length, name);
        return null; // unexpected file size
    }

    // read the index metadata just before the two final zero blocks
    ByteBuffer meta = ByteBuffer.allocate(16);
    file.seek(length - 2 * BLOCK_SIZE - 16);
    file.readFully(meta.array());
    int crc32 = meta.getInt();
    int count = meta.getInt();
    int bytes = meta.getInt();
    int magic = meta.getInt();

    if (magic != INDEX_MAGIC) {
        return null; // magic byte mismatch
    }

    if (count < 1 || bytes < count * TarEntry.SIZE + 16 || bytes % BLOCK_SIZE != 0) {
        log.warn("Invalid index metadata in tar file {}", name);
        return null; // impossible entry and/or byte counts
    }

    // this involves seeking backwards in the file, which might not
    // perform well, but that's OK since we only do this once per file
    ByteBuffer index = ByteBuffer.allocate(count * TarEntry.SIZE);
    file.seek(length - 2 * BLOCK_SIZE - 16 - count * TarEntry.SIZE);
    file.readFully(index.array());
    index.mark();

    CRC32 checksum = new CRC32();
    long limit = length - 2 * BLOCK_SIZE - bytes - BLOCK_SIZE;
    long lastmsb = Long.MIN_VALUE;
    long lastlsb = Long.MIN_VALUE;
    byte[] entry = new byte[TarEntry.SIZE];
    for (int i = 0; i < count; i++) {
        index.get(entry);
        checksum.update(entry);

        ByteBuffer buffer = wrap(entry);
        long msb = buffer.getLong();
        long lsb = buffer.getLong();
        int offset = buffer.getInt();
        int size = buffer.getInt();

        if (lastmsb > msb || (lastmsb == msb && lastlsb > lsb)) {
            log.warn("Incorrect index ordering in tar file {}", name);
            return null;
        } else if (lastmsb == msb && lastlsb == lsb && i > 0) {
            log.warn("Duplicate index entry in tar file {}", name);
            return null;
        } else if (offset < 0 || offset % BLOCK_SIZE != 0) {
            log.warn("Invalid index entry offset in tar file {}", name);
            return null;
        } else if (size < 1 || offset + size > limit) {
            log.warn("Invalid index entry size in tar file {}", name);
            return null;
        }

        lastmsb = msb;
        lastlsb = lsb;
    }

    if (crc32 != (int) checksum.getValue()) {
        log.warn("Invalid index checksum in tar file {}", name);
        return null; // checksum mismatch
    }

    index.reset();
    return index;
}

From source file:com.healthmarketscience.jackcess.Table.java

/**
 * Reads the column data from the given row buffer.  Leaves limit unchanged.
 * Caches the returned value in the rowState.
 */
private static Object getRowColumn(JetFormat format, ByteBuffer rowBuffer, Column column, RowState rowState,
        Map<Column, byte[]> rawVarValues) throws IOException {
    byte[] columnData = null;
    try {

        NullMask nullMask = rowState.getNullMask(rowBuffer);
        boolean isNull = nullMask.isNull(column);
        if (column.getType() == DataType.BOOLEAN) {
            // Boolean values are stored in the null mask.  see note about
            // caching below
            return rowState.setRowValue(column.getColumnIndex(), Boolean.valueOf(!isNull));
        } else if (isNull) {
            // well, that's easy! (no need to update cache w/ null)
            return null;
        }

        // reset position to row start
        rowBuffer.reset();

        // locate the column data bytes
        int rowStart = rowBuffer.position();
        int colDataPos = 0;
        int colDataLen = 0;
        if (!column.isVariableLength()) {

            // read fixed length value (non-boolean at this point)
            int dataStart = rowStart + format.OFFSET_COLUMN_FIXED_DATA_ROW_OFFSET;
            colDataPos = dataStart + column.getFixedDataOffset();
            colDataLen = column.getType().getFixedSize(column.getLength());

        } else {
            int varDataStart;
            int varDataEnd;

            if (format.SIZE_ROW_VAR_COL_OFFSET == 2) {

                // read simple var length value
                int varColumnOffsetPos = (rowBuffer.limit() - nullMask.byteSize() - 4)
                        - (column.getVarLenTableIndex() * 2);

                varDataStart = rowBuffer.getShort(varColumnOffsetPos);
                varDataEnd = rowBuffer.getShort(varColumnOffsetPos - 2);

            } else {

                // read jump-table based var length values
                short[] varColumnOffsets = readJumpTableVarColOffsets(rowState, rowBuffer, rowStart, nullMask);

                varDataStart = varColumnOffsets[column.getVarLenTableIndex()];
                varDataEnd = varColumnOffsets[column.getVarLenTableIndex() + 1];
            }

            colDataPos = rowStart + varDataStart;
            colDataLen = varDataEnd - varDataStart;
        }

        // grab the column data
        columnData = new byte[colDataLen];
        rowBuffer.position(colDataPos);
        rowBuffer.get(columnData);

        if ((rawVarValues != null) && column.isVariableLength()) {
            // caller wants raw value as well
            rawVarValues.put(column, columnData);
        }

        // parse the column data.  we cache the row values in order to be able
        // to update the index on row deletion.  note, most of the returned
        // values are immutable, except for binary data (returned as byte[]),
        // but binary data shouldn't be indexed anyway.
        return rowState.setRowValue(column.getColumnIndex(), column.read(columnData));

    } catch (Exception e) {

        // cache "raw" row value.  see note about caching above
        rowState.setRowValue(column.getColumnIndex(), Column.rawDataWrapper(columnData));

        return rowState.handleRowError(column, columnData, e);
    }
}

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Reads the column data from the given row buffer.  Leaves limit unchanged.
 * Caches the returned value in the rowState.
 */
private static Object getRowColumn(JetFormat format, ByteBuffer rowBuffer, ColumnImpl column, RowState rowState,
        Map<ColumnImpl, byte[]> rawVarValues) throws IOException {
    byte[] columnData = null;
    try {

        NullMask nullMask = rowState.getNullMask(rowBuffer);
        boolean isNull = nullMask.isNull(column);
        if (column.storeInNullMask()) {
            // Boolean values are stored in the null mask.  see note about
            // caching below
            return rowState.setRowCacheValue(column.getColumnIndex(), column.readFromNullMask(isNull));
        } else if (isNull) {
            // well, that's easy! (no need to update cache w/ null)
            return null;
        }

        Object cachedValue = rowState.getRowCacheValue(column.getColumnIndex());
        if (cachedValue != null) {
            // we already have it, use it
            return cachedValue;
        }

        // reset position to row start
        rowBuffer.reset();

        // locate the column data bytes
        int rowStart = rowBuffer.position();
        int colDataPos = 0;
        int colDataLen = 0;
        if (!column.isVariableLength()) {

            // read fixed length value (non-boolean at this point)
            int dataStart = rowStart + format.OFFSET_COLUMN_FIXED_DATA_ROW_OFFSET;
            colDataPos = dataStart + column.getFixedDataOffset();
            colDataLen = column.getType().getFixedSize(column.getLength());

        } else {
            int varDataStart;
            int varDataEnd;

            if (format.SIZE_ROW_VAR_COL_OFFSET == 2) {

                // read simple var length value
                int varColumnOffsetPos = (rowBuffer.limit() - nullMask.byteSize() - 4)
                        - (column.getVarLenTableIndex() * 2);

                varDataStart = rowBuffer.getShort(varColumnOffsetPos);
                varDataEnd = rowBuffer.getShort(varColumnOffsetPos - 2);

            } else {

                // read jump-table based var length values
                short[] varColumnOffsets = readJumpTableVarColOffsets(rowState, rowBuffer, rowStart, nullMask);

                varDataStart = varColumnOffsets[column.getVarLenTableIndex()];
                varDataEnd = varColumnOffsets[column.getVarLenTableIndex() + 1];
            }

            colDataPos = rowStart + varDataStart;
            colDataLen = varDataEnd - varDataStart;
        }

        // grab the column data
        rowBuffer.position(colDataPos);
        columnData = ByteUtil.getBytes(rowBuffer, colDataLen);

        if ((rawVarValues != null) && column.isVariableLength()) {
            // caller wants raw value as well
            rawVarValues.put(column, columnData);
        }

        // parse the column data.  we cache the row values in order to be able
        // to update the index on row deletion.  note, most of the returned
        // values are immutable, except for binary data (returned as byte[]),
        // but binary data shouldn't be indexed anyway.
        return rowState.setRowCacheValue(column.getColumnIndex(), column.read(columnData));

    } catch (Exception e) {

        // cache "raw" row value.  see note about caching above
        rowState.setRowCacheValue(column.getColumnIndex(), ColumnImpl.rawDataWrapper(columnData));

        return rowState.handleRowError(column, columnData, e);
    }
}

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Reads the null mask from the given row buffer.  Leaves limit unchanged.
 */
private NullMask getRowNullMask(ByteBuffer rowBuffer) throws IOException {
    // reset position to row start
    rowBuffer.reset();

    // Number of columns in this row
    int columnCount = ByteUtil.getUnsignedVarInt(rowBuffer, getFormat().SIZE_ROW_COLUMN_COUNT);

    // read null mask
    NullMask nullMask = new NullMask(columnCount);
    rowBuffer.position(rowBuffer.limit() - nullMask.byteSize()); //Null mask at end
    nullMask.read(rowBuffer);

    return nullMask;
}

From source file:com.healthmarketscience.jackcess.Table.java

/**
 * Update the row on which the given rowState is currently positioned.
 * <p>
 * Note, this method is not generally meant to be used directly.  You should
 * use the {@link #updateCurrentRow} method or use the Cursor class, which
 * allows for more complex table interactions, e.g.
 * {@link Cursor#setCurrentRowValue} and {@link Cursor#updateCurrentRow}.
 * @usage _advanced_method_
 */
public void updateRow(RowState rowState, RowId rowId, Object... row) throws IOException {
    requireValidRowId(rowId);

    // ensure that the relevant row state is up-to-date
    ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
    int oldRowSize = rowBuffer.remaining();

    requireNonDeletedRow(rowState, rowId);

    // we need to make sure the row is the right length & type (fill with
    // null if too short).
    if ((row.length < _columns.size()) || (row.getClass() != Object[].class)) {
        row = dupeRow(row, _columns.size());
    }

    // fill in any auto-numbers (we don't allow autonumber values to be
    // modified)
    handleAutoNumbersForUpdate(row, rowBuffer, rowState);

    // hang on to the raw values of var length columns we are "keeping".  this
    // will allow us to re-use pre-written var length data, which can save
    // space for things like long value columns.
    Map<Column, byte[]> rawVarValues = (!_varColumns.isEmpty() ? new HashMap<Column, byte[]>() : null);

    // fill in any "keep value" fields
    for (Column column : _columns) {
        if (column.getRowValue(row) == Column.KEEP_VALUE) {
            column.setRowValue(row, getRowColumn(getFormat(), rowBuffer, column, rowState, rawVarValues));
        }
    }

    // generate new row bytes
    ByteBuffer newRowData = createRow(row, _singleRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
            rawVarValues);

    if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
        throw new IOException("Row size " + newRowData.limit() + " is too large");
    }

    if (!_indexDatas.isEmpty()) {
        Object[] oldRowValues = rowState.getRowValues();

        // delete old values from indexes
        for (IndexData indexData : _indexDatas) {
            indexData.deleteRow(oldRowValues, rowId);
        }
    }

    // see if we can squeeze the new row data into the existing row
    rowBuffer.reset();
    int rowSize = newRowData.remaining();

    ByteBuffer dataPage = null;
    int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

    if (oldRowSize >= rowSize) {

        // awesome, slap it in!
        rowBuffer.put(newRowData);

        // grab the page we just updated
        dataPage = rowState.getFinalPage();
        pageNumber = rowState.getFinalRowId().getPageNumber();

    } else {

        // bummer, need to find a new page for the data
        dataPage = findFreeRowSpace(rowSize, null, PageChannel.INVALID_PAGE_NUMBER);
        pageNumber = _addRowBufferH.getPageNumber();

        RowId headerRowId = rowState.getHeaderRowId();
        ByteBuffer headerPage = rowState.getHeaderPage();
        if (pageNumber == headerRowId.getPageNumber()) {
            // new row is on the same page as header row, share page
            dataPage = headerPage;
        }

        // write out the new row data (set the deleted flag on the new data row
        // so that it is ignored during normal table traversal)
        int rowNum = addDataPageRow(dataPage, rowSize, getFormat(), DELETED_ROW_MASK);
        dataPage.put(newRowData);

        // write the overflow info into the header row and clear out the
        // remaining header data
        rowBuffer = PageChannel.narrowBuffer(headerPage,
                findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
                findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
        rowBuffer.put((byte) rowNum);
        ByteUtil.put3ByteInt(rowBuffer, pageNumber);
        ByteUtil.clearRemaining(rowBuffer);

        // set the overflow flag on the header row
        int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(), getFormat());
        headerPage.putShort(headerRowIndex, (short) (headerPage.getShort(headerRowIndex) | OVERFLOW_ROW_MASK));
        if (pageNumber != headerRowId.getPageNumber()) {
            writeDataPage(headerPage, headerRowId.getPageNumber());
        }
    }

    // update the indexes
    for (IndexData indexData : _indexDatas) {
        indexData.addRow(row, rowId);
    }

    writeDataPage(dataPage, pageNumber);

    updateTableDefinition(0);
}

From source file:com.linkedin.databus.core.DbusEventBuffer.java

/**
 * Copies the current event bytes from the staging buffer to the main buffer. Previous calls must
 * ensure that the target write area determined by writePos is already free.
 * @param readPos         determines the region in the staging buffer to copy from
 * @param writePos        determines the region in the main buffer to write to
 */
private void copyReadEventToEventBuffer(ReadEventsReadPosition readPos, ReadEventsWritePosition writePos,
        Iterable<InternalDatabusEventsListener> eventListeners, DbusEventsStatisticsCollector statsCollector,
        boolean logDebugEnabled) {
    final ByteBuffer readBuffer = readPos.getReadBuffer();
    final int numBytesToWrite = readPos.bytesProcessed();
    final int writeStartOfs = writePos.getCurOfs();
    final ByteBuffer curBuf = writePos.getCurBuf();

    assert writePos.getNextFree().bufferGenId() - _head.bufferGenId() <= 1 : writePos.toString() + " buf:"
            + toString();

    assert curBuf.limit() >= writePos.getNextFreeOfs() : "curBuf:" + curBuf + "; " + writePos;

    final int oldLimit = readBuffer.limit();
    readBuffer.mark();
    readBuffer.position(readPos.getReadStart());
    readBuffer.limit(readPos.getPosition());

    // Set the limit/position
    curBuf.position(writeStartOfs);
    if (LOG.isDebugEnabled()) {
        LOG.debug("copying from " + readBuffer + " into " + writePos.getCurBuf() + "head:" + _head + " tail:"
                + _tail);
    }
    curBuf.put(readBuffer); // copy _readBuffer
    readBuffer.limit(oldLimit);
    readBuffer.reset();

    if (numBytesToWrite > 0) {
        // update index and call listeners on each event (may rewrite event)
        updateNewReadEvent(readPos, writePos, statsCollector, eventListeners, logDebugEnabled);
        if (readPos.getLastSeenStgWin() > _seenEndOfPeriodScn) {
            _seenEndOfPeriodScn = readPos.getLastSeenStgWin(); // this is end of period for this SCN
        }
    }
    if (logDebugEnabled)
        LOG.debug("Tail is set to :" + _tail + ", Head is at :" + _head);

    assert (_head.bufferIndex() != _tail.bufferIndex() || _head.getPosition() < _tail.getPosition()
            || _head.bufferOffset() < writePos.getCurBuf().limit());
}