Example usage for java.nio ByteBuffer remaining

List of usage examples for java.nio ByteBuffer remaining

Introduction

On this page you can find example usage of java.nio.ByteBuffer.remaining().

Prototype

public final int remaining() 

Document

Returns the number of remaining elements in this buffer, that is, limit - position.
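
As a minimal sketch (illustrative only, not taken from any of the projects below), the relationship between position, limit, and remaining() looks like this:

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16); // position = 0, limit = 16
        System.out.println(buf.remaining());      // prints 16 (limit - position)

        buf.put(new byte[6]);                     // position advances to 6
        System.out.println(buf.remaining());      // prints 10, the space left to write

        buf.flip();                               // limit = 6, position = 0
        System.out.println(buf.remaining());      // prints 6, the bytes left to read
    }
}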

Usage

From source file:org.apache.qpid.server.store.berkeleydb.AbstractBDBMessageStore.java

/**
 * Fills the provided ByteBuffer with as much content for the specified message as possible, starting
 * from the specified offset in the message.
 *
 * @param messageId The message to get the data for.
 * @param offset    The offset of the data within the message.
 * @param dst       The destination of the content read back
 *
 * @return The number of bytes inserted into the destination
 *
 * @throws AMQStoreException If the operation fails for any reason, or if the specified message does not exist.
 */
public int getContent(long messageId, int offset, ByteBuffer dst) throws AMQStoreException {
    DatabaseEntry contentKeyEntry = new DatabaseEntry();
    LongBinding.longToEntry(messageId, contentKeyEntry);
    DatabaseEntry value = new DatabaseEntry();
    ContentBinding contentTupleBinding = ContentBinding.getInstance();

    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Message Id: " + messageId + " Getting content body from offset: " + offset);
    }

    try {

        int written = 0;
        OperationStatus status = _messageContentDb.get(null, contentKeyEntry, value, LockMode.READ_UNCOMMITTED);
        if (status == OperationStatus.SUCCESS) {
            byte[] dataAsBytes = contentTupleBinding.entryToObject(value);
            int size = dataAsBytes.length;
            if (offset > size) {
                throw new RuntimeException("Offset " + offset + " is greater than message size " + size
                        + " for message id " + messageId + "!");

            }

            written = size - offset;
            if (written > dst.remaining()) {
                written = dst.remaining();
            }

            dst.put(dataAsBytes, offset, written);
        }
        return written;
    } catch (DatabaseException e) {
        throw new AMQStoreException(
                "Error getting AMQMessage with id " + messageId + " from the database: " + e.getMessage(), e);
    }
}
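
A hypothetical caller of the method above might look as follows; the store and messageId variables are assumptions, but the sketch shows that getContent never writes more than dst.remaining() bytes per call, so a fixed-size buffer can be reused to page through a large message:

// Hypothetical usage; 'store' (an AbstractBDBMessageStore) and 'messageId' are assumed to exist.
ByteBuffer dst = ByteBuffer.allocate(4096);
int offset = 0;
int read;
do {
    dst.clear();                                     // remaining() is 4096 again
    read = store.getContent(messageId, offset, dst); // writes at most dst.remaining() bytes
    dst.flip();
    // ... consume 'read' bytes from dst ...
    offset += read;
} while (read == dst.capacity());                    // a full buffer may mean more content remains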

From source file:com.amazonaws.services.kinesis.producer.KinesisProducer.java

/**
 * Put a record asynchronously. A {@link ListenableFuture} is returned that
 * can be used to retrieve the result, either by polling or by registering a
 * callback.
 * 
 * <p>
 * The return value can be disregarded if you do not wish to process the
 * result. Under the covers, the KPL will automatically reattempt puts in
 * case of transient errors (including throttling). A failed result is
 * generally returned only if an irrecoverable error is detected (e.g.
 * trying to put to a stream that doesn't exist), or if the record expires.
 *
 * <p>
 * <b>Thread safe.</b>
 * 
 * <p>
 * To add a listener to the future:
 * <p>
 * <code>
 * ListenableFuture&lt;UserRecordResult&gt; f = myKinesisProducer.addUserRecord(...);
 * com.google.common.util.concurrent.Futures.addCallback(f, callback, executor);
 * </code>
 * <p>
 * where <code>callback</code> is an instance of
 * {@link com.google.common.util.concurrent.FutureCallback} and
 * <code>executor</code> is an instance of
 * {@link java.util.concurrent.Executor}.
 * <p>
 * <b>Important:</b>
 * <p>
 * If long-running tasks are performed in the callbacks, it is recommended
 * that a custom executor be provided when registering callbacks to ensure
 * that there are enough threads to achieve the desired level of
 * parallelism. By default, the KPL will use an internal thread pool to
 * execute callbacks, but this pool may not have a sufficient number of
 * threads if a large number is desired.
 * <p>
 * Another option would be to hand the result off to a different component
 * for processing and keep the callback routine fast.
 * 
 * @param stream
 *            Stream to put to.
 * @param partitionKey
 *            Partition key. Length must be at least one, and at most 256
 *            (inclusive).
 * @param explicitHashKey
 *            The hash value used to explicitly determine the shard the data
 *            record is assigned to by overriding the partition key hash.
 *            Must be a valid string representation of a positive integer
 *            with value between 0 and <tt>2^128 - 1</tt> (inclusive).
 * @param data
 *            Binary data of the record. Maximum size 1MiB.
 * @return A future for the result of the put.
 * @throws IllegalArgumentException
 *             if input does not meet stated constraints
 * @throws DaemonException
 *             if the child process is dead
 * @see ListenableFuture
 * @see UserRecordResult
 * @see KinesisProducerConfiguration#setRecordTtl(long)
 * @see UserRecordFailedException
 */
public ListenableFuture<UserRecordResult> addUserRecord(String stream, String partitionKey,
        String explicitHashKey, ByteBuffer data) {
    if (stream == null) {
        throw new IllegalArgumentException("Stream name cannot be null");
    }

    stream = stream.trim();

    if (stream.length() == 0) {
        throw new IllegalArgumentException("Stream name cannot be empty");
    }

    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey cannot be null");
    }

    if (partitionKey.length() < 1 || partitionKey.length() > 256) {
        throw new IllegalArgumentException(
                "Invalid partition key. Length must be at least 1 and at most 256, got "
                        + partitionKey.length());
    }

    try {
        partitionKey.getBytes("UTF-8");
    } catch (Exception e) {
        throw new IllegalArgumentException("Partition key must be valid UTF-8");
    }

    BigInteger b = null;
    if (explicitHashKey != null) {
        explicitHashKey = explicitHashKey.trim();
        try {
            b = new BigInteger(explicitHashKey);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException(
                    "Invalid explicitHashKey, must be an integer, got " + explicitHashKey);
        }
        if (b != null) {
            if (b.compareTo(UINT_128_MAX) > 0 || b.compareTo(BigInteger.ZERO) < 0) {
                throw new IllegalArgumentException(
                        "Invalid explicitHashKey, must be greater or equal to zero and less than or equal to (2^128 - 1), got "
                                + explicitHashKey);
            }
        }
    }

    if (data != null && data.remaining() > 1024 * 1024) {
        throw new IllegalArgumentException(
                "Data must be less than or equal to 1MB in size, got " + data.remaining() + " bytes");
    }

    long id = messageNumber.getAndIncrement();
    SettableFuture<UserRecordResult> f = SettableFuture.create();
    futures.put(id, f);

    PutRecord.Builder pr = PutRecord.newBuilder().setStreamName(stream).setPartitionKey(partitionKey)
            .setData(data != null ? ByteString.copyFrom(data) : ByteString.EMPTY);
    if (b != null) {
        pr.setExplicitHashKey(b.toString(10));
    }

    Message m = Message.newBuilder().setId(id).setPutRecord(pr.build()).build();
    child.add(m);

    return f;
}
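
Tying this back to the javadoc above, a hypothetical caller might look like the following; producer, the stream name, and callbackExecutor are assumptions, and data.remaining() is the value the 1 MiB size check is applied to:

// Hypothetical usage; 'producer' (a KinesisProducer) and 'callbackExecutor' are assumed to exist.
ByteBuffer data = ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8));
ListenableFuture<UserRecordResult> f =
        producer.addUserRecord("myStream", "myPartitionKey", null, data); // null: no explicit hash key
Futures.addCallback(f, new FutureCallback<UserRecordResult>() {
    @Override
    public void onSuccess(UserRecordResult result) {
        // record was put successfully
    }

    @Override
    public void onFailure(Throwable t) {
        // irrecoverable error or record expired
    }
}, callbackExecutor);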

From source file:org.apache.qpid.server.store.derby.DerbyMessageStore.java

public int getContent(long messageId, int offset, ByteBuffer dst) {
    Connection conn = null;
    PreparedStatement stmt = null;

    try {
        conn = newAutoCommitConnection();

        stmt = conn.prepareStatement(SELECT_FROM_MESSAGE_CONTENT);
        stmt.setLong(1, messageId);
        ResultSet rs = stmt.executeQuery();

        int written = 0;

        if (rs.next()) {

            Blob dataAsBlob = rs.getBlob(1);

            final int size = (int) dataAsBlob.length();
            byte[] dataAsBytes = dataAsBlob.getBytes(1, size);

            if (offset > size) {
                throw new RuntimeException("Offset " + offset + " is greater than message size " + size
                        + " for message id " + messageId + "!");

            }

            written = size - offset;
            if (written > dst.remaining()) {
                written = dst.remaining();
            }

            dst.put(dataAsBytes, offset, written);
        }

        return written;

    } catch (SQLException e) {
        throw new RuntimeException("Error retrieving content from offset " + offset + " for message "
                + messageId + ": " + e.getMessage(), e);
    } finally {
        closePreparedStatement(stmt);
        closeConnection(conn);
    }

}

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Update the row for the given rowId.
 * @usage _advanced_method_
 */
public Object[] updateRow(RowState rowState, RowIdImpl rowId, Object... row) throws IOException {
    requireValidRowId(rowId);

    getPageChannel().startWrite();
    try {

        // ensure that the relevant row state is up-to-date
        ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
        int oldRowSize = rowBuffer.remaining();

        requireNonDeletedRow(rowState, rowId);

        // we need to make sure the row is the right length & type (fill with
        // null if too short).
        if ((row.length < _columns.size()) || (row.getClass() != Object[].class)) {
            row = dupeRow(row, _columns.size());
        }

        // hang on to the raw values of var length columns we are "keeping".  this
        // will allow us to re-use pre-written var length data, which can save
        // space for things like long value columns.
        Map<ColumnImpl, byte[]> keepRawVarValues = (!_varColumns.isEmpty() ? new HashMap<ColumnImpl, byte[]>()
                : null);

        // handle various value massaging activities
        for (ColumnImpl column : _columns) {

            Object rowValue = null;
            if (column.isAutoNumber()) {

                // fill in any auto-numbers (we don't allow autonumber values to be
                // modified)
                rowValue = getRowColumn(getFormat(), rowBuffer, column, rowState, null);

            } else {

                rowValue = column.getRowValue(row);
                if (rowValue == Column.KEEP_VALUE) {

                    // fill in any "keep value" fields (restore old value)
                    rowValue = getRowColumn(getFormat(), rowBuffer, column, rowState, keepRawVarValues);

                } else {

                    // set oldValue to something that could not possibly be a real value
                    Object oldValue = Column.KEEP_VALUE;
                    if (_indexColumns.contains(column)) {
                        // read (old) row value to help update indexes
                        oldValue = getRowColumn(getFormat(), rowBuffer, column, rowState, null);
                    } else {
                        oldValue = rowState.getRowCacheValue(column.getColumnIndex());
                    }

                    // if the old value was passed back in, we don't need to validate
                    if (oldValue != rowValue) {
                        // pass input value through column validator
                        rowValue = column.validate(rowValue);
                    }
                }
            }

            column.setRowValue(row, rowValue);
        }

        // generate new row bytes
        ByteBuffer newRowData = createRow(row, _writeRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
                keepRawVarValues);

        if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
            throw new IOException("Row size " + newRowData.limit() + " is too large");
        }

        if (!_indexDatas.isEmpty()) {

            IndexData.PendingChange idxChange = null;
            try {

                Object[] oldRowValues = rowState.getRowCacheValues();

                // check foreign keys before actually updating
                _fkEnforcer.updateRow(oldRowValues, row);

                // prepare index updates
                for (IndexData indexData : _indexDatas) {
                    idxChange = indexData.prepareUpdateRow(oldRowValues, rowId, row, idxChange);
                }

                // complete index updates
                IndexData.commitAll(idxChange);

            } catch (ConstraintViolationException ce) {
                IndexData.rollbackAll(idxChange);
                throw ce;
            }
        }

        // see if we can squeeze the new row data into the existing row
        rowBuffer.reset();
        int rowSize = newRowData.remaining();

        ByteBuffer dataPage = null;
        int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

        if (oldRowSize >= rowSize) {

            // awesome, slap it in!
            rowBuffer.put(newRowData);

            // grab the page we just updated
            dataPage = rowState.getFinalPage();
            pageNumber = rowState.getFinalRowId().getPageNumber();

        } else {

            // bummer, need to find a new page for the data
            dataPage = findFreeRowSpace(rowSize, null, PageChannel.INVALID_PAGE_NUMBER);
            pageNumber = _addRowBufferH.getPageNumber();

            RowIdImpl headerRowId = rowState.getHeaderRowId();
            ByteBuffer headerPage = rowState.getHeaderPage();
            if (pageNumber == headerRowId.getPageNumber()) {
                // new row is on the same page as header row, share page
                dataPage = headerPage;
            }

            // write out the new row data (set the deleted flag on the new data row
            // so that it is ignored during normal table traversal)
            int rowNum = addDataPageRow(dataPage, rowSize, getFormat(), DELETED_ROW_MASK);
            dataPage.put(newRowData);

            // write the overflow info into the header row and clear out the
            // remaining header data
            rowBuffer = PageChannel.narrowBuffer(headerPage,
                    findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
                    findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
            rowBuffer.put((byte) rowNum);
            ByteUtil.put3ByteInt(rowBuffer, pageNumber);
            ByteUtil.clearRemaining(rowBuffer);

            // set the overflow flag on the header row
            int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(), getFormat());
            headerPage.putShort(headerRowIndex,
                    (short) (headerPage.getShort(headerRowIndex) | OVERFLOW_ROW_MASK));
            if (pageNumber != headerRowId.getPageNumber()) {
                writeDataPage(headerPage, headerRowId.getPageNumber());
            }
        }

        writeDataPage(dataPage, pageNumber);

        updateTableDefinition(0);

    } finally {
        getPageChannel().finishWrite();
    }

    return row;
}

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Add multiple rows to this table, only writing to disk after all
 * rows have been written, and every time a data page is filled.
 * @param inRows List of Object[] row values
 */
private List<? extends Object[]> addRows(List<? extends Object[]> rows, final boolean isBatchWrite)
        throws IOException {
    if (rows.isEmpty()) {
        return rows;
    }

    getPageChannel().startWrite();
    try {

        ByteBuffer dataPage = null;
        int pageNumber = PageChannel.INVALID_PAGE_NUMBER;
        int updateCount = 0;
        int autoNumAssignCount = 0;
        try {

            List<Object[]> dupeRows = null;
            final int numCols = _columns.size();
            for (int i = 0; i < rows.size(); i++) {

                // we need to make sure the row is the right length and is an
                // Object[] (fill with null if too short).  note, if the row is
                // copied the caller will not be able to access any generated
                // auto-number value, but if they need that info they should use a
                // row array of the right size/type!
                Object[] row = rows.get(i);
                if ((row.length < numCols) || (row.getClass() != Object[].class)) {
                    row = dupeRow(row, numCols);
                    // copy the input rows to a modifiable list so we can update the
                    // elements
                    if (dupeRows == null) {
                        dupeRows = new ArrayList<Object[]>(rows);
                        rows = dupeRows;
                    }
                    // we copied the row, so put the copy back into the rows list
                    dupeRows.set(i, row);
                }

                // handle various value massaging activities
                for (ColumnImpl column : _columns) {
                    if (!column.isAutoNumber()) {
                        // pass input value through column validator
                        column.setRowValue(row, column.validate(column.getRowValue(row)));
                    }
                }

                // fill in autonumbers
                handleAutoNumbersForAdd(row);
                ++autoNumAssignCount;

                // write the row of data to a temporary buffer
                ByteBuffer rowData = createRow(row, _writeRowBufferH.getPageBuffer(getPageChannel()));

                int rowSize = rowData.remaining();
                if (rowSize > getFormat().MAX_ROW_SIZE) {
                    throw new IOException("Row size " + rowSize + " is too large");
                }

                // get page with space
                dataPage = findFreeRowSpace(rowSize, dataPage, pageNumber);
                pageNumber = _addRowBufferH.getPageNumber();

                // determine where this row will end up on the page
                int rowNum = getRowsOnDataPage(dataPage, getFormat());

                RowIdImpl rowId = new RowIdImpl(pageNumber, rowNum);

                // before we actually write the row data, we verify all the database
                // constraints.
                if (!_indexDatas.isEmpty()) {

                    IndexData.PendingChange idxChange = null;
                    try {

                        // handle foreign keys before adding to table
                        _fkEnforcer.addRow(row);

                        // prepare index updates
                        for (IndexData indexData : _indexDatas) {
                            idxChange = indexData.prepareAddRow(row, rowId, idxChange);
                        }

                        // complete index updates
                        IndexData.commitAll(idxChange);

                    } catch (ConstraintViolationException ce) {
                        IndexData.rollbackAll(idxChange);
                        throw ce;
                    }
                }

                // we have satisfied all the constraints, write the row
                addDataPageRow(dataPage, rowSize, getFormat(), 0);
                dataPage.put(rowData);

                // return rowId if desired
                if ((row.length > numCols) && (row[numCols] == ColumnImpl.RETURN_ROW_ID)) {
                    row[numCols] = rowId;
                }

                ++updateCount;
            }

            writeDataPage(dataPage, pageNumber);

            // Update tdef page
            updateTableDefinition(rows.size());

        } catch (Exception rowWriteFailure) {

            boolean isWriteFailure = isWriteFailure(rowWriteFailure);

            if (!isWriteFailure && (autoNumAssignCount > updateCount)) {
                // we assigned some autonumbers which won't get written.  attempt to
                // recover them so we don't get ugly "holes"
                restoreAutoNumbersFromAdd(rows.get(autoNumAssignCount - 1));
            }

            if (!isBatchWrite) {
                // just re-throw the original exception
                if (rowWriteFailure instanceof IOException) {
                    throw (IOException) rowWriteFailure;
                }
                throw (RuntimeException) rowWriteFailure;
            }

            // attempt to resolve a partial batch write
            if (isWriteFailure) {

                // we don't really know the status of any of the rows, so clear the
                // update count
                updateCount = 0;

            } else if (updateCount > 0) {

                // attempt to flush the rows already written to disk
                try {

                    writeDataPage(dataPage, pageNumber);

                    // Update tdef page
                    updateTableDefinition(updateCount);

                } catch (Exception flushFailure) {
                    // the flush failure is "worse" as it implies possible database
                    // corruption (failed write vs. a row failure which was not a
                    // write failure).  we don't know the status of any rows at this
                    // point (and the original failure is probably irrelevant)
                    LOG.warn("Secondary row failure which preceded the write failure", rowWriteFailure);
                    updateCount = 0;
                    rowWriteFailure = flushFailure;
                }
            }

            throw new BatchUpdateException(updateCount, rowWriteFailure);
        }

    } finally {
        getPageChannel().finishWrite();
    }

    return rows;
}

From source file:org.apache.hadoop.hbase.KeyValue.java

public KeyValue(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength,
        ByteBuffer qualifier, long ts, Type type, ByteBuffer value, List<Tag> tags) {
    this.bytes = createByteArray(row, roffset, rlength, family, foffset, flength, qualifier, 0,
            qualifier == null ? 0 : qualifier.remaining(), ts, type, value, 0,
            value == null ? 0 : value.remaining(), tags);
    this.length = bytes.length;
    this.offset = 0;
}

From source file:com.healthmarketscience.jackcess.Table.java

/**
 * Serialize a row of Objects into a byte buffer.
 *
 * @param rowArray row data, expected to be correct length for this table
 * @param buffer buffer to which to write the row data
 * @param minRowSize min size for result row
 * @param rawVarValues optional, pre-written values for var length columns
 *                     (enables re-use of previously written values).
 * @return the given buffer, filled with the row data
 */
private ByteBuffer createRow(Object[] rowArray, ByteBuffer buffer, int minRowSize,
        Map<Column, byte[]> rawVarValues) throws IOException {
    buffer.putShort(_maxColumnCount);
    NullMask nullMask = new NullMask(_maxColumnCount);

    //Fixed length column data comes first
    int fixedDataStart = buffer.position();
    int fixedDataEnd = fixedDataStart;
    for (Column col : _columns) {

        if (col.isVariableLength()) {
            continue;
        }

        Object rowValue = col.getRowValue(rowArray);

        if (col.getType() == DataType.BOOLEAN) {

            if (Column.toBooleanValue(rowValue)) {
                //Booleans are stored in the null mask
                nullMask.markNotNull(col);
            }
            rowValue = null;
        }

        if (rowValue != null) {

            // we have a value to write
            nullMask.markNotNull(col);

            // remainingRowLength is ignored when writing fixed length data
            buffer.position(fixedDataStart + col.getFixedDataOffset());
            buffer.put(col.write(rowValue, 0));
        }

        // always insert space for the entire fixed data column length
        // (including null values), access expects the row to always be at least
        // big enough to hold all fixed values
        buffer.position(fixedDataStart + col.getFixedDataOffset() + col.getLength());

        // keep track of the end of fixed data
        if (buffer.position() > fixedDataEnd) {
            fixedDataEnd = buffer.position();
        }

    }

    // reposition at end of fixed data
    buffer.position(fixedDataEnd);

    // only need this info if this table contains any var length data
    if (_maxVarColumnCount > 0) {

        int maxRowSize = getFormat().MAX_ROW_SIZE;

        // figure out how much space remains for var length data.  first,
        // account for already written space
        maxRowSize -= buffer.position();
        // now, account for trailer space
        int trailerSize = (nullMask.byteSize() + 4 + (_maxVarColumnCount * 2));
        maxRowSize -= trailerSize;

        // for each non-null long value column we need to reserve a small
        // amount of space so that we don't end up running out of row space
        // later by being too greedy
        for (Column varCol : _varColumns) {
            if ((varCol.getType().isLongValue()) && (varCol.getRowValue(rowArray) != null)) {
                maxRowSize -= getFormat().SIZE_LONG_VALUE_DEF;
            }
        }

        //Now write out variable length column data
        short[] varColumnOffsets = new short[_maxVarColumnCount];
        int varColumnOffsetsIndex = 0;
        for (Column varCol : _varColumns) {
            short offset = (short) buffer.position();
            Object rowValue = varCol.getRowValue(rowArray);
            if (rowValue != null) {
                // we have a value
                nullMask.markNotNull(varCol);

                byte[] rawValue = null;
                ByteBuffer varDataBuf = null;
                if (((rawValue = rawVarValues.get(varCol)) != null) && (rawValue.length <= maxRowSize)) {
                    // save time and potentially db space, re-use raw value
                    varDataBuf = ByteBuffer.wrap(rawValue);
                } else {
                    // write column value
                    varDataBuf = varCol.write(rowValue, maxRowSize);
                }

                maxRowSize -= varDataBuf.remaining();
                if (varCol.getType().isLongValue()) {
                    // we already accounted for some amount of the long value data
                    // above.  add that space back so we don't double count
                    maxRowSize += getFormat().SIZE_LONG_VALUE_DEF;
                }
                buffer.put(varDataBuf);
            }

            // we do a loop here so that we fill in offsets for deleted columns
            while (varColumnOffsetsIndex <= varCol.getVarLenTableIndex()) {
                varColumnOffsets[varColumnOffsetsIndex++] = offset;
            }
        }

        // fill in offsets for any remaining deleted columns
        while (varColumnOffsetsIndex < varColumnOffsets.length) {
            varColumnOffsets[varColumnOffsetsIndex++] = (short) buffer.position();
        }

        // record where we stopped writing
        int eod = buffer.position();

        // insert padding if necessary
        padRowBuffer(buffer, minRowSize, trailerSize);

        buffer.putShort((short) eod); //EOD marker

        //Now write out variable length offsets
        //Offsets are stored in reverse order
        for (int i = _maxVarColumnCount - 1; i >= 0; i--) {
            buffer.putShort(varColumnOffsets[i]);
        }
        buffer.putShort(_maxVarColumnCount); //Number of var length columns

    } else {

        // insert padding for row w/ no var cols
        padRowBuffer(buffer, minRowSize, nullMask.byteSize());
    }

    nullMask.write(buffer); //Null mask
    buffer.flip();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Creating new data block:\n" + ByteUtil.toHexString(buffer, buffer.limit()));
    }
    return buffer;
}

From source file:co.paralleluniverse.galaxy.core.Cache.java

private boolean writeData(CacheLine line, ByteBuffer data) {
    if (data.remaining() > maxItemSize)
        throw new IllegalArgumentException("Data size is " + data.remaining()
                + " bytes and exceeds the limit of " + maxItemSize + " bytes.");

    if (compareBeforeWrite) {
        if (line.data != null && data.remaining() == line.data.remaining()) {
            final int p1 = line.data.position();
            final int p2 = data.position();
            boolean modified = false;
            for (int i = 0; i < data.remaining(); i++) {
                if (line.data.get(p1 + i) != data.get(p2 + i)) {
                    modified = true;
                    break;
                }
            }
            if (!modified)
                return false;
        }
    }

    allocateLineData(line, data.remaining());
    line.data.put(data);
    line.data.flip();
    return true;
}

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Serialize a row of Objects into a byte buffer.
 *
 * @param rowArray row data, expected to be correct length for this table
 * @param buffer buffer to which to write the row data
 * @param minRowSize min size for result row
 * @param rawVarValues optional, pre-written values for var length columns
 *                     (enables re-use of previously written values).
 * @return the given buffer, filled with the row data
 */
private ByteBuffer createRow(Object[] rowArray, ByteBuffer buffer, int minRowSize,
        Map<ColumnImpl, byte[]> rawVarValues) throws IOException {
    buffer.putShort(_maxColumnCount);
    NullMask nullMask = new NullMask(_maxColumnCount);

    //Fixed length column data comes first
    int fixedDataStart = buffer.position();
    int fixedDataEnd = fixedDataStart;
    for (ColumnImpl col : _columns) {

        if (col.isVariableLength()) {
            continue;
        }

        Object rowValue = col.getRowValue(rowArray);

        if (col.storeInNullMask()) {

            if (col.writeToNullMask(rowValue)) {
                nullMask.markNotNull(col);
            }
            rowValue = null;
        }

        if (rowValue != null) {

            // we have a value to write
            nullMask.markNotNull(col);

            // remainingRowLength is ignored when writing fixed length data
            buffer.position(fixedDataStart + col.getFixedDataOffset());
            buffer.put(col.write(rowValue, 0));
        }

        // always insert space for the entire fixed data column length
        // (including null values), access expects the row to always be at least
        // big enough to hold all fixed values
        buffer.position(fixedDataStart + col.getFixedDataOffset() + col.getLength());

        // keep track of the end of fixed data
        if (buffer.position() > fixedDataEnd) {
            fixedDataEnd = buffer.position();
        }

    }

    // reposition at end of fixed data
    buffer.position(fixedDataEnd);

    // only need this info if this table contains any var length data
    if (_maxVarColumnCount > 0) {

        int maxRowSize = getFormat().MAX_ROW_SIZE;

        // figure out how much space remains for var length data.  first,
        // account for already written space
        maxRowSize -= buffer.position();
        // now, account for trailer space
        int trailerSize = (nullMask.byteSize() + 4 + (_maxVarColumnCount * 2));
        maxRowSize -= trailerSize;

        // for each non-null long value column we need to reserve a small
        // amount of space so that we don't end up running out of row space
        // later by being too greedy
        for (ColumnImpl varCol : _varColumns) {
            if ((varCol.getType().isLongValue()) && (varCol.getRowValue(rowArray) != null)) {
                maxRowSize -= getFormat().SIZE_LONG_VALUE_DEF;
            }
        }

        //Now write out variable length column data
        short[] varColumnOffsets = new short[_maxVarColumnCount];
        int varColumnOffsetsIndex = 0;
        for (ColumnImpl varCol : _varColumns) {
            short offset = (short) buffer.position();
            Object rowValue = varCol.getRowValue(rowArray);
            if (rowValue != null) {
                // we have a value
                nullMask.markNotNull(varCol);

                byte[] rawValue = null;
                ByteBuffer varDataBuf = null;
                if (((rawValue = rawVarValues.get(varCol)) != null) && (rawValue.length <= maxRowSize)) {
                    // save time and potentially db space, re-use raw value
                    varDataBuf = ByteBuffer.wrap(rawValue);
                } else {
                    // write column value
                    varDataBuf = varCol.write(rowValue, maxRowSize);
                }

                maxRowSize -= varDataBuf.remaining();
                if (varCol.getType().isLongValue()) {
                    // we already accounted for some amount of the long value data
                    // above.  add that space back so we don't double count
                    maxRowSize += getFormat().SIZE_LONG_VALUE_DEF;
                }
                buffer.put(varDataBuf);
            }

            // we do a loop here so that we fill in offsets for deleted columns
            while (varColumnOffsetsIndex <= varCol.getVarLenTableIndex()) {
                varColumnOffsets[varColumnOffsetsIndex++] = offset;
            }
        }

        // fill in offsets for any remaining deleted columns
        while (varColumnOffsetsIndex < varColumnOffsets.length) {
            varColumnOffsets[varColumnOffsetsIndex++] = (short) buffer.position();
        }

        // record where we stopped writing
        int eod = buffer.position();

        // insert padding if necessary
        padRowBuffer(buffer, minRowSize, trailerSize);

        buffer.putShort((short) eod); //EOD marker

        //Now write out variable length offsets
        //Offsets are stored in reverse order
        for (int i = _maxVarColumnCount - 1; i >= 0; i--) {
            buffer.putShort(varColumnOffsets[i]);
        }
        buffer.putShort(_maxVarColumnCount); //Number of var length columns

    } else {

        // insert padding for row w/ no var cols
        padRowBuffer(buffer, minRowSize, nullMask.byteSize());
    }

    nullMask.write(buffer); //Null mask
    buffer.flip();
    return buffer;
}

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Writes a new table defined by the given TableCreator to the database.
 * @usage _advanced_method_
 */
protected static void writeTableDefinition(TableCreator creator) throws IOException {
    // first, create the usage map page
    createUsageMapDefinitionBuffer(creator);

    // next, determine how big the table def will be (in case it will be more
    // than one page)
    JetFormat format = creator.getFormat();
    int idxDataLen = (creator.getIndexCount() * (format.SIZE_INDEX_DEFINITION + format.SIZE_INDEX_COLUMN_BLOCK))
            + (creator.getLogicalIndexCount() * format.SIZE_INDEX_INFO_BLOCK);
    int colUmapLen = creator.getLongValueColumns().size() * 10;
    int totalTableDefSize = format.SIZE_TDEF_HEADER
            + (format.SIZE_COLUMN_DEF_BLOCK * creator.getColumns().size()) + idxDataLen + colUmapLen
            + format.SIZE_TDEF_TRAILER;

    // total up the amount of space used by the column and index names (2
    // bytes per char + 2 bytes for the length)
    for (ColumnBuilder col : creator.getColumns()) {
        int nameByteLen = (col.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    for (IndexBuilder idx : creator.getIndexes()) {
        int nameByteLen = (idx.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    // now, create the table definition
    PageChannel pageChannel = creator.getPageChannel();
    ByteBuffer buffer = PageChannel.createBuffer(Math.max(totalTableDefSize, format.PAGE_SIZE));
    writeTableDefinitionHeader(creator, buffer, totalTableDefSize);

    if (creator.hasIndexes()) {
        // index row counts
        IndexData.writeRowCountDefinitions(creator, buffer);
    }

    // column definitions
    ColumnImpl.writeDefinitions(creator, buffer);

    if (creator.hasIndexes()) {
        // index and index data definitions
        IndexData.writeDefinitions(creator, buffer);
        IndexImpl.writeDefinitions(creator, buffer);
    }

    // write long value column usage map references
    for (ColumnBuilder lvalCol : creator.getLongValueColumns()) {
        buffer.putShort(lvalCol.getColumnNumber());
        TableCreator.ColumnState colState = creator.getColumnState(lvalCol);

        // owned pages umap (both are on same page)
        buffer.put(colState.getUmapOwnedRowNumber());
        ByteUtil.put3ByteInt(buffer, colState.getUmapPageNumber());
        // free space pages umap
        buffer.put(colState.getUmapFreeRowNumber());
        ByteUtil.put3ByteInt(buffer, colState.getUmapPageNumber());
    }

    //End of tabledef
    buffer.put((byte) 0xff);
    buffer.put((byte) 0xff);

    // write table buffer to database
    if (totalTableDefSize <= format.PAGE_SIZE) {

        // easy case, fits on one page
        buffer.putShort(format.OFFSET_FREE_SPACE, (short) (buffer.remaining() - 8)); // overwrite page free space
        // Write the tdef page to disk.
        pageChannel.writePage(buffer, creator.getTdefPageNumber());

    } else {

        // need to split across multiple pages
        ByteBuffer partialTdef = pageChannel.createPageBuffer();
        buffer.rewind();
        int nextTdefPageNumber = PageChannel.INVALID_PAGE_NUMBER;
        while (buffer.hasRemaining()) {

            // reset for next write
            partialTdef.clear();

            if (nextTdefPageNumber == PageChannel.INVALID_PAGE_NUMBER) {

                // this is the first page.  note, the first page already has the
                // page header, so no need to write it here
                nextTdefPageNumber = creator.getTdefPageNumber();

            } else {

                // write page header
                writeTablePageHeader(partialTdef);
            }

            // copy the next page of tdef bytes
            int curTdefPageNumber = nextTdefPageNumber;
            int writeLen = Math.min(partialTdef.remaining(), buffer.remaining());
            partialTdef.put(buffer.array(), buffer.position(), writeLen);
            ByteUtil.forward(buffer, writeLen);

            if (buffer.hasRemaining()) {
                // need a next page
                nextTdefPageNumber = pageChannel.allocateNewPage();
                partialTdef.putInt(format.OFFSET_NEXT_TABLE_DEF_PAGE, nextTdefPageNumber);
            }

            // update page free space
            partialTdef.putShort(format.OFFSET_FREE_SPACE, (short) (partialTdef.remaining() - 8)); // overwrite page free space

            // write partial page to disk
            pageChannel.writePage(partialTdef, curTdefPageNumber);
        }

    }
}