Example usage for java.nio ByteBuffer flip

Introduction

This page collects example usages of the java.nio.ByteBuffer method flip().

Prototype

public final Buffer flip() 

Document

Flips this buffer. The limit is set to the current position and then the position is set to zero. If the mark is defined then it is discarded.
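
Before the examples, a minimal sketch of the fill/flip/drain cycle (the values are arbitrary): flip() sets the limit to the end of the written data and resets the position to zero, so the same buffer can be read back.

import java.nio.ByteBuffer;

public class FlipDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putInt(42);                      // fill: position = 4, limit = 16
        buf.flip();                          // drain: position = 0, limit = 4
        System.out.println(buf.getInt());    // prints 42
        System.out.println(buf.remaining()); // prints 0
    }
}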

Usage

From source file: com.healthmarketscience.jackcess.Column.java

/**
 * Write an LVAL column into a ByteBuffer inline if it fits, otherwise in
 * other data page(s).
 * @param value Value of the LVAL column
 * @return A buffer containing the LVAL definition and (possibly) the column
 *         value (unless written to other pages)
 * @usage _advanced_method_
 */
public ByteBuffer writeLongValue(byte[] value, int remainingRowLength) throws IOException {
    if (value.length > getType().getMaxSize()) {
        throw new IOException(
                "value too big for column, max " + getType().getMaxSize() + ", got " + value.length);
    }

    // determine which type to write
    byte type = 0;
    int lvalDefLen = getFormat().SIZE_LONG_VALUE_DEF;
    if (((getFormat().SIZE_LONG_VALUE_DEF + value.length) <= remainingRowLength)
            && (value.length <= getFormat().MAX_INLINE_LONG_VALUE_SIZE)) {
        type = LONG_VALUE_TYPE_THIS_PAGE;
        lvalDefLen += value.length;
    } else if (value.length <= getFormat().MAX_LONG_VALUE_ROW_SIZE) {
        type = LONG_VALUE_TYPE_OTHER_PAGE;
    } else {
        type = LONG_VALUE_TYPE_OTHER_PAGES;
    }

    ByteBuffer def = getPageChannel().createBuffer(lvalDefLen);
    // take length and apply type to first byte
    int lengthWithFlags = value.length | (type << 24);
    def.putInt(lengthWithFlags);

    if (type == LONG_VALUE_TYPE_THIS_PAGE) {
        // write long value inline
        def.putInt(0);
        def.putInt(0); //Unknown
        def.put(value);
    } else {

        TempPageHolder lvalBufferH = getTable().getLongValueBuffer();
        ByteBuffer lvalPage = null;
        int firstLvalPageNum = PageChannel.INVALID_PAGE_NUMBER;
        byte firstLvalRow = 0;

        // write other page(s)
        switch (type) {
        case LONG_VALUE_TYPE_OTHER_PAGE:
            lvalPage = getLongValuePage(value.length, lvalBufferH);
            firstLvalPageNum = lvalBufferH.getPageNumber();
            firstLvalRow = (byte) Table.addDataPageRow(lvalPage, value.length, getFormat(), 0);
            lvalPage.put(value);
            getPageChannel().writePage(lvalPage, firstLvalPageNum);
            break;

        case LONG_VALUE_TYPE_OTHER_PAGES:

            ByteBuffer buffer = ByteBuffer.wrap(value);
            int remainingLen = buffer.remaining();
            buffer.limit(0);
            lvalPage = getLongValuePage(getFormat().MAX_LONG_VALUE_ROW_SIZE, lvalBufferH);
            firstLvalPageNum = lvalBufferH.getPageNumber();
            int lvalPageNum = firstLvalPageNum;
            ByteBuffer nextLvalPage = null;
            int nextLvalPageNum = 0;
            while (remainingLen > 0) {
                lvalPage.clear();

                // figure out how much we will put in this page (we need 4 bytes for
                // the next page pointer)
                int chunkLength = Math.min(getFormat().MAX_LONG_VALUE_ROW_SIZE - 4, remainingLen);

                // figure out if we will need another page, and if so, allocate it
                if (chunkLength < remainingLen) {
                    // force a new page to be allocated
                    lvalBufferH.clear();
                    nextLvalPage = getLongValuePage(getFormat().MAX_LONG_VALUE_ROW_SIZE, lvalBufferH);
                    nextLvalPageNum = lvalBufferH.getPageNumber();
                } else {
                    nextLvalPage = null;
                    nextLvalPageNum = 0;
                }

                // add row to this page
                byte lvalRow = (byte) Table.addDataPageRow(lvalPage, chunkLength + 4, getFormat(), 0);

                // write next page info (we'll always be writing into row 0 for
                // newly created pages)
                lvalPage.put((byte) 0); // row number
                ByteUtil.put3ByteInt(lvalPage, nextLvalPageNum); // page number

                // write this page's chunk of data
                buffer.limit(buffer.limit() + chunkLength);
                lvalPage.put(buffer);
                remainingLen -= chunkLength;

                // write new page to database
                getPageChannel().writePage(lvalPage, lvalPageNum);

                if (lvalPageNum == firstLvalPageNum) {
                    // save initial row info
                    firstLvalRow = lvalRow;
                } else {
                    // check assertion that we wrote to row 0 for all subsequent pages
                    if (lvalRow != (byte) 0) {
                        throw new IllegalStateException("Expected row 0, but was " + lvalRow);
                    }
                }

                // move to next page
                lvalPage = nextLvalPage;
                lvalPageNum = nextLvalPageNum;
            }
            break;

        default:
            throw new IOException("Unrecognized long value type: " + type);
        }

        // update def
        def.put(firstLvalRow);
        ByteUtil.put3ByteInt(def, firstLvalPageNum);
        def.putInt(0); //Unknown

    }

    def.flip();
    return def;
}
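
The method above follows a common build-then-flip pattern: write the header fields (and possibly the inline payload) into a freshly allocated buffer, then flip() so the caller receives the buffer read-ready. Below is a minimal standalone sketch of the same idea; buildRecord is an illustrative name, not part of the jackcess API.

import java.nio.ByteBuffer;

public class RecordBuilder {
    // Hypothetical helper: builds a length-prefixed record and returns it read-ready.
    static ByteBuffer buildRecord(byte[] payload) {
        ByteBuffer def = ByteBuffer.allocate(4 + payload.length);
        def.putInt(payload.length); // header: payload length
        def.put(payload);           // inline payload
        def.flip();                 // position = 0, limit = end of written data
        return def;
    }
}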

From source file: com.linkedin.databus.core.DbusEventBuffer.java

private int readEventsInternal(ReadableByteChannel readChannel,
        Iterable<InternalDatabusEventsListener> eventListeners, DbusEventsStatisticsCollector statsCollector)
        throws InvalidEventException {
    final boolean logDebugEnabled = _log.isDebugEnabled();

    ReadEventsReadPosition readPos = new ReadEventsReadPosition();
    ReadEventsWritePosition writePos = new ReadEventsWritePosition();

    _readBufferLock.lock();
    try {
        _eventState = WindowState.IN_READ;

        boolean mightHaveMoreData = true;
        //ensuring index is updated correctly if a control event of preceding window doesn't appear
        //first (no start() called)
        if (_scnIndex.isEnabled() && _scnIndex.isEmpty()) {
            _scnIndex.setUpdateOnNext(true);
        }
        try {
            while (mightHaveMoreData) {
                final ByteBuffer readBuffer = readPos.getReadBuffer();
                boolean success = readEventsFromChannel(readChannel, readBuffer, logDebugEnabled);
                readPos.startIteration();

                final int numBytesRead = readPos.bytesRemaining();

                //if there is an error we'll try to process whatever was read but stop after that
                mightHaveMoreData = success && (numBytesRead > 0)
                        && (readBuffer.position() == readBuffer.limit());

                if (numBytesRead > 0) {
                    _queueLock.lock();
                    try {
                        if (isClosed()) {
                            LOG.warn(
                                    "stopping attempt to read more events into a buffer while it is closed. readPos="
                                            + readPos + "; buf=" + this.toString());
                            return 0;
                        }
                        try {
                            _scnIndex.assertHeadPosition(_head.getRealPosition());
                            _bufferPositionParser.assertSpan(_head.getPosition(),
                                    _currentWritePosition.getPosition(), logDebugEnabled);
                        } catch (RuntimeException re) {
                            _log.fatal("Got runtime Exception :", re);
                            _log.fatal("Event Buffer is :" + toString());
                            _scnIndex.printVerboseString(_log, Level.DEBUG);
                            throw re;
                        }

                        readBuffer.flip();
                        boolean hasMoreInStgBuffer = true;
                        while (hasMoreInStgBuffer && readPos.hasNext()) {
                            writePos.startNewIteration();

                            //figure out the boundary of events at which we can write
                            //leave one byte at the end, to distinguish between a finalized full ByteBuffer
                            //(limit <= capacity - 1) and a ByteBuffer that is still being written to
                            //(limit == capacity)
                            final int contiguousCapacity = writePos.getCurBuf().capacity()
                                    - writePos.getCurOfs() - 1;

                            final ReadEventsScanStatus eventScanStatus = readPos.startEventProcessing();
                            switch (eventScanStatus) {
                            case OK: {
                                final int curEventSize = readPos.getCurEvent().size();
                                if (readPos.bytesProcessed() + curEventSize > contiguousCapacity) {
                                    //not enough space to fit event in the target buffer
                                    if (0 == writePos.getCurOfs()) {
                                        //event bigger than the ByteBuffer capacity
                                        throw new InvalidEventException("event too big to fit into buffer"
                                                + "; size:" + curEventSize + "; event:" + readPos.getCurEvent()
                                                + "; " + readPos + "; buffer.capacity:"
                                                + writePos.getCurBuf().capacity());
                                    } else {
                                        if (logDebugEnabled)
                                            _log.debug("unable to fit event with size "
                                                    + readPos.getCurEvent().size());

                                        //if we could not fit all the data in the destination ByteBuffer,
                                        //we should ensure that we clear up any remaining data in the
                                        //ByteBuffer.
                                        long nextBufferPos = _bufferPositionParser
                                                .incrementIndex(writePos.getCurPos(), _buffers);
                                        boolean interrupted = ensureFreeSpace(writePos.getCurPos(),
                                                nextBufferPos, logDebugEnabled);
                                        if (interrupted) {
                                            _log.warn("ensureFree space interrupted: " + readPos + " "
                                                    + writePos);
                                            return readPos.getNumReadEvents();
                                        }
                                        assert assertBuffersLimits();

                                        writePos.moveToNextBuffer();
                                        _tail.copy(_currentWritePosition);
                                        assert assertBuffersLimits();
                                    }
                                } else {
                                    //we can fit the event in the target buffer
                                    readPos.eventAccepted(); //done with processing in the stg buffer

                                    //how are we on free space?
                                    boolean interrupted = ensureFreeSpace(writePos.getCurPos(),
                                            writePos.getCurPos() + curEventSize, logDebugEnabled);
                                    if (interrupted) {
                                        _log.warn("ensureFree space interrupted:" + readPos + " " + writePos);
                                        return readPos.getNumReadEvents();
                                    }

                                    writePos.determineWriteEnd(readPos);

                                    //we are good on free space, about time to copy the damn data
                                    copyReadEventToEventBuffer(readPos, writePos, eventListeners,
                                            statsCollector, logDebugEnabled);
                                }
                                break;
                            }
                            case PARTIAL_EVENT: {
                                final int curCapacity = readBuffer.capacity();
                                if (logDebugEnabled)
                                    _log.debug("partial event at " + readPos);
                                if (0 != readPos.getReadStart()) {
                                    //compact stg buffer and try to read more data from the network
                                    compactStgBuffer(readPos, logDebugEnabled);
                                    hasMoreInStgBuffer = false;
                                } else if (curCapacity >= getMaxReadBufferCapacity()) {
                                    //we couldn't read an entire event in the staging buffer and we are already
                                    //at max allowed size of the read buffer
                                    throw new InvalidEventException(
                                            "event too big to fit in staging buffer with capacity : "
                                                    + curCapacity + "; readPos:" + readPos
                                                    + "; consider increasing connectionDefaults.eventBuffer.maxSize"
                                                    + " or connectionDefaults.eventBuffer.maxEventSize if set explicitly.");
                                } else {
                                    //grow the staging buffer faster for small sizes and slower for big sizes
                                    //intuitively: <= 5K - 3x, 25K - 2x, 125K - 1.6x, 625K - 1.5x and so on
                                    final double growFactor = curCapacity <= 5 * 1024 ? 3.0
                                            : 1.0 + 2.0 * LN_5 / Math.log(curCapacity / 1024.0);
                                    final int newSize = Math.min(getMaxReadBufferCapacity(),
                                            (int) (growFactor * curCapacity));
                                    if (newSize < curCapacity) {
                                        throw new DatabusRuntimeException("unexpected readbuffer size: "
                                                + newSize + "; readBuffer=" + readBuffer
                                                + "; readBufferCapacity=" + getMaxReadBufferCapacity());
                                    }
                                    readPos.growReadBuffer(newSize);
                                    hasMoreInStgBuffer = false;
                                }
                                break;
                            }
                            case SCN_REGRESSION: {
                                // events should be monotonically increasing
                                // skipping the event and all the events before it (same buffer should have
                                // only increasing events)
                                String errMsg = logSequenceErrorPackets(readPos);
                                _log.warn("got an old event: seq=" + readPos.getSeq() + ", " + errMsg);
                                readPos.eventSkipped();
                                break;
                            }
                            case INVALID_EVENT: {
                                if (null != statsCollector)
                                    statsCollector
                                            .registerEventError(DbusEventInternalReadable.EventScanStatus.ERR);
                                throw new InvalidEventException();
                            }
                            case MISSING_EOP: {
                                String errMsg = logSequenceErrorPackets(readPos);
                                _log.error("detected missing EOP: " + errMsg);
                                throw new InvalidEventException(errMsg);
                            }
                            default:
                                throw new IllegalStateException("unknown scan status: " + eventScanStatus);
                            }
                        }

                        if (!readPos.hasNext()) {
                            readBuffer.clear();
                        }
                    } finally {
                        _queueLock.unlock();
                    }
                }
            }
        } finally {
            if (null != statsCollector) {
                statsCollector.registerBufferMetrics(getMinScn(), this.lastWrittenScn(), this.getPrevScn(),
                        this.getBufferFreeSpace());
                statsCollector.registerTimestampOfFirstEvent(_timestampOfFirstEvent);
            }
            _eventState = WindowState.ENDED;
        }
    } catch (RuntimeException re) {
        _log.error("Got runtime exception in readEvents: " + re.getMessage(), re);
        _log.error("Buffer State: " + toString());
        throw re;
    } finally {
        _readBufferLock.unlock();
        writePos.close();
    }

    if (logDebugEnabled)
        _log.debug("readEvents result: " + readPos + " " + writePos);

    return readPos.getNumReadEvents();
}
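
Stripped of locking and position bookkeeping, the staging-buffer life cycle above is: read from the channel into the buffer, flip() to consume whole events, then clear() once everything readable has been processed. Here is a minimal sketch of that cycle, assuming a blocking channel and with byte-at-a-time processing standing in for event scanning.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public class StagingDrain {
    static void drain(ReadableByteChannel channel) throws IOException {
        ByteBuffer staging = ByteBuffer.allocate(8 * 1024);
        while (channel.read(staging) != -1) {
            staging.flip();                  // switch from filling to draining
            while (staging.hasRemaining()) {
                byte b = staging.get();      // stand-in for event parsing
            }
            staging.clear();                 // ready to fill again
        }
    }
}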

From source file: org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java

/**
 * Compress a set of columns.
 *
 * The header contains a compressed array of data types.
 * The body contains compressed columns and their metadata.
 * The footer contains a compressed array of chunk sizes. The final four bytes of the footer encode the byte size of that compressed array.
 *
 * @param colSet
 *
 * @return ByteBuffer representing the compressed set.
 */
@Override
public ByteBuffer compress(ColumnBuffer[] colSet) {

    // Many compression libraries allow you to avoid allocation of intermediate arrays.
    // To use these API, we need to preallocate the output container.

    // Reserve space for the header.
    int[] dataType = new int[colSet.length];
    int maxCompressedSize = Snappy.maxCompressedLength(4 * dataType.length);

    // Reserve space for the compressed nulls BitSet for each column.
    maxCompressedSize += colSet.length * Snappy.maxCompressedLength((colSet.length / 8) + 1);

    // Track the length of `List<Integer> compressedSize` which will be declared later.
    int uncompressedFooterLength = 1 + 2 * colSet.length;

    for (int colNum = 0; colNum < colSet.length; ++colNum) {
        // Reserve space for the compressed columns.
        dataType[colNum] = colSet[colNum].getType().toTType().getValue();
        switch (TTypeId.findByValue(dataType[colNum])) {
        case BOOLEAN_TYPE:
            maxCompressedSize += Integer.SIZE / Byte.SIZE; // This is for the encoded length.
            maxCompressedSize += Snappy.maxCompressedLength((colSet.length / 8) + 1);
            break;
        case TINYINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length);
            break;
        case SMALLINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Short.SIZE / Byte.SIZE);
            break;
        case INT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            break;
        case BIGINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Long.SIZE / Byte.SIZE);
            break;
        case DOUBLE_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Double.SIZE / Byte.SIZE);
            break;
        case BINARY_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);

            // Reserve space for the size of the compressed flattened bytes.
            for (ByteBuffer nextBuffer : colSet[colNum].toTColumn().getBinaryVal().getValues()) {
                maxCompressedSize += Snappy.maxCompressedLength(nextBuffer.limit());
            }

            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;

            break;
        case STRING_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);

            // Reserve space for the size of the compressed flattened bytes.
            for (String nextString : colSet[colNum].toTColumn().getStringVal().getValues()) {
                maxCompressedSize += Snappy
                        .maxCompressedLength(nextString.getBytes(StandardCharsets.UTF_8).length);
            }

            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;

            break;
        default:
            throw new IllegalStateException("Unrecognized column type");
        }
    }
    // Reserve space for the footer.
    maxCompressedSize += Snappy.maxCompressedLength(uncompressedFooterLength * Integer.SIZE / Byte.SIZE);

    // Allocate the output container.
    ByteBuffer output = ByteBuffer.allocate(maxCompressedSize);

    // Allocate the footer. It is written last because we don't know the chunk sizes
    // until after the columns have been compressed and written.
    ArrayList<Integer> compressedSize = new ArrayList<Integer>(uncompressedFooterLength);

    // Write to the output buffer.
    try {
        // Write the header.
        compressedSize.add(writePrimitives(dataType, output));

        // Write the compressed columns and metadata.
        for (int colNum = 0; colNum < colSet.length; colNum++) {
            switch (TTypeId.findByValue(dataType[colNum])) {
            case BOOLEAN_TYPE: {
                TBoolColumn column = colSet[colNum].toTColumn().getBoolVal();

                List<Boolean> bools = column.getValues();
                BitSet bsBools = new BitSet(bools.size());
                for (int rowNum = 0; rowNum < bools.size(); rowNum++) {
                    bsBools.set(rowNum, bools.get(rowNum));
                }

                compressedSize.add(writePrimitives(column.getNulls(), output));

                // BitSet won't write trailing zeroes so we encode the length
                output.putInt(column.getValuesSize());

                compressedSize.add(writePrimitives(bsBools.toByteArray(), output));

                break;
            }
            case TINYINT_TYPE: {
                TByteColumn column = colSet[colNum].toTColumn().getByteVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedBytes(column.getValues(), output));
                break;
            }
            case SMALLINT_TYPE: {
                TI16Column column = colSet[colNum].toTColumn().getI16Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedShorts(column.getValues(), output));
                break;
            }
            case INT_TYPE: {
                TI32Column column = colSet[colNum].toTColumn().getI32Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedIntegers(column.getValues(), output));
                break;
            }
            case BIGINT_TYPE: {
                TI64Column column = colSet[colNum].toTColumn().getI64Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedLongs(column.getValues(), output));
                break;
            }
            case DOUBLE_TYPE: {
                TDoubleColumn column = colSet[colNum].toTColumn().getDoubleVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedDoubles(column.getValues(), output));
                break;
            }
            case BINARY_TYPE: {
                TBinaryColumn column = colSet[colNum].toTColumn().getBinaryVal();

                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).limit();
                    totalSize += column.getValues().get(rowNum).limit();
                }

                // Flatten the data for Snappy for a better compression ratio.
                ByteBuffer flattenedData = ByteBuffer.allocate(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.put(column.getValues().get(rowNum));
                }

                // Write nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));

                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));

                // Write the compressed, flattened data.
                compressedSize.add(writePrimitives(flattenedData.array(), output));

                break;
            }
            case STRING_TYPE: {
                TStringColumn column = colSet[colNum].toTColumn().getStringVal();

                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).length();
                    totalSize += column.getValues().get(rowNum).length();
                }

                // Flatten the data for Snappy for a better compression ratio.
                StringBuilder flattenedData = new StringBuilder(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.append(column.getValues().get(rowNum));
                }

                // Write nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));

                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));

                // Write the flattened data.
                compressedSize.add(
                        writePrimitives(flattenedData.toString().getBytes(StandardCharsets.UTF_8), output));

                break;
            }
            default:
                throw new IllegalStateException("Unrecognized column type");
            }
        }

        // Write the footer.
        output.putInt(writeBoxedIntegers(compressedSize, output));

    } catch (IOException e) {
        e.printStackTrace();
    }
    output.flip();
    return output;
}
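
The writePrimitives and writeBoxed* helpers are not shown above. Below is a plausible sketch of writePrimitives, assuming the xerial snappy-java API: compress the array, append the compressed bytes to the output buffer, and return the chunk size that gets recorded in the footer. This is a hypothetical reconstruction, not the actual Hive helper.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.xerial.snappy.Snappy;

public class SnappyWriteHelper {
    // Hypothetical reconstruction of the helper used above.
    static int writePrimitives(int[] primitives, ByteBuffer output) throws IOException {
        byte[] compressed = Snappy.compress(primitives);
        output.put(compressed);
        return compressed.length;
    }
}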

From source file: com.healthmarketscience.jackcess.Table.java

/**
 * Serialize a row of Objects into a byte buffer.
 *
 * @param rowArray row data, expected to be correct length for this table
 * @param buffer buffer to which to write the row data
 * @param minRowSize min size for result row
 * @param rawVarValues optional, pre-written values for var length columns
 *                     (enables re-use of previously written values).
 * @return the given buffer, filled with the row data
 */
private ByteBuffer createRow(Object[] rowArray, ByteBuffer buffer, int minRowSize,
        Map<Column, byte[]> rawVarValues) throws IOException {
    buffer.putShort(_maxColumnCount);
    NullMask nullMask = new NullMask(_maxColumnCount);

    //Fixed length column data comes first
    int fixedDataStart = buffer.position();
    int fixedDataEnd = fixedDataStart;
    for (Column col : _columns) {

        if (col.isVariableLength()) {
            continue;
        }

        Object rowValue = col.getRowValue(rowArray);

        if (col.getType() == DataType.BOOLEAN) {

            if (Column.toBooleanValue(rowValue)) {
                //Booleans are stored in the null mask
                nullMask.markNotNull(col);
            }
            rowValue = null;
        }

        if (rowValue != null) {

            // we have a value to write
            nullMask.markNotNull(col);

            // remainingRowLength is ignored when writing fixed length data
            buffer.position(fixedDataStart + col.getFixedDataOffset());
            buffer.put(col.write(rowValue, 0));
        }

        // always insert space for the entire fixed data column length
        // (including null values), access expects the row to always be at least
        // big enough to hold all fixed values
        buffer.position(fixedDataStart + col.getFixedDataOffset() + col.getLength());

        // keep track of the end of fixed data
        if (buffer.position() > fixedDataEnd) {
            fixedDataEnd = buffer.position();
        }

    }

    // reposition at end of fixed data
    buffer.position(fixedDataEnd);

    // only need this info if this table contains any var length data
    if (_maxVarColumnCount > 0) {

        int maxRowSize = getFormat().MAX_ROW_SIZE;

        // figure out how much space remains for var length data.  first,
        // account for already written space
        maxRowSize -= buffer.position();
        // now, account for trailer space
        int trailerSize = (nullMask.byteSize() + 4 + (_maxVarColumnCount * 2));
        maxRowSize -= trailerSize;

        // for each non-null long value column we need to reserve a small
        // amount of space so that we don't end up running out of row space
        // later by being too greedy
        for (Column varCol : _varColumns) {
            if ((varCol.getType().isLongValue()) && (varCol.getRowValue(rowArray) != null)) {
                maxRowSize -= getFormat().SIZE_LONG_VALUE_DEF;
            }
        }

        //Now write out variable length column data
        short[] varColumnOffsets = new short[_maxVarColumnCount];
        int varColumnOffsetsIndex = 0;
        for (Column varCol : _varColumns) {
            short offset = (short) buffer.position();
            Object rowValue = varCol.getRowValue(rowArray);
            if (rowValue != null) {
                // we have a value
                nullMask.markNotNull(varCol);

                byte[] rawValue = null;
                ByteBuffer varDataBuf = null;
                if (((rawValue = rawVarValues.get(varCol)) != null) && (rawValue.length <= maxRowSize)) {
                    // save time and potentially db space, re-use raw value
                    varDataBuf = ByteBuffer.wrap(rawValue);
                } else {
                    // write column value
                    varDataBuf = varCol.write(rowValue, maxRowSize);
                }

                maxRowSize -= varDataBuf.remaining();
                if (varCol.getType().isLongValue()) {
                    // we already accounted for some amount of the long value data
                    // above.  add that space back so we don't double count
                    maxRowSize += getFormat().SIZE_LONG_VALUE_DEF;
                }
                buffer.put(varDataBuf);
            }

            // we do a loop here so that we fill in offsets for deleted columns
            while (varColumnOffsetsIndex <= varCol.getVarLenTableIndex()) {
                varColumnOffsets[varColumnOffsetsIndex++] = offset;
            }
        }

        // fill in offsets for any remaining deleted columns
        while (varColumnOffsetsIndex < varColumnOffsets.length) {
            varColumnOffsets[varColumnOffsetsIndex++] = (short) buffer.position();
        }

        // record where we stopped writing
        int eod = buffer.position();

        // insert padding if necessary
        padRowBuffer(buffer, minRowSize, trailerSize);

        buffer.putShort((short) eod); //EOD marker

        //Now write out variable length offsets
        //Offsets are stored in reverse order
        for (int i = _maxVarColumnCount - 1; i >= 0; i--) {
            buffer.putShort(varColumnOffsets[i]);
        }
        buffer.putShort(_maxVarColumnCount); //Number of var length columns

    } else {

        // insert padding for row w/ no var cols
        padRowBuffer(buffer, minRowSize, nullMask.byteSize());
    }

    nullMask.write(buffer); //Null mask
    buffer.flip();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Creating new data block:\n" + ByteUtil.toHexString(buffer, buffer.limit()));
    }
    return buffer;
}

From source file: org.apache.geode.internal.cache.Oplog.java

private void flush(OplogFile olf, boolean doSync) throws IOException {
    try {
        // No need to get the backup lock prior to synchronizing (correct lock order) since the
        // synchronized block does not attempt to get the backup lock (incorrect lock order)
        synchronized (this.lock/* olf */) {
            if (olf.RAFClosed) {
                return;
            }
            ByteBuffer bb = olf.writeBuf;
            if (bb != null && bb.position() != 0) {
                bb.flip();
                int flushed = 0;
                int numChannelRetries = 0;
                do {
                    int channelBytesWritten = 0;
                    final int bbStartPos = bb.position();
                    final long channelStartPos = olf.channel.position();
                    // differentiate between bytes written on this channel.write() iteration and the
                    // total number of bytes written to the channel on this call
                    channelBytesWritten = olf.channel.write(bb);
                    // Expect channelBytesWritten and the changes in pp.position() and channel.position() to
                    // be the same. If they are not, then the channel.write() silently failed. The following
                    // retry separates spurious failures from permanent channel failures.
                    if (channelBytesWritten != bb.position() - bbStartPos) {
                        if (numChannelRetries++ < MAX_CHANNEL_RETRIES) {
                            // Reset the ByteBuffer position, but take into account anything that did get
                            // written to the channel
                            channelBytesWritten = (int) (olf.channel.position() - channelStartPos);
                            bb.position(bbStartPos + channelBytesWritten);
                        } else {
                            throw new IOException("Failed to write Oplog entry to" + olf.f.getName() + ": "
                                    + "channel.write() returned " + channelBytesWritten + ", "
                                    + "change in channel position = "
                                    + (olf.channel.position() - channelStartPos) + ", "
                                    + "change in source buffer position = " + (bb.position() - bbStartPos));
                        }
                    }
                    flushed += channelBytesWritten;
                } while (bb.hasRemaining());
                // update bytesFlushed after entire writeBuffer is flushed to fix bug
                // 41201
                olf.bytesFlushed += flushed;
                bb.clear();
            }
            if (doSync) {
                if (SYNC_WRITES) {
                    // Synch Meta Data as well as content
                    olf.channel.force(true);
                }
            }
        }
    } catch (ClosedChannelException ignore) {
        // It is possible for a channel to be closed when our code does not
        // explicitly call channel.close (when we will set RAFclosed).
        // This can happen when a thread is doing an io op and is interrupted.
        // That thread will see ClosedByInterruptException but it will also
        // close the channel and then we will see ClosedChannelException.
    }
}
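
The retry bookkeeping above exists because FileChannel.write() may perform a short write. Minus the retry accounting, the underlying flip()/write-loop/clear() cycle reduces to this minimal sketch.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class SimpleFlush {
    static void flush(FileChannel channel, ByteBuffer writeBuf) throws IOException {
        if (writeBuf.position() == 0) {
            return;                          // nothing buffered
        }
        writeBuf.flip();                     // expose buffered bytes for writing
        while (writeBuf.hasRemaining()) {
            channel.write(writeBuf);         // may write fewer bytes than requested
        }
        writeBuf.clear();                    // reset for the next round of appends
    }
}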

From source file: com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Serialize a row of Objects into a byte buffer.
 *
 * @param rowArray row data, expected to be correct length for this table
 * @param buffer buffer to which to write the row data
 * @param minRowSize min size for result row
 * @param rawVarValues optional, pre-written values for var length columns
 *                     (enables re-use of previously written values).
 * @return the given buffer, filled with the row data
 */
private ByteBuffer createRow(Object[] rowArray, ByteBuffer buffer, int minRowSize,
        Map<ColumnImpl, byte[]> rawVarValues) throws IOException {
    buffer.putShort(_maxColumnCount);
    NullMask nullMask = new NullMask(_maxColumnCount);

    //Fixed length column data comes first
    int fixedDataStart = buffer.position();
    int fixedDataEnd = fixedDataStart;
    for (ColumnImpl col : _columns) {

        if (col.isVariableLength()) {
            continue;
        }

        Object rowValue = col.getRowValue(rowArray);

        if (col.storeInNullMask()) {

            if (col.writeToNullMask(rowValue)) {
                nullMask.markNotNull(col);
            }
            rowValue = null;
        }

        if (rowValue != null) {

            // we have a value to write
            nullMask.markNotNull(col);

            // remainingRowLength is ignored when writing fixed length data
            buffer.position(fixedDataStart + col.getFixedDataOffset());
            buffer.put(col.write(rowValue, 0));
        }

        // always insert space for the entire fixed data column length
        // (including null values), access expects the row to always be at least
        // big enough to hold all fixed values
        buffer.position(fixedDataStart + col.getFixedDataOffset() + col.getLength());

        // keep track of the end of fixed data
        if (buffer.position() > fixedDataEnd) {
            fixedDataEnd = buffer.position();
        }

    }

    // reposition at end of fixed data
    buffer.position(fixedDataEnd);

    // only need this info if this table contains any var length data
    if (_maxVarColumnCount > 0) {

        int maxRowSize = getFormat().MAX_ROW_SIZE;

        // figure out how much space remains for var length data.  first,
        // account for already written space
        maxRowSize -= buffer.position();
        // now, account for trailer space
        int trailerSize = (nullMask.byteSize() + 4 + (_maxVarColumnCount * 2));
        maxRowSize -= trailerSize;

        // for each non-null long value column we need to reserve a small
        // amount of space so that we don't end up running out of row space
        // later by being too greedy
        for (ColumnImpl varCol : _varColumns) {
            if ((varCol.getType().isLongValue()) && (varCol.getRowValue(rowArray) != null)) {
                maxRowSize -= getFormat().SIZE_LONG_VALUE_DEF;
            }
        }

        //Now write out variable length column data
        short[] varColumnOffsets = new short[_maxVarColumnCount];
        int varColumnOffsetsIndex = 0;
        for (ColumnImpl varCol : _varColumns) {
            short offset = (short) buffer.position();
            Object rowValue = varCol.getRowValue(rowArray);
            if (rowValue != null) {
                // we have a value
                nullMask.markNotNull(varCol);

                byte[] rawValue = null;
                ByteBuffer varDataBuf = null;
                if (((rawValue = rawVarValues.get(varCol)) != null) && (rawValue.length <= maxRowSize)) {
                    // save time and potentially db space, re-use raw value
                    varDataBuf = ByteBuffer.wrap(rawValue);
                } else {
                    // write column value
                    varDataBuf = varCol.write(rowValue, maxRowSize);
                }

                maxRowSize -= varDataBuf.remaining();
                if (varCol.getType().isLongValue()) {
                    // we already accounted for some amount of the long value data
                    // above.  add that space back so we don't double count
                    maxRowSize += getFormat().SIZE_LONG_VALUE_DEF;
                }
                buffer.put(varDataBuf);
            }

            // we do a loop here so that we fill in offsets for deleted columns
            while (varColumnOffsetsIndex <= varCol.getVarLenTableIndex()) {
                varColumnOffsets[varColumnOffsetsIndex++] = offset;
            }
        }

        // fill in offsets for any remaining deleted columns
        while (varColumnOffsetsIndex < varColumnOffsets.length) {
            varColumnOffsets[varColumnOffsetsIndex++] = (short) buffer.position();
        }

        // record where we stopped writing
        int eod = buffer.position();

        // insert padding if necessary
        padRowBuffer(buffer, minRowSize, trailerSize);

        buffer.putShort((short) eod); //EOD marker

        //Now write out variable length offsets
        //Offsets are stored in reverse order
        for (int i = _maxVarColumnCount - 1; i >= 0; i--) {
            buffer.putShort(varColumnOffsets[i]);
        }
        buffer.putShort(_maxVarColumnCount); //Number of var length columns

    } else {

        // insert padding for row w/ no var cols
        padRowBuffer(buffer, minRowSize, nullMask.byteSize());
    }

    nullMask.write(buffer); //Null mask
    buffer.flip();
    return buffer;
}

From source file: org.apache.geode.internal.cache.Oplog.java

private void flush(OplogFile olf, ByteBuffer b1, ByteBuffer b2) throws IOException {
    try {
        // No need to get the backup lock prior to synchronizing (correct lock order) since the
        // synchronized block does not attempt to get the backup lock (incorrect lock order)
        synchronized (this.lock/* olf */) {
            if (olf.RAFClosed) {
                return;
            }
            this.bbArray[0] = b1;
            this.bbArray[1] = b2;
            b1.flip();
            long flushed = 0;
            do {
                flushed += olf.channel.write(this.bbArray);
            } while (b2.hasRemaining());
            this.bbArray[0] = null;
            this.bbArray[1] = null;
            // update bytesFlushed after entire writeBuffer is flushed to fix bug 41201
            olf.bytesFlushed += flushed;
            b1.clear();
        }
    } catch (ClosedChannelException ignore) {
        // It is possible for a channel to be closed when our code does not
        // explicitly call channel.close (when we will set RAFclosed).
        // This can happen when a thread is doing an io op and is interrupted.
        // That thread will see ClosedByInterruptException but it will also
        // close the channel and then we will see ClosedChannelException.
    }
}
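
This overload relies on a gathering write: channel.write(ByteBuffer[]) drains the buffers in array order, so the loop only needs to test the last one. Note that only b1 is flipped here; the code appears to expect b2 to arrive already read-ready (for example, a wrapped array). Below is a minimal sketch of the gathering pattern that flips both buffers itself.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;

public class GatherFlush {
    static void flush(GatheringByteChannel channel, ByteBuffer b1, ByteBuffer b2)
            throws IOException {
        b1.flip();                           // both buffers must be read-ready
        b2.flip();
        ByteBuffer[] srcs = { b1, b2 };
        while (b2.hasRemaining()) {          // b1 drains before b2 in a gathering write
            channel.write(srcs);
        }
        b1.clear();
        b2.clear();
    }
}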

From source file: com.sonicle.webtop.mail.Service.java

public static void fastChannelCopy(final ReadableByteChannel src, final WritableByteChannel dest)
        throws IOException {
    final ByteBuffer buffer = ByteBuffer.allocateDirect(16 * 1024);
    while (src.read(buffer) != -1) {
        // prepare the buffer to be drained
        buffer.flip();
        // write to the channel, may block
        dest.write(buffer);
        // If partial transfer, shift remainder down
        // If buffer is empty, same as doing clear()
        buffer.compact();
    }
    // EOF will leave buffer in fill state
    buffer.flip();
    // make sure the buffer is fully drained.
    while (buffer.hasRemaining()) {
        dest.write(buffer);
    }
}
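
A usage sketch for the helper above; the file names are placeholders.

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;

public class CopyDemo {
    public static void main(String[] args) throws IOException {
        try (FileInputStream in = new FileInputStream("in.dat");
                FileOutputStream out = new FileOutputStream("out.dat")) {
            ReadableByteChannel src = Channels.newChannel(in);
            WritableByteChannel dest = Channels.newChannel(out);
            Service.fastChannelCopy(src, dest); // the static method shown above
        }
    }
}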

From source file: edu.hawaii.soest.kilonalu.dvp2.DavisWxSource.java

/**
 * A method that executes the streaming of data from the source to the RBNB
 * server after all configuration of settings, connections to hosts, and
 * thread initializing occurs.  This method contains the detailed code for
 * streaming the data and interpreting the stream.
 */
protected boolean execute() {
    logger.debug("DavisWxSource.execute() called.");
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;

    boolean failed = false;

    // while data are being sent, read them into the buffer
    try {

        this.socketChannel = getSocketConnection();

        // create four byte placeholders used to evaluate up to a four-byte 
        // window.  The FIFO layout looks like:
        //           -------------------------
        //   in ---> | One | Two |Three|Four |  ---> out
        //           -------------------------
        byte byteOne = 0x00, // set initial placeholder values
                byteTwo = 0x00, byteThree = 0x00, byteFour = 0x00;

        // Create a buffer that will store the sample bytes as they are read
        ByteBuffer sampleBuffer = ByteBuffer.allocate(getBufferSize());

        // create a byte buffer to store bytes from the TCP stream
        ByteBuffer buffer = ByteBuffer.allocateDirect(getBufferSize());

        // add a channel of data that will be pushed to the server.  
        // Each sample will be sent to the Data Turbine as an rbnb frame.
        ChannelMap rbnbChannelMap = new ChannelMap();
        int channelIndex = 0;

        // add the raw binary LOOP packet data
        //channelIndex = rbnbChannelMap.Add(getRBNBChannelName());
        //rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        // add the barTrendAsString field data
        channelIndex = rbnbChannelMap.Add("barTrendAsString"); // Falling Slowly
        rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        // add the barometer field data
        channelIndex = rbnbChannelMap.Add("barometer"); // 29.9
        rbnbChannelMap.PutUserInfo(channelIndex, "units=inch Hg");

        // add the insideTemperature field data
        channelIndex = rbnbChannelMap.Add("insideTemperature"); // 83.9
        rbnbChannelMap.PutUserInfo(channelIndex, "units=degrees F");

        // add the insideHumidity field data
        channelIndex = rbnbChannelMap.Add("insideHumidity"); // 51
        rbnbChannelMap.PutUserInfo(channelIndex, "units=percent");

        // add the outsideTemperature field data
        channelIndex = rbnbChannelMap.Add("outsideTemperature"); // 76.7
        rbnbChannelMap.PutUserInfo(channelIndex, "units=degrees F");

        // add the windSpeed field data
        channelIndex = rbnbChannelMap.Add("windSpeed"); // 5
        rbnbChannelMap.PutUserInfo(channelIndex, "units=mph");

        // add the tenMinuteAverageWindSpeed field data
        channelIndex = rbnbChannelMap.Add("tenMinuteAverageWindSpeed"); // 4
        rbnbChannelMap.PutUserInfo(channelIndex, "units=mph");

        // add the windDirection field data
        channelIndex = rbnbChannelMap.Add("windDirection"); // 80
        rbnbChannelMap.PutUserInfo(channelIndex, "units=degrees");

        // add the outsideHumidity field data
        channelIndex = rbnbChannelMap.Add("outsideHumidity"); // 73
        rbnbChannelMap.PutUserInfo(channelIndex, "units=percent");

        // add the rainRate field data
        channelIndex = rbnbChannelMap.Add("rainRate"); // 0.0
        rbnbChannelMap.PutUserInfo(channelIndex, "units=inch/hour");

        // add the uvRadiation field data
        channelIndex = rbnbChannelMap.Add("uvRadiation"); // 0
        rbnbChannelMap.PutUserInfo(channelIndex, "UV index");

        // add the solarRadiation field data
        channelIndex = rbnbChannelMap.Add("solarRadiation"); // 0.0
        rbnbChannelMap.PutUserInfo(channelIndex, "watt/m^2");

        // add the stormRain field data
        channelIndex = rbnbChannelMap.Add("stormRain"); // 0.0
        rbnbChannelMap.PutUserInfo(channelIndex, "inch");

        // add the currentStormStartDate field data
        channelIndex = rbnbChannelMap.Add("currentStormStartDate"); // -1--1-1999
        rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        // add the dailyRain field data
        channelIndex = rbnbChannelMap.Add("dailyRain"); // 0.0
        rbnbChannelMap.PutUserInfo(channelIndex, "units=inch");

        // add the monthlyRain field data
        channelIndex = rbnbChannelMap.Add("monthlyRain"); // 0.0
        rbnbChannelMap.PutUserInfo(channelIndex, "units=inch");

        // add the yearlyRain field data
        channelIndex = rbnbChannelMap.Add("yearlyRain"); // 15.0
        rbnbChannelMap.PutUserInfo(channelIndex, "units=inch");

        // add the dailyEvapoTranspiration field data
        channelIndex = rbnbChannelMap.Add("dailyEvapoTranspiration"); // 0.0
        rbnbChannelMap.PutUserInfo(channelIndex, "units=inch");

        // add the monthlyEvapoTranspiration field data
        channelIndex = rbnbChannelMap.Add("monthlyEvapoTranspiration"); // 0.0
        rbnbChannelMap.PutUserInfo(channelIndex, "units=inch");

        // add the yearlyEvapoTranspiration field data
        channelIndex = rbnbChannelMap.Add("yearlyEvapoTranspiration"); // 93.0
        rbnbChannelMap.PutUserInfo(channelIndex, "units=inch");

        // add the transmitterBatteryStatus field data
        channelIndex = rbnbChannelMap.Add("transmitterBatteryStatus"); // 0
        rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        // add the consoleBatteryVoltage field data
        channelIndex = rbnbChannelMap.Add("consoleBatteryVoltage"); // 4.681640625
        rbnbChannelMap.PutUserInfo(channelIndex, "units=volts");

        // add the forecastAsString field data
        channelIndex = rbnbChannelMap.Add("forecastAsString"); // Partially Cloudy
        rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        // add the forecastRuleNumberAsString field data
        //channelIndex = rbnbChannelMap.Add("forecastRuleNumberAsString");      // Increasing clouds with little temperature change.
        //rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        // add the timeOfSunrise field data
        channelIndex = rbnbChannelMap.Add("timeOfSunrise"); // 05:49
        rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        // add the timeOfSunset field data
        channelIndex = rbnbChannelMap.Add("timeOfSunset"); // 19:11
        rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        channelIndex = rbnbChannelMap.Add("DecimalASCIISampleData"); // sample data as ASCII
        rbnbChannelMap.PutUserInfo(channelIndex, "units=none");

        // register the channel map of variables and units with the DataTurbine
        getSource().Register(rbnbChannelMap);
        // reset variables for use with the incoming data
        rbnbChannelMap.Clear();
        channelIndex = 0;

        // wake the instrument with an initial '\n' command
        this.command = this.commandSuffix;
        this.sentCommand = queryInstrument(this.command);

        // allow time for the instrument response
        streamingThread.sleep(2000);
        this.command = this.commandPrefix + this.takeSampleCommand + this.commandSuffix;
        this.sentCommand = queryInstrument(command);

        // while there are bytes to read from the socket ...
        while (this.socketChannel.read(buffer) != -1 || buffer.position() > 0) {
            // prepare the buffer for reading
            buffer.flip();

            // while there are unread bytes in the ByteBuffer
            while (buffer.hasRemaining()) {
                byteOne = buffer.get();
                //logger.debug("b1: " + new String(Hex.encodeHex((new byte[]{byteOne})))   + "\t" + 
                //             "b2: " + new String(Hex.encodeHex((new byte[]{byteTwo})))   + "\t" + 
                //             "b3: " + new String(Hex.encodeHex((new byte[]{byteThree}))) + "\t" + 
                //             "b4: " + new String(Hex.encodeHex((new byte[]{byteFour})))  + "\t" +
                //             "sample pos: "   + sampleBuffer.position()                  + "\t" +
                //             "sample rem: "   + sampleBuffer.remaining()                 + "\t" +
                //             "sample cnt: "   + sampleByteCount                          + "\t" +
                //             "buffer pos: "   + buffer.position()                        + "\t" +
                //             "buffer rem: "   + buffer.remaining()                       + "\t" +
                //             "state: "        + state
                //);

                // Use a State Machine to process the byte stream.
                // Start building an rbnb frame for the entire sample, first by 
                // inserting a timestamp into the channelMap.  This time is merely
                // the time of insert into the data turbine, not the time of
                // observations of the measurements.  That time should be parsed out
                // of the sample in the Sink client code

                switch (state) {

                case 0:

                    // sample line is begun by "ACK L" (the first part of ACK + "LOOP")
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == 0x4C && byteTwo == 0x06) {

                        sampleByteCount++; // add the last byte found to the count

                        // add the last byte found to the sample buffer
                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);

                        } else {
                            sampleBuffer.compact();
                            sampleBuffer.put(byteOne);

                        }

                        // we've found the beginning of a sample, move on
                        state = 1;
                        break;

                    } else {
                        break;
                    }

                case 1: // read the rest of the bytes to the next EOL characters

                    // sample line is terminated by "\n\r"
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == 0x0D && byteTwo == 0x0A) {

                        sampleByteCount++; // add the last byte found to the count

                        // add the last byte found to the sample buffer
                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);

                        } else {
                            sampleBuffer.compact();
                            sampleBuffer.put(byteOne);

                        }
                        state = 3;
                        break;
                    } else { // not 0x0A0D

                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found

                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);
                        } else {
                            sampleBuffer.compact();
                            logger.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);

                        }

                        break;
                    } // end if for 0x0A0D EOL

                case 3:

                    // At this point, we've found the \n\r delimiter, read the first
                    // of 2 CRC bytes
                    sampleByteCount++; // add the last byte found to the count

                    // add the last byte found to the sample buffer
                    if (sampleBuffer.remaining() > 0) {
                        sampleBuffer.put(byteOne);

                    } else {
                        sampleBuffer.compact();
                        sampleBuffer.put(byteOne);

                    }
                    state = 4;
                    break;

                case 4:

                    // At this point, we've found the \n\r delimiter, read the second
                    // of 2 CRC bytes
                    sampleByteCount++; // add the last byte found to the count

                    // add the last byte found to the sample buffer
                    if (sampleBuffer.remaining() > 0) {
                        sampleBuffer.put(byteOne);

                    } else {
                        sampleBuffer.compact();
                        sampleBuffer.put(byteOne);

                    }
                    state = 0;

                    // copy just the sample's bytes out of the sample buffer into
                    // a byte array for parsing; the parsed fields (not the raw
                    // array) are what get sent to the DataTurbine below
                    byte[] sampleArray = new byte[sampleByteCount];

                    try {
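                        // flip() switches the buffer from filling to draining:
                        // the limit becomes the number of bytes written and the
                        // position returns to zero, so get() below fills
                        // sampleArray with the buffered sample bytes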
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);

                        // parse and send the sample to the data turbine
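                        // (sampleBuffer has just been drained by get(), so the
                        // parser is assumed to rewind or flip it before reading)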
                        this.davisWxParser = new DavisWxParser(sampleBuffer);

                    } catch (java.lang.Exception e) {
                        logger.info(
                                "There was a problem parsing the binary weather LOOP packet. Skipping this sample.");
                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        rbnbChannelMap.Clear();
                        break;
                    }

                    // build a CSV string of selected fields from the parsed sample
                    StringBuilder decimalASCIISampleData = new StringBuilder();

                    rbnbChannelMap.PutTimeAuto("server");

                    // add the raw binary LOOP packet data
                    //channelIndex = rbnbChannelMap.Add(getRBNBChannelName());
                    //rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    //rbnbChannelMap.PutDataAsByteArray(channelIndex, sampleArray);         // raw binary LOOP packet

                    // add the barTrendAsString field data
                    channelIndex = rbnbChannelMap.Add("barTrendAsString"); // Falling Slowly
                    rbnbChannelMap.PutMime(channelIndex, "text/plain");
                    rbnbChannelMap.PutDataAsString(channelIndex, davisWxParser.getBarTrendAsString());
                    decimalASCIISampleData.append(
                            String.format("\"%16s\"", (Object) davisWxParser.getBarTrendAsString()) + ", ");

                    // add the packetType field to the ASCII string only
                    decimalASCIISampleData.append(
                            String.format("%1d", (Object) new Integer(davisWxParser.getPacketType())) + ", ");

                    // add the nextRecord field to the ASCII string only
                    decimalASCIISampleData.append(
                            String.format("%04d", (Object) new Integer(davisWxParser.getNextRecord())) + ", ");

                    // add the barometer field data
                    channelIndex = rbnbChannelMap.Add("barometer"); // 29.9
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex, new float[] { davisWxParser.getBarometer() });
                    decimalASCIISampleData.append(
                            String.format("%06.4f", (Object) new Float(davisWxParser.getBarometer())) + ", ");

                    // add the insideTemperature field data
                    channelIndex = rbnbChannelMap.Add("insideTemperature"); // 83.9
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getInsideTemperature() });
                    decimalASCIISampleData.append(
                            String.format("%05.2f", (Object) new Float(davisWxParser.getInsideTemperature()))
                                    + ", ");

                    // add the insideHumidity field data
                    channelIndex = rbnbChannelMap.Add("insideHumidity"); // 51
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsInt32(channelIndex,
                            new int[] { davisWxParser.getInsideHumidity() });
                    decimalASCIISampleData.append(
                            String.format("%03d", (Object) new Integer(davisWxParser.getInsideHumidity()))
                                    + ", ");

                    // add the outsideTemperature field data
                    channelIndex = rbnbChannelMap.Add("outsideTemperature"); // 76.7
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getOutsideTemperature() });
                    decimalASCIISampleData.append(
                            String.format("%05.2f", (Object) new Float(davisWxParser.getOutsideTemperature()))
                                    + ", ");

                    // add the windSpeed field data
                    channelIndex = rbnbChannelMap.Add("windSpeed"); // 5
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsInt32(channelIndex, new int[] { davisWxParser.getWindSpeed() });
                    decimalASCIISampleData.append(
                            String.format("%03d", (Object) new Integer(davisWxParser.getWindSpeed())) + ", ");

                    // add the tenMinuteAverageWindSpeed field data
                    channelIndex = rbnbChannelMap.Add("tenMinuteAverageWindSpeed"); // 4
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsInt32(channelIndex,
                            new int[] { davisWxParser.getTenMinuteAverageWindSpeed() });
                    decimalASCIISampleData.append(String.format("%03d",
                            (Object) new Integer(davisWxParser.getTenMinuteAverageWindSpeed())) + ", ");

                    // add the windDirection field data
                    channelIndex = rbnbChannelMap.Add("windDirection"); // 80
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsInt32(channelIndex, new int[] { davisWxParser.getWindDirection() });
                    decimalASCIISampleData.append(
                            String.format("%03d", (Object) new Integer(davisWxParser.getWindDirection()))
                                    + ", ");

                    // add the extraTemperature fields as ASCII only
                    float[] extraTemperatures = davisWxParser.getExtraTemperatures();
                    for (float temperature : extraTemperatures) {
                        decimalASCIISampleData
                                .append(String.format("%05.2f", (Object) new Float(temperature)) + ", ");

                    }

                    // add the soilTemperature fields as ASCII only
                    float[] soilTemperatures = davisWxParser.getSoilTemperatures();
                    for (float soil : soilTemperatures) {
                        decimalASCIISampleData.append(String.format("%05.2f", (Object) new Float(soil)) + ", ");

                    }

                    // add the leafTemperature fields as ASCII only
                    float[] leafTemperatures = davisWxParser.getLeafTemperatures();
                    for (float leaf : leafTemperatures) {
                        decimalASCIISampleData.append(String.format("%05.2f", (Object) new Float(leaf)) + ", ");

                    }

                    // add the outsideHumidity field data
                    channelIndex = rbnbChannelMap.Add("outsideHumidity"); // 73
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsInt32(channelIndex,
                            new int[] { davisWxParser.getOutsideHumidity() });
                    decimalASCIISampleData.append(
                            String.format("%03d", (Object) new Integer(davisWxParser.getOutsideHumidity()))
                                    + ", ");

                    // add the rainRate field data
                    channelIndex = rbnbChannelMap.Add("rainRate"); // 0.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex, new float[] { davisWxParser.getRainRate() });
                    decimalASCIISampleData.append(
                            String.format("%04.2f", (Object) new Float(davisWxParser.getRainRate())) + ", ");

                    // add the uvRadiation field data
                    channelIndex = rbnbChannelMap.Add("uvRadiation"); // 0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsInt32(channelIndex, new int[] { davisWxParser.getUvRadiation() });
                    decimalASCIISampleData.append(
                            String.format("%03d", (Object) new Integer(davisWxParser.getUvRadiation())) + ", ");

                    // add the solarRadiation field data
                    channelIndex = rbnbChannelMap.Add("solarRadiation"); // 0.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getSolarRadiation() });
                    decimalASCIISampleData.append(
                            String.format("%04.1f", (Object) new Float(davisWxParser.getSolarRadiation()))
                                    + ", ");

                    // add the stormRain field data
                    channelIndex = rbnbChannelMap.Add("stormRain"); // 0.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex, new float[] { davisWxParser.getStormRain() });
                    decimalASCIISampleData.append(
                            String.format("%04.2f", (Object) new Float(davisWxParser.getStormRain())) + ", ");

                    // add the currentStormStartDate field data
                    channelIndex = rbnbChannelMap.Add("currentStormStartDate"); // -1--1-1999
                    rbnbChannelMap.PutMime(channelIndex, "text/plain");
                    rbnbChannelMap.PutDataAsString(channelIndex, davisWxParser.getCurrentStormStartDate());
                    decimalASCIISampleData.append(
                            String.format("%10s", (Object) davisWxParser.getCurrentStormStartDate()) + ", ");

                    // add the dailyRain field data
                    channelIndex = rbnbChannelMap.Add("dailyRain"); // 0.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex, new float[] { davisWxParser.getDailyRain() });
                    decimalASCIISampleData.append(
                            String.format("%04.2f", (Object) new Float(davisWxParser.getDailyRain())) + ", ");

                    // add the monthlyRain field data
                    channelIndex = rbnbChannelMap.Add("monthlyRain"); // 0.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getMonthlyRain() });
                    decimalASCIISampleData.append(
                            String.format("%04.2f", (Object) new Float(davisWxParser.getMonthlyRain())) + ", ");

                    // add the yearlyRain field data
                    channelIndex = rbnbChannelMap.Add("yearlyRain"); // 15.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getYearlyRain() });
                    decimalASCIISampleData.append(
                            String.format("%04.2f", (Object) new Float(davisWxParser.getYearlyRain())) + ", ");

                    // add the dailyEvapoTranspiration field data
                    channelIndex = rbnbChannelMap.Add("dailyEvapoTranspiration"); // 0.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getDailyEvapoTranspiration() });
                    decimalASCIISampleData.append(String.format("%04.2f",
                            (Object) new Float(davisWxParser.getDailyEvapoTranspiration())) + ", ");

                    // add the monthlyEvapoTranspiration field data
                    channelIndex = rbnbChannelMap.Add("monthlyEvapoTranspiration"); // 0.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getMonthlyEvapoTranspiration() });
                    decimalASCIISampleData.append(String.format("%04.2f",
                            (Object) new Float(davisWxParser.getMonthlyEvapoTranspiration())) + ", ");

                    // add the yearlyEvapoTranspiration field data
                    channelIndex = rbnbChannelMap.Add("yearlyEvapoTranspiration"); // 93.0
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getYearlyEvapoTranspiration() });
                    decimalASCIISampleData.append(String.format("%04.2f",
                            (Object) new Float(davisWxParser.getYearlyEvapoTranspiration())) + ", ");

                    // add the consoleBatteryVoltage field data
                    channelIndex = rbnbChannelMap.Add("consoleBatteryVoltage"); // 4.681640625
                    rbnbChannelMap.PutMime(channelIndex, "application/octet-stream");
                    rbnbChannelMap.PutDataAsFloat32(channelIndex,
                            new float[] { davisWxParser.getConsoleBatteryVoltage() });
                    decimalASCIISampleData.append(String.format("%04.2f",
                            (Object) new Float(davisWxParser.getConsoleBatteryVoltage())) + ", ");

                    // add the forecastAsString field data
                    channelIndex = rbnbChannelMap.Add("forecastAsString"); // Partially Cloudy
                    rbnbChannelMap.PutMime(channelIndex, "text/plain");
                    rbnbChannelMap.PutDataAsString(channelIndex, davisWxParser.getForecastAsString());
                    decimalASCIISampleData.append(
                            String.format("\"%47s\"", (Object) davisWxParser.getForecastAsString()) + ", ");

                    // add the forecastRuleNumberAsString field data as ASCII only
                    decimalASCIISampleData.append(
                            String.format("\"%167s\"", (Object) davisWxParser.getForecastRuleNumberAsString())
                                    + ", ");

                    // add the timeOfSunrise field data
                    channelIndex = rbnbChannelMap.Add("timeOfSunrise"); // 05:49
                    rbnbChannelMap.PutMime(channelIndex, "text/plain");
                    rbnbChannelMap.PutDataAsString(channelIndex, davisWxParser.getTimeOfSunrise());
                    decimalASCIISampleData
                            .append(String.format("%5s", (Object) davisWxParser.getTimeOfSunrise()) + ", ");

                    // add the timeOfSunset field data
                    channelIndex = rbnbChannelMap.Add("timeOfSunset"); // 19:11
                    rbnbChannelMap.PutMime(channelIndex, "text/plain");
                    rbnbChannelMap.PutDataAsString(channelIndex, davisWxParser.getTimeOfSunset());
                    decimalASCIISampleData
                            .append(String.format("%5s", (Object) davisWxParser.getTimeOfSunset()) + ", ");

                    // then add a timestamp to the end of the sample
                    DATE_FORMAT.setTimeZone(TZ);
                    String sampleDateAsString = DATE_FORMAT.format(new Date());
                    decimalASCIISampleData.append(sampleDateAsString);
                    decimalASCIISampleData.append("\n");

                    // add the ASCII CSV string of selected fields as a channel
                    channelIndex = rbnbChannelMap.Add(getRBNBChannelName());
                    rbnbChannelMap.PutMime(channelIndex, "text/plain");
                    rbnbChannelMap.PutDataAsString(channelIndex, decimalASCIISampleData.toString());

                    // finally, send the channel map of data to the DataTurbine
                    getSource().Flush(rbnbChannelMap);
                    String sampleString = new String(Hex.encodeHex(sampleArray));
                    logger.info("Sample: " + sampleString);
                    logger.debug("barTrendAsString:               " + davisWxParser.getBarTrendAsString());
                    logger.debug("barometer:                      " + davisWxParser.getBarometer());
                    logger.debug("insideTemperature:              " + davisWxParser.getInsideTemperature());
                    logger.debug("insideHumidity:                 " + davisWxParser.getInsideHumidity());
                    logger.debug("outsideTemperature:             " + davisWxParser.getOutsideTemperature());
                    logger.debug("windSpeed:                      " + davisWxParser.getWindSpeed());
                    logger.debug(
                            "tenMinuteAverageWindSpeed:      " + davisWxParser.getTenMinuteAverageWindSpeed());
                    logger.debug("windDirection:                  " + davisWxParser.getWindDirection());
                    logger.debug("outsideHumidity:                " + davisWxParser.getOutsideHumidity());
                    logger.debug("rainRate:                       " + davisWxParser.getRainRate());
                    logger.debug("uvRadiation:                    " + davisWxParser.getUvRadiation());
                    logger.debug("solarRadiation:                 " + davisWxParser.getSolarRadiation());
                    logger.debug("stormRain:                      " + davisWxParser.getStormRain());
                    logger.debug("currentStormStartDate:          " + davisWxParser.getCurrentStormStartDate());
                    logger.debug("dailyRain:                      " + davisWxParser.getDailyRain());
                    logger.debug("monthlyRain:                    " + davisWxParser.getMonthlyRain());
                    logger.debug("yearlyRain:                     " + davisWxParser.getYearlyRain());
                    logger.debug(
                            "dailyEvapoTranspiration:        " + davisWxParser.getDailyEvapoTranspiration());
                    logger.debug(
                            "monthlyEvapoTranspiration:      " + davisWxParser.getMonthlyEvapoTranspiration());
                    logger.debug(
                            "yearlyEvapoTranspiration:       " + davisWxParser.getYearlyEvapoTranspiration());
                    logger.debug("transmitterBatteryStatus:       "
                            + Arrays.toString(davisWxParser.getTransmitterBatteryStatus()));
                    logger.debug("consoleBatteryVoltage:          " + davisWxParser.getConsoleBatteryVoltage());
                    logger.debug("forecastAsString:               " + davisWxParser.getForecastAsString());
                    //logger.debug("forecastRuleNumberAsString:     " + davisWxParser.getForecastRuleNumberAsString());
                    logger.debug("timeOfSunrise:                  " + davisWxParser.getTimeOfSunrise());
                    logger.debug("timeOfSunset:                   " + davisWxParser.getTimeOfSunset());
                    logger.info(" flushed data to the DataTurbine. ");

                    byteOne = 0x00;
                    byteTwo = 0x00;
                    byteThree = 0x00;
                    byteFour = 0x00;
                    sampleBuffer.clear();
                    sampleByteCount = 0;
                    rbnbChannelMap.Clear();
                    //logger.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");
                    //state = 0;

                    // Once the sample is flushed, take a new sample
                    // allow time for the instrument response
                    Thread.sleep(2000);
                    this.command = this.commandPrefix + this.takeSampleCommand + this.commandSuffix;
                    this.sentCommand = queryInstrument(command);

                } // end switch statement

                // shift the bytes in the FIFO window
                byteFour = byteThree;
                byteThree = byteTwo;
                byteTwo = byteOne;

            } //end while (more unread bytes)

            // prepare the buffer to read in more bytes from the stream
            buffer.compact();

        } // end while (more socket bytes to read)
        this.socketChannel.close();

    } catch (IOException e) {
        // In the event of an I/O exception, log the exception, and allow
        // execute() to return false, which will prompt a retry.
        failed = true;
        e.printStackTrace();
        return !failed;
    } catch (SAPIException sapie) {
        // In the event of an RBNB communication exception, log the exception,
        // and allow execute() to return false, which will prompt a retry.
        failed = true;
        sapie.printStackTrace();
        return !failed;
    } catch (java.lang.InterruptedException ine) {
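        // In the event of an interrupted sleep, allow execute() to return
        // false, which will prompt a retry.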
        failed = true;
        ine.printStackTrace();
        return !failed;

    }

    return !failed;
}
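
A minimal, self-contained sketch of the fill/flip/drain cycle used with
sampleBuffer above (hypothetical class name and byte values, not taken from
the original source):

import java.nio.ByteBuffer;

public class FlipDrainSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);

        // fill mode: each put() advances the position
        buf.put((byte) 0x06).put((byte) 0x4C).put((byte) 0x0D);

        // flip() to drain mode: limit = 3 (the bytes written), position = 0
        buf.flip();

        // drain exactly the buffered bytes into an array
        byte[] out = new byte[buf.remaining()];
        buf.get(out);
        System.out.println(out.length + " bytes drained");

        // clear() restores position = 0, limit = capacity for the next fill
        buf.clear();
    }
}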

From source file:org.dkf.jmule.StoragePicker.java

public static String handle(Context context, int requestCode, int resultCode, Intent data) {
    String result = null;
    try {

        if (resultCode == Activity.RESULT_OK && requestCode == SELECT_FOLDER_REQUEST_CODE) {
            Uri treeUri = data.getData();

            if (treeUri == null) {
                UIUtils.showShortMessage(context, R.string.storage_picker_treeuri_null);
                result = null;
            } else {
                // take a persistable permission on the tree URI before using it
                // (via reflection, since takePersistableUriPermission is API 19+)
                ContentResolver cr = context.getContentResolver();
                Method takePersistableUriPermissionM = cr.getClass().getMethod("takePersistableUriPermission",
                        Uri.class, int.class);
                final int takeFlags = data.getFlags()
                        & (Intent.FLAG_GRANT_READ_URI_PERMISSION | Intent.FLAG_GRANT_WRITE_URI_PERMISSION);
                takePersistableUriPermissionM.invoke(cr, treeUri, takeFlags);

                DocumentFile file = DocumentFile.fromTreeUri(context, treeUri);
                if (!file.isDirectory()) {
                    UIUtils.showShortMessage(context, R.string.storage_picker_treeuri_not_directory);
                    result = null;
                } else if (!file.canWrite()) {
                    UIUtils.showShortMessage(context, R.string.storage_picker_treeuri_cant_write);
                    result = null;
                } else {
                    if (Platforms.get().saf()) {
                        LollipopFileSystem fs = (LollipopFileSystem) Platforms.fileSystem();
                        result = fs.getTreePath(treeUri);

                        // TODO - remove below code - only for testing SD card writing
                        File testFile = new File(result, "test_file.txt");
                        LOG.info("test file {}", testFile);

                        try {
                            Pair<ParcelFileDescriptor, DocumentFile> fd = fs.openFD(testFile, "rw");
                            if (fd != null && fd.first != null && fd.second != null) {
                                AndroidFileHandler ah = new AndroidFileHandler(testFile, fd.second, fd.first);
                                ByteBuffer bb = ByteBuffer.allocate(48);
                                bb.putInt(1).putInt(2).putInt(3).putInt(44).putInt(22);
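                                // flip() before the channel write: limit = 20
                                // (five ints), position = 0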
                                bb.flip();
                                ah.getWriteChannel().write(bb);
                                ah.close();
                            } else {
                                LOG.error("unable to create file {}", testFile);
                            }
                        } catch (Exception e) {
                            LOG.error("unable to fill file {} error {}", testFile, e);
                        }
                    }
                }
            }
        }
    } catch (Exception e) {
        UIUtils.showShortMessage(context, R.string.storage_picker_treeuri_error);
        LOG.error("Error handling folder selection {}", e);
        result = null;
    }

    return result;
}
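
Note on the test write above: the 48-byte buffer is larger than needed; after
the five putInt() calls the position is 20, so bb.flip() limits the channel
write to exactly those 20 bytes.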