Example usage for java.nio ByteBuffer remaining

List of usage examples for java.nio ByteBuffer remaining

Introduction

On this page you can find example usages of java.nio.ByteBuffer.remaining().

Prototype

public final int remaining() 

Document

Returns the number of elements remaining in this buffer, that is, limit - position.
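
Before the full-length examples, a minimal sketch (not taken from any of the source files below) showing how remaining() tracks limit - position across writes, flip(), and reads:

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);   // position=0, limit=16
        System.out.println(buf.remaining());        // 16

        buf.put(new byte[] { 1, 2, 3 });            // position=3
        System.out.println(buf.remaining());        // 13 = limit - position

        buf.flip();                                 // limit=3, position=0
        System.out.println(buf.remaining());        // 3 bytes left to read

        buf.get();                                  // position=1
        System.out.println(buf.remaining());        // 2
    }
}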

Usage

From source file:libepg.ts.reader.Reader2.java

/**
 * Reads TS packets from the file.<br>
 * 1: Scans the stream one byte at a time until a sync byte is found.<br>
 * 2: Once a sync byte is found, one packet's worth of bytes is read.<br>
 * 3: Packets whose transport error indicator is set are discarded.<br>
 * 4: Reading stops at EOF or when the read limit is reached.<br>
 *
 * @return the packets read, as an unmodifiable list
 */
public synchronized List<TsPacket> getPackets() {
    ByteBuffer packetBuffer = ByteBuffer.allocate(TsPacket.TS_PACKET_BYTE_LENGTH.PACKET_LENGTH.getByteLength());
    byte[] byteData = new byte[1];

    // accumulates the packets read so far
    List<TsPacket> packets = new ArrayList<>();

    FileInputStream fis = null;
    PushbackInputStream pis = null;
    try {

        fis = new FileInputStream(this.TSFile);
        pis = new PushbackInputStream(fis);

        boolean tipOfPacket = false; // true when positioned at the start of a packet

        long count = 0;

        // read the stream one byte at a time until EOF
        while (pis.read(byteData) != EOF) {

            // a sync byte marks the start of a TS packet
            if ((byteData[0] == TsPacket.TS_SYNC_BYTE) && !tipOfPacket) {
                tipOfPacket = true;
                if (LOG.isTraceEnabled() && NOT_DETERRENT_READ_TRACE_LOG) {
                    LOG.trace("Found a sync byte; pushing it back and reading one packet.");
                }
                pis.unread(byteData);
            }

            if (tipOfPacket) {
                byte[] tsPacketData = new byte[TsPacket.TS_PACKET_BYTE_LENGTH.PACKET_LENGTH.getByteLength()];
                if (pis.read(tsPacketData) != EOF) {
                    if (LOG.isTraceEnabled() && NOT_DETERRENT_READ_TRACE_LOG) {
                        LOG.trace("Read one packet's worth of bytes.");
                    }
                    packetBuffer.put(tsPacketData);
                } else {
                    break;
                }
            }

            if (packetBuffer.remaining() == 0) {
                byte[] BeforeCutDown = packetBuffer.array();
                byte[] AfterCutDown = new byte[packetBuffer.position()];
                System.arraycopy(BeforeCutDown, 0, AfterCutDown, 0, AfterCutDown.length);

                // assemble a TsPacket from the accumulated bytes
                TsPacket tsp = new TsPacket(AfterCutDown);

                //                        LOG.debug(Hex.encodeHexString(tsp.getData()));
                if (LOG.isTraceEnabled() && NOT_DETERRENT_READ_TRACE_LOG) {
                    LOG.trace("Finished reading one packet.");
                    LOG.trace(tsp.toString());
                }

                if (tsp.getTransport_error_indicator() != 0) {
                    if (LOG.isWarnEnabled()) {
                        LOG.warn("Transport error indicator is set; discarding this packet.");
                        LOG.warn(tsp);
                        LOG.warn(TSFile);
                    }
                    tipOfPacket = false;
                } else {
                    packets.add(tsp);
                    count++;
                }
                packetBuffer.clear();
                tipOfPacket = false;

                if (this.readLimit != null && count >= this.readLimit) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("Reached the read limit. limit = " + this.readLimit);
                    }
                    break;
                }
            }

        }
        if (LOG.isTraceEnabled() && NOT_DETERRENT_READ_TRACE_LOG) {
            LOG.trace("?????????");
            LOG.trace(" = " + Hex.encodeHexString(packetBuffer.array()));
        }

        pis.close();
        fis.close();
        LOG.info("??? = " + count);

    } catch (FileNotFoundException e) {
        LOG.fatal("File not found.", e);
    } catch (IOException e) {
        LOG.fatal("I/O error.", e);
    }
    return Collections.unmodifiableList(packets);
}
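
The reader above uses packetBuffer.remaining() == 0 to detect that exactly one full packet (188 bytes for TS) has accumulated. A standalone sketch of that fixed-size-record pattern (the class name and chunk size are illustrative, not part of libepg):

import java.nio.ByteBuffer;

public class FixedRecordAssembler {
    private static final int RECORD_LENGTH = 188; // a TS packet is 188 bytes

    public static void main(String[] args) {
        ByteBuffer record = ByteBuffer.allocate(RECORD_LENGTH);
        byte[] chunk = new byte[47]; // 188 / 4; an arbitrary read size for the demo

        for (int i = 0; i < 4; i++) {
            record.put(chunk);
            if (record.remaining() == 0) { // exactly one full record accumulated
                byte[] complete = new byte[record.position()];
                record.flip();
                record.get(complete);
                System.out.println("assembled a record of " + complete.length + " bytes");
                record.clear(); // ready for the next record
            }
        }
    }
}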

From source file:edu.hawaii.soest.pacioos.text.SocketTextSource.java

@Override
protected boolean execute() {

    log.debug("SocketTextSource.execute() called.");
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;

    boolean failed = false;

    /* Get a connection to the instrument */
    SocketChannel socket = getSocketConnection();
    if (socket == null)
        return false;

    // while data are being sent, read them into the buffer
    try {
        // create four byte placeholders used to evaluate up to a four-byte 
        // window.  The FIFO layout looks like:
        //           -------------------------
        //   in ---> | One | Two |Three|Four |  ---> out
        //           -------------------------
        byte byteOne = 0x00, // set initial placeholder values
                byteTwo = 0x00, byteThree = 0x00, byteFour = 0x00;

        // Create a buffer that will store the sample bytes as they are read
        ByteBuffer sampleBuffer = ByteBuffer.allocate(getBufferSize());

        // create a byte buffer to store bytes from the TCP stream
        ByteBuffer buffer = ByteBuffer.allocateDirect(getBufferSize());

        // while there are bytes to read from the socket ...
        while (socket.read(buffer) != -1 || buffer.position() > 0) {

            // prepare the buffer for reading
            buffer.flip();

            // while there are unread bytes in the ByteBuffer
            while (buffer.hasRemaining()) {
                byteOne = buffer.get();

                // log the byte stream
                String character = new String(new byte[] { byteOne });
                if (log.isDebugEnabled()) {
                    List<Byte> whitespaceBytes = new ArrayList<Byte>();
                    whitespaceBytes.add(new Byte((byte) 0x0A));
                    whitespaceBytes.add(new Byte((byte) 0x0D));
                    if (whitespaceBytes.contains(new Byte(byteOne))) {
                        character = new String(Hex.encodeHex((new byte[] { byteOne })));

                    }
                }
                log.debug("char: " + character + "\t" + "b1: "
                        + new String(Hex.encodeHex((new byte[] { byteOne }))) + "\t" + "b2: "
                        + new String(Hex.encodeHex((new byte[] { byteTwo }))) + "\t" + "b3: "
                        + new String(Hex.encodeHex((new byte[] { byteThree }))) + "\t" + "b4: "
                        + new String(Hex.encodeHex((new byte[] { byteFour }))) + "\t" + "sample pos: "
                        + sampleBuffer.position() + "\t" + "sample rem: " + sampleBuffer.remaining() + "\t"
                        + "sample cnt: " + sampleByteCount + "\t" + "buffer pos: " + buffer.position() + "\t"
                        + "buffer rem: " + buffer.remaining() + "\t" + "state: " + state);

                // evaluate each byte to find the record delimiter(s), and when found, validate and
                // send the sample to the DataTurbine.
                int numberOfChannelsFlushed = 0;

                if (getRecordDelimiters().length == 2) {
                    // have we hit the delimiters in the stream yet?
                    if (byteTwo == getFirstDelimiterByte() && byteOne == getSecondDelimiterByte()) {
                        sampleBuffer.put(byteOne);
                        sampleByteCount++;
                        // extract just the length of the sample bytes out of the
                        // sample buffer, and place it in the channel map as a 
                        // byte array.  Then, send it to the DataTurbine.
                        log.debug("Sample byte count: " + sampleByteCount);
                        byte[] sampleArray = new byte[sampleByteCount];
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);
                        String sampleString = new String(sampleArray, "US-ASCII");

                        if (validateSample(sampleString)) {
                            numberOfChannelsFlushed = sendSample(sampleString);

                        }

                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        log.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");

                    } else {
                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found

                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);

                        } else {
                            sampleBuffer.compact();
                            log.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);

                        }

                    }

                } else if (getRecordDelimiters().length == 1) {
                    // have we hit the delimiter in the stream yet?
                    if (byteOne == getFirstDelimiterByte()) {
                        sampleBuffer.put(byteOne);
                        sampleByteCount++;
                        // extract just the length of the sample bytes out of the
                        // sample buffer, and place it in the channel map as a 
                        // byte array.  Then, send it to the DataTurbine.
                        byte[] sampleArray = new byte[sampleByteCount];
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);
                        String sampleString = new String(sampleArray, "US-ASCII");

                        if (validateSample(sampleString)) {
                            numberOfChannelsFlushed = sendSample(sampleString);

                        }

                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        log.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");

                    } else {
                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found

                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);

                        } else {
                            sampleBuffer.compact();
                            log.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);

                        }

                    }

                } // end getRecordDelimiters().length

                // shift the bytes in the FIFO window
                byteFour = byteThree;
                byteThree = byteTwo;
                byteTwo = byteOne;

            } //end while (more unread bytes)

            // prepare the buffer to read in more bytes from the stream
            buffer.compact();

        } // end while (more socket bytes to read)
        socket.close();

    } catch (IOException e) {
        // handle exceptions
        // In the event of an i/o exception, log the exception, and allow execute()
        // to return false, which will prompt a retry.
        failed = true;
        log.error("There was a communication error in sending the data sample. The message was: "
                + e.getMessage());
        if (log.isDebugEnabled()) {
            e.printStackTrace();
        }
        return !failed;

    } catch (SAPIException sapie) {
        // In the event of an RBNB communication  exception, log the exception, 
        // and allow execute() to return false, which will prompt a retry.
        failed = true;
        log.error("There was an RBNB error while sending the data sample. The message was: "
                + sapie.getMessage());
        if (log.isDebugEnabled()) {
            sapie.printStackTrace();
        }
        return !failed;
    }

    return !failed;

}
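
The outer loop above is the standard NIO fill/flip/drain/compact cycle, with hasRemaining() (equivalent to remaining() > 0) gating the inner byte scan. A stripped-down, runnable sketch of that cycle, substituting an in-memory channel for the instrument socket:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class ReadDrainCompact {
    public static void main(String[] args) throws IOException {
        ReadableByteChannel channel = Channels
                .newChannel(new ByteArrayInputStream("hello\r\nworld\r\n".getBytes("US-ASCII")));
        ByteBuffer buffer = ByteBuffer.allocate(8);
        int total = 0;

        while (channel.read(buffer) != -1 || buffer.position() > 0) {
            buffer.flip();                  // switch the buffer to reading
            while (buffer.hasRemaining()) { // i.e. remaining() > 0
                buffer.get();               // a per-byte state machine would run here
                total++;
            }
            buffer.compact();               // keep unread bytes, resume filling
        }
        System.out.println("drained " + total + " bytes"); // 14
    }
}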

From source file:org.commoncrawl.io.internal.NIOHttpConnection.java

private void processChunkedContent() throws IOException {

    while (_inBuf.available() != 0 && _chunkState != ChunkState.STATE_DONE) {

        switch (_chunkState) {

        case STATE_AWAITING_CHUNK_HEADER: {

            _chunkCRLFReadState = _inBuf.readCRLFLine(_chunkLineBuffer, CHUNK_LINE_MAX, _chunkCRLFReadState);

            if (_chunkCRLFReadState == CRLFReadState.DONE) {
                // get the newly extracted line ... 
                String line = _chunkLineBuffer.toString();
                // now find first occurrence of whitespace ... 
                int whiteSpaceIdx = line.indexOf(' ');
                if (whiteSpaceIdx != -1) {
                    line = line.substring(0, whiteSpaceIdx);
                }
                // now extract chunk length ... 
                try {
                    _chunkSize = Integer.parseInt(line, 16);
                } catch (NumberFormatException e) {
                    LOG.error("Invalid Chunk Size Encountered reading CHUNK HEADER:" + line);
                    throw new IOException("Invalid chunk size");
                }
                // reset chunk pos cursor ... 
                _chunkPos = 0;
                // reset chunk read state 
                _chunkCRLFReadState = CRLFReadState.NONE;
                // reset the buffer for the next potential line read ... 
                _chunkLineBuffer.setLength(0);

                // now interpret the chunk size value ... 
                if (_chunkSize > 0) {
                    _chunkState = ChunkState.STATE_READING_CHUNK;
                } else {
                    _chunkState = ChunkState.STATE_AWAITING_TRAILERS;
                }
            }
        }
            break;

        case STATE_READING_CHUNK: {

            // calculate amount we want to read in ... 
            int amountToRead = Math.min(_chunkSize - _chunkPos, _inBuf.available());
            // and track amount we wrote into chunk content buffer 
            int amountWritten = 0;

            while (amountToRead != 0) {

                // get a write buffer ... 
                ByteBuffer writeBuffer = _chunkContentBuffer.getWriteBuf();

                // get the next read buffer 
                ByteBuffer readBuffer = _inBuf.read();

                if (readBuffer == writeBuffer) {
                    throw new RuntimeException("BAD NEWS!!!");
                }

                //TODO: There is an opportunity here to skip buffer copy altogether and add read buffer directly to write buffer list 
                //            Need to look into this. 

                // if buffer size is > amountToRead ... 
                if (readBuffer.remaining() > writeBuffer.remaining() || readBuffer.remaining() > amountToRead) {

                    // slice the read buffer ... 
                    ByteBuffer sliced = readBuffer.slice();
                    // calculate slice amount 
                    int sliceAmount = Math.min(writeBuffer.remaining(), amountToRead);

                    // and increment original ... 
                    readBuffer.position(readBuffer.position() + sliceAmount);
                    // and limit sliced buffer scope ... 
                    sliced.limit(sliced.position() + sliceAmount);
                    // reduce amountToRead 
                    amountToRead -= sliceAmount;
                    // and increment chunk pos 
                    _chunkPos += sliceAmount;
                    // track amount written ... 
                    amountWritten += sliced.remaining();
                    // append it ... 
                    writeBuffer.put(sliced);
                    // and put back the read buffer 
                    _inBuf.putBack(readBuffer);
                }
                // otherwise... append whole buffer to write buffer 
                else {
                    // reduce amountToRead 
                    amountToRead -= readBuffer.remaining();
                    // and increment chunk pos 
                    _chunkPos += readBuffer.remaining();
                    // track amount written 
                    amountWritten += readBuffer.remaining();
                    // append as much as possible into the write buffer ... 
                    writeBuffer.put(readBuffer);
                }
            }

            // if we wrote some data to the content buffer ... 
            if (amountWritten != 0) {
                // update bytes downloaded ...
                _downloadedContentLength += amountWritten;

                if (getListener() != null) {
                    // inform listener of content availability 
                    getListener().HttpContentAvailable(this, _chunkContentBuffer);
                }
            }

            // now if we read in a chunk's worth of data ... advance state ... 
            if (_chunkPos == _chunkSize) {
                _chunkState = ChunkState.STATE_AWAITING_CHUNK_EOL;
            }
        }
            break;

        case STATE_AWAITING_CHUNK_EOL: {

            if (_inBuf.available() >= 2) {
                ByteBuffer readBuffer = _inBuf.read();

                if (readBuffer.get() != '\r') {
                    LOG.error("Missing CR from Chunk Data Terminator");
                    throw new IOException("missing CR");
                }
                // now if read buffer is exhausted ... release it and get another one ... 
                if (readBuffer.remaining() == 0) {
                    readBuffer = _inBuf.read();
                }

                if (readBuffer.get() != '\n') {
                    LOG.error("Missing LFfrom Chunk Data Terminator");
                    throw new IOException("missing LF");
                }
                // put back the read buffer 
                _inBuf.putBack(readBuffer);
                // and transition to the next state ... 
                _chunkState = ChunkState.STATE_AWAITING_CHUNK_HEADER;
            } else {
                // break out and wait for more data 
                return;
            }
        }
            break;

        case STATE_AWAITING_TRAILERS: {

            _chunkCRLFReadState = _inBuf.readCRLFLine(_chunkLineBuffer, CHUNK_LINE_MAX, _chunkCRLFReadState);

            if (_chunkCRLFReadState == CRLFReadState.DONE) {
                // transition to a done state ... 
                _chunkState = ChunkState.STATE_DONE;
                // clear out intermediate crlf state
                _chunkCRLFReadState = CRLFReadState.NONE;
                _chunkLineBuffer.setLength(0);
            } else {
                break;
            }
        }
        // fall through if chunk state is done ... 

        case STATE_DONE: {
            // clear out existing input buffer ...
            _inBuf.reset();
            // flush chunk buffer ...
            _chunkContentBuffer.flush();
            // and swap it with the real content buffer ... 
            _inBuf = _chunkContentBuffer;
            // reset chunk state ... 
            _chunkContentBuffer = null;
            // reset chunked flag ... 
            _chunked = false;
            // set HTTP DONE state ... 
            setState(State.DONE, null);
        }
            break;
        }
    }
}
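
The STATE_READING_CHUNK branch caps each copy at min(writeBuffer.remaining(), amountToRead) by slicing the read buffer and shrinking the slice's limit before the bulk put. A reduction of that idiom (the method and names are illustrative):

import java.nio.ByteBuffer;

public class BoundedTransfer {
    // Copy at most maxBytes from src into dst without overflowing either
    // buffer, via the slice-and-limit idiom used in STATE_READING_CHUNK.
    static int transfer(ByteBuffer src, ByteBuffer dst, int maxBytes) {
        int n = Math.min(Math.min(src.remaining(), dst.remaining()), maxBytes);
        ByteBuffer sliced = src.slice();  // shares src's content, position = 0
        sliced.limit(n);                  // cap the copy at n bytes
        dst.put(sliced);                  // bulk put stops at the slice's limit
        src.position(src.position() + n); // advance the original source
        return n;
    }

    public static void main(String[] args) {
        ByteBuffer src = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5, 6 });
        ByteBuffer dst = ByteBuffer.allocate(4);
        int copied = transfer(src, dst, 3);
        System.out.println(copied + " copied; src remaining = " + src.remaining()
                + ", dst remaining = " + dst.remaining()); // 3 copied; 3, 1
    }
}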

From source file:com.robonobo.eon.SEONConnection.java

/**
 * Blocks until there is data to read.
 * 
 * @param buf the buffer into which incoming data is copied
 */
public void read(ByteBuffer buf) throws EONException {
    receiveLock.lock();
    try {
        while (true) {
            while (incomingDataBufs.size() == 0) {
                if (state == State.Closed)
                    return;
                try {
                    haveData.await();
                } catch (InterruptedException e) {
                    throw new EONException(e);
                }
            }
            ByteBuffer incoming = (ByteBuffer) incomingDataBufs.getFirst();
            if (buf.remaining() >= incoming.remaining())
                buf.put(incoming);
            else {
                int remain = buf.remaining();
                buf.put(incoming.array(), incoming.position(), remain);
                incoming.position(incoming.position() + remain);
            }
            if (incoming.remaining() == 0)
                incomingDataBufs.removeFirst();
            if (buf.remaining() == 0)
                return;
            if (incomingDataBufs.size() == 0)
                return;
        }
    } finally {
        receiveLock.unlock();
    }
}
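
The branch above chooses between a bulk put (when buf.remaining() >= incoming.remaining()) and a partial array copy that advances the source position by hand. A standalone reduction, assuming the source buffer has an accessible backing array as the incoming buffers here do:

import java.nio.ByteBuffer;

public class PartialDrain {
    // Move as much of src into dst as fits. Assumes src has an accessible
    // backing array, as in SEONConnection.read().
    static void drainInto(ByteBuffer src, ByteBuffer dst) {
        if (dst.remaining() >= src.remaining()) {
            dst.put(src); // dst can hold everything src has left
        } else {
            int n = dst.remaining();
            dst.put(src.array(), src.position(), n); // copy only what fits
            src.position(src.position() + n);        // advance src by hand
        }
    }

    public static void main(String[] args) {
        ByteBuffer src = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5 });
        ByteBuffer dst = ByteBuffer.allocate(3);
        drainInto(src, dst);
        System.out.println("src remaining = " + src.remaining()); // 2
    }
}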

From source file:org.commoncrawl.io.NIOHttpConnection.java

private void processChunkedContent() throws IOException {

    while (_inBuf.available() != 0 && _chunkState != ChunkState.STATE_DONE) {

        switch (_chunkState) {

        case STATE_AWAITING_CHUNK_HEADER: {

            _chunkCRLFReadState = _inBuf.readCRLFLine(_chunkLineBuffer, CHUNK_LINE_MAX, _chunkCRLFReadState);

            if (_chunkCRLFReadState == CRLFReadState.DONE) {
                // get the newly extracted line ...
                String line = _chunkLineBuffer.toString();
                // now find first occurrence of whitespace ...
                int whiteSpaceIdx = line.indexOf(' ');
                if (whiteSpaceIdx != -1) {
                    line = line.substring(0, whiteSpaceIdx);
                }
                // now extract chunk length ...
                try {
                    _chunkSize = Integer.parseInt(line, 16);
                } catch (NumberFormatException e) {
                    LOG.error("Connection:[" + getId()
                            + "] Invalid Chunk Size Encountered reading CHUNK HEADER:" + line);
                    throw new IOException("Invalid chunk size");
                }
                // reset chunk pos cursor ...
                _chunkPos = 0;
                // reset chunk read state
                _chunkCRLFReadState = CRLFReadState.NONE;
                // reset the buffer for the next potential line read ...
                _chunkLineBuffer.setLength(0);

                // now interpret the chunk size value ...
                if (_chunkSize > 0) {
                    _chunkState = ChunkState.STATE_READING_CHUNK;
                } else {
                    _chunkState = ChunkState.STATE_AWAITING_TRAILERS;
                }
            }
        }
            break;

        case STATE_READING_CHUNK: {

            // calculate amount we want to read in ...
            int amountToRead = Math.min(_chunkSize - _chunkPos, _inBuf.available());
            // and track amount we wrote into chunk content buffer
            int amountWritten = 0;

            while (amountToRead != 0) {

                // get a write buffer ...
                ByteBuffer writeBuffer = _chunkContentBuffer.getWriteBuf();

                // get the next read buffer
                ByteBuffer readBuffer = _inBuf.read();

                if (readBuffer == writeBuffer) {
                    throw new RuntimeException("BAD NEWS!!!");
                }

                // TODO: There is an opportunity here to skip buffer copy altogether
                // and add read buffer directly to write buffer list
                // Need to look into this.

                // if buffer size is > amountToRead ...
                if (readBuffer.remaining() > writeBuffer.remaining() || readBuffer.remaining() > amountToRead) {

                    // slice the read buffer ...
                    ByteBuffer sliced = readBuffer.slice();
                    // calculate slice amount
                    int sliceAmount = Math.min(writeBuffer.remaining(), amountToRead);

                    // and increment original ...
                    readBuffer.position(readBuffer.position() + sliceAmount);
                    // and limit sliced buffer scope ...
                    sliced.limit(sliced.position() + sliceAmount);
                    // reduce amountToRead
                    amountToRead -= sliceAmount;
                    // and increment chunk pos
                    _chunkPos += sliceAmount;
                    // track amount written ...
                    amountWritten += sliced.remaining();
                    // append it ...
                    writeBuffer.put(sliced);
                    // and put back the read buffer
                    _inBuf.putBack(readBuffer);
                }
                // otherwise... append whole buffer to write buffer
                else {
                    // reduce amountToRead
                    amountToRead -= readBuffer.remaining();
                    // and increment chunk pos
                    _chunkPos += readBuffer.remaining();
                    // track amount written
                    amountWritten += readBuffer.remaining();
                    // append as much as possible into the write buffer ...
                    writeBuffer.put(readBuffer);
                }
            }

            // if we wrote some data to the content buffer ...
            if (amountWritten != 0) {
                // update bytes downloaded ...
                _downloadedContentLength += amountWritten;

                if (getListener() != null) {
                    // inform listener of content availability
                    getListener().HttpContentAvailable(this, _chunkContentBuffer);
                }
            }

            // now if we read in a chunk's worth of data ... advance state ...
            if (_chunkPos == _chunkSize) {
                _chunkState = ChunkState.STATE_AWAITING_CHUNK_EOL;
            }
        }
            break;

        case STATE_AWAITING_CHUNK_EOL: {

            if (_inBuf.available() >= 2) {
                ByteBuffer readBuffer = _inBuf.read();

                if (readBuffer.get() != '\r') {
                    LOG.error("Connection:[" + getId() + "] Missing CR from Chunk Data Terminator");
                    throw new IOException("missing CR");
                }
                // now if read buffer is exhausted ... release it and get another one ...
                if (readBuffer.remaining() == 0) {
                    readBuffer = _inBuf.read();
                }

                if (readBuffer.get() != '\n') {
                    LOG.error("Connection:[" + getId() + "] Missing LFfrom Chunk Data Terminator");
                    throw new IOException("missing LF");
                }
                // put back the read buffer
                _inBuf.putBack(readBuffer);
                // and transition to the next state ...
                _chunkState = ChunkState.STATE_AWAITING_CHUNK_HEADER;
            } else {
                // break out and wait for more data
                return;
            }
        }
            break;

        case STATE_AWAITING_TRAILERS: {

            _chunkCRLFReadState = _inBuf.readCRLFLine(_chunkLineBuffer, CHUNK_LINE_MAX, _chunkCRLFReadState);

            if (_chunkCRLFReadState == CRLFReadState.DONE) {
                // transition to a done state ...
                _chunkState = ChunkState.STATE_DONE;
                // clear out intermediate crlf state
                _chunkCRLFReadState = CRLFReadState.NONE;
                _chunkLineBuffer.setLength(0);
            } else {
                break;
            }
        }
        // fall through if chunk state is done ...

        case STATE_DONE: {
            // clear out existing input buffer ...
            _inBuf.reset();
            // flush chunk buffer ...
            _chunkContentBuffer.flush();
            // and swap it with the real content buffer ...
            _inBuf = _chunkContentBuffer;
            // reset chunk state ...
            _chunkContentBuffer = null;
            // reset chunked flag ...
            _chunked = false;
            // set HTTP DONE state ...
            setState(State.DONE, null);
        }
            break;
        }
    }
}
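
One detail worth isolating from STATE_AWAITING_CHUNK_EOL: the CRLF terminator may straddle two read buffers, so the code fetches a fresh buffer when remaining() hits zero between the CR and the LF. A toy reduction, with a queue standing in for _inBuf:

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;

public class CrLfAcrossBuffers {
    public static void main(String[] args) {
        Deque<ByteBuffer> inBuf = new ArrayDeque<>();
        inBuf.add(ByteBuffer.wrap(new byte[] { '\r' })); // CR ends one buffer
        inBuf.add(ByteBuffer.wrap(new byte[] { '\n' })); // LF starts the next

        ByteBuffer readBuffer = inBuf.poll();
        if (readBuffer.get() != '\r') throw new IllegalStateException("missing CR");
        if (readBuffer.remaining() == 0) { // buffer exhausted mid-terminator
            readBuffer = inBuf.poll();     // move on to the next buffer
        }
        if (readBuffer.get() != '\n') throw new IllegalStateException("missing LF");
        System.out.println("terminator consumed across two buffers");
    }
}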

From source file:org.apache.hadoop.hbase.ipc.RpcServer.java

/**
 * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}.
 * If the amount of data is large, it reads from the channel in smaller chunks.
 * This is to avoid the JDK creating many direct buffers as the size of the
 * ByteBuffer increases. There should not be any performance degradation.
 *
 * @param channel readable byte channel to read from
 * @param buffer buffer to read into
 * @return number of bytes read
 * @throws java.io.IOException e
 * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)
 */
protected int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {

    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer)
            : channelIO(channel, null, buffer);
    if (count > 0) {
        metrics.receivedBytes(count);
    }
    return count;
}
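
channelIO() is not shown here; the sketch below illustrates one plausible way such chunking can work, by temporarily lowering the buffer's limit so each read is at most NIO_BUFFER_LIMIT bytes (the constant's value is assumed for the demo):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class ChunkedChannelRead {
    // Assumed value for the demo; HBase's actual NIO_BUFFER_LIMIT may differ.
    private static final int NIO_BUFFER_LIMIT = 8 * 1024;

    // Read into buffer in bounded slices by temporarily lowering its limit.
    static int readFully(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
        int total = 0;
        while (buffer.remaining() > 0) {
            int originalLimit = buffer.limit();
            // cap this read at NIO_BUFFER_LIMIT bytes
            buffer.limit(buffer.position() + Math.min(buffer.remaining(), NIO_BUFFER_LIMIT));
            int n = channel.read(buffer);
            buffer.limit(originalLimit); // restore the caller's limit
            if (n <= 0) {
                break;
            }
            total += n;
        }
        return total;
    }

    public static void main(String[] args) throws IOException {
        ReadableByteChannel channel = Channels.newChannel(new ByteArrayInputStream(new byte[20000]));
        ByteBuffer buffer = ByteBuffer.allocate(20000);
        System.out.println("read " + readFully(channel, buffer) + " bytes"); // 20000
    }
}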

From source file:edu.hawaii.soest.kilonalu.tchain.TChainSource.java

/**
 * A method that executes the streaming of data from the source to the RBNB
 * server after all configuration of settings, connections to hosts, and
 * thread initializing occurs.  This method contains the detailed code for
 * streaming the data and interpreting the stream.
 */
protected boolean execute() {
    logger.debug("TChainSource.execute() called.");
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;

    boolean failed = false;

    SocketChannel socket = getSocketConnection();

    // while data are being sent, read them into the buffer
    try {
        // create four byte placeholders used to evaluate up to a four-byte 
        // window.  The FIFO layout looks like:
        //           -------------------------
        //   in ---> | One | Two |Three|Four |  ---> out
        //           -------------------------
        byte byteOne = 0x00, // set initial placeholder values
                byteTwo = 0x00, byteThree = 0x00, byteFour = 0x00;

        // Create a buffer that will store the sample bytes as they are read
        ByteBuffer sampleBuffer = ByteBuffer.allocate(getBufferSize());

        // create a byte buffer to store bytes from the TCP stream
        ByteBuffer buffer = ByteBuffer.allocateDirect(getBufferSize());

        // add a channel of data that will be pushed to the server.  
        // Each sample will be sent to the Data Turbine as an rbnb frame.
        ChannelMap rbnbChannelMap = new ChannelMap();

        // while there are bytes to read from the socket ...
        while (socket.read(buffer) != -1 || buffer.position() > 0) {

            // prepare the buffer for reading
            buffer.flip();

            // while there are unread bytes in the ByteBuffer
            while (buffer.hasRemaining()) {
                byteOne = buffer.get();
                logger.debug("char: " + (char) byteOne + "\t" + "b1: "
                        + new String(Hex.encodeHex((new byte[] { byteOne }))) + "\t" + "b2: "
                        + new String(Hex.encodeHex((new byte[] { byteTwo }))) + "\t" + "b3: "
                        + new String(Hex.encodeHex((new byte[] { byteThree }))) + "\t" + "b4: "
                        + new String(Hex.encodeHex((new byte[] { byteFour }))) + "\t" + "sample pos: "
                        + sampleBuffer.position() + "\t" + "sample rem: " + sampleBuffer.remaining() + "\t"
                        + "sample cnt: " + sampleByteCount + "\t" + "buffer pos: " + buffer.position() + "\t"
                        + "buffer rem: " + buffer.remaining() + "\t" + "state: " + state);

                // Use a State Machine to process the byte stream.
                // Start building an rbnb frame for the entire sample, first by 
                // inserting a timestamp into the channelMap.  This time is merely
                // the time of insert into the data turbine, not the time of
                // observations of the measurements.  That time should be parsed out
                // of the sample in the Sink client code

                switch (state) {

                case 0:

                    // sample line ending is '\r\n' (carriage return, newline)
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == this.firstDelimiterByte && byteTwo == this.secondDelimiterByte) {
                        // we've found the end of a sample, move on
                        state = 1;
                        break;

                    } else {
                        break;
                    }

                case 1: // read the rest of the bytes to the next EOL characters

                    // sample line is terminated by record delimiter bytes (usually \r\n or \n)
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == this.firstDelimiterByte && byteTwo == this.secondDelimiterByte) {

                        // rewind the sample to overwrite the line ending so we can add
                        // in the timestamp (then add the line ending)
                        sampleBuffer.position(sampleBuffer.position() - 1);
                        --sampleByteCount;

                        // add the delimiter to the end of the sample.
                        byte[] delimiterAsBytes = getFieldDelimiter().getBytes("US-ASCII");

                        for (byte delim : delimiterAsBytes) {
                            sampleBuffer.put(delim);
                            sampleByteCount++;
                        }

                        // then add a timestamp to the end of the sample
                        DATE_FORMAT.setTimeZone(TZ);
                        byte[] sampleDateAsBytes = DATE_FORMAT.format(new Date()).getBytes("US-ASCII");
                        for (byte b : sampleDateAsBytes) {
                            sampleBuffer.put(b);
                            sampleByteCount++;
                        }

                        // add the last two bytes found (usually \r\n) to the sample buffer
                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);
                            sampleByteCount++;
                            sampleBuffer.put(byteTwo);
                            sampleByteCount++;

                        } else {
                            sampleBuffer.compact();
                            sampleBuffer.put(byteOne);
                            sampleByteCount++;
                            sampleBuffer.put(byteTwo);
                            sampleByteCount++;

                        }

                        // extract just the length of the sample bytes out of the
                        // sample buffer, and place it in the channel map as a 
                        // byte array.  Then, send it to the data turbine.
                        byte[] sampleArray = new byte[sampleByteCount];
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);

                        // send the sample to the data turbine
                        rbnbChannelMap.PutTimeAuto("server");
                        String sampleString = new String(sampleArray, "US-ASCII");
                        int channelIndex = rbnbChannelMap.Add(getRBNBChannelName());
                        rbnbChannelMap.PutMime(channelIndex, "text/plain");
                        rbnbChannelMap.PutDataAsString(channelIndex, sampleString);
                        getSource().Flush(rbnbChannelMap);
                        logger.info("Sample: " + sampleString.substring(0, sampleString.length() - 2)
                                + " sent data to the DataTurbine. ");

                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        rbnbChannelMap.Clear();
                        logger.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");
                        //state = 0;

                    } else { // not 0x0D20

                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found

                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);
                        } else {
                            sampleBuffer.compact();
                            logger.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);

                        }

                        break;
                    } // end if for 0x0D20 EOL

                } // end switch statement

                // shift the bytes in the FIFO window
                byteFour = byteThree;
                byteThree = byteTwo;
                byteTwo = byteOne;

            } //end while (more unread bytes)

            // prepare the buffer to read in more bytes from the stream
            buffer.compact();

        } // end while (more socket bytes to read)
        socket.close();

    } catch (IOException e) {
        // handle exceptions
        // In the event of an i/o exception, log the exception, and allow execute()
        // to return false, which will prompt a retry.
        failed = true;
        e.printStackTrace();
        return !failed;
    } catch (SAPIException sapie) {
        // In the event of an RBNB communication  exception, log the exception, 
        // and allow execute() to return false, which will prompt a retry.
        failed = true;
        sapie.printStackTrace();
        return !failed;
    }

    return !failed;
}
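
Both this source and SocketTextSource above guard every put with remaining() > 0, compacting when the sample buffer is full. That guard, extracted into a runnable sketch (the helper is hypothetical, not part of either class):

import java.nio.ByteBuffer;

public class GuardedPut {
    // Append one byte, compacting first when no space remains. With a full
    // write-mode buffer (position == limit == capacity), compact() simply
    // resets it; in the sources above it preserves any unread bytes.
    static void putGuarded(ByteBuffer buf, byte b) {
        if (buf.remaining() > 0) {
            buf.put(b);
        } else {
            buf.compact();
            buf.put(b);
        }
    }

    public static void main(String[] args) {
        ByteBuffer sampleBuffer = ByteBuffer.allocate(4);
        for (int i = 0; i < 10; i++) {
            putGuarded(sampleBuffer, (byte) i);
        }
        System.out.println("position after 10 guarded puts: " + sampleBuffer.position()); // 2
    }
}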

From source file:com.healthmarketscience.jackcess.Column.java

/**
 * Write an LVAL column into a ByteBuffer inline if it fits, otherwise in
 * other data page(s).
 * @param value Value of the LVAL column
 * @return A buffer containing the LVAL definition and (possibly) the column
 *         value (unless written to other pages)
 * @usage _advanced_method_
 */
public ByteBuffer writeLongValue(byte[] value, int remainingRowLength) throws IOException {
    if (value.length > getType().getMaxSize()) {
        throw new IOException(
                "value too big for column, max " + getType().getMaxSize() + ", got " + value.length);
    }

    // determine which type to write
    byte type = 0;
    int lvalDefLen = getFormat().SIZE_LONG_VALUE_DEF;
    if (((getFormat().SIZE_LONG_VALUE_DEF + value.length) <= remainingRowLength)
            && (value.length <= getFormat().MAX_INLINE_LONG_VALUE_SIZE)) {
        type = LONG_VALUE_TYPE_THIS_PAGE;
        lvalDefLen += value.length;
    } else if (value.length <= getFormat().MAX_LONG_VALUE_ROW_SIZE) {
        type = LONG_VALUE_TYPE_OTHER_PAGE;
    } else {
        type = LONG_VALUE_TYPE_OTHER_PAGES;
    }

    ByteBuffer def = getPageChannel().createBuffer(lvalDefLen);
    // take length and apply type to first byte
    int lengthWithFlags = value.length | (type << 24);
    def.putInt(lengthWithFlags);

    if (type == LONG_VALUE_TYPE_THIS_PAGE) {
        // write long value inline
        def.putInt(0);
        def.putInt(0); //Unknown
        def.put(value);
    } else {

        TempPageHolder lvalBufferH = getTable().getLongValueBuffer();
        ByteBuffer lvalPage = null;
        int firstLvalPageNum = PageChannel.INVALID_PAGE_NUMBER;
        byte firstLvalRow = 0;

        // write other page(s)
        switch (type) {
        case LONG_VALUE_TYPE_OTHER_PAGE:
            lvalPage = getLongValuePage(value.length, lvalBufferH);
            firstLvalPageNum = lvalBufferH.getPageNumber();
            firstLvalRow = (byte) Table.addDataPageRow(lvalPage, value.length, getFormat(), 0);
            lvalPage.put(value);
            getPageChannel().writePage(lvalPage, firstLvalPageNum);
            break;

        case LONG_VALUE_TYPE_OTHER_PAGES:

            ByteBuffer buffer = ByteBuffer.wrap(value);
            int remainingLen = buffer.remaining();
            buffer.limit(0);
            lvalPage = getLongValuePage(getFormat().MAX_LONG_VALUE_ROW_SIZE, lvalBufferH);
            firstLvalPageNum = lvalBufferH.getPageNumber();
            int lvalPageNum = firstLvalPageNum;
            ByteBuffer nextLvalPage = null;
            int nextLvalPageNum = 0;
            while (remainingLen > 0) {
                lvalPage.clear();

                // figure out how much we will put in this page (we need 4 bytes for
                // the next page pointer)
                int chunkLength = Math.min(getFormat().MAX_LONG_VALUE_ROW_SIZE - 4, remainingLen);

                // figure out if we will need another page, and if so, allocate it
                if (chunkLength < remainingLen) {
                    // force a new page to be allocated
                    lvalBufferH.clear();
                    nextLvalPage = getLongValuePage(getFormat().MAX_LONG_VALUE_ROW_SIZE, lvalBufferH);
                    nextLvalPageNum = lvalBufferH.getPageNumber();
                } else {
                    nextLvalPage = null;
                    nextLvalPageNum = 0;
                }

                // add row to this page
                byte lvalRow = (byte) Table.addDataPageRow(lvalPage, chunkLength + 4, getFormat(), 0);

                // write next page info (we'll always be writing into row 0 for
                // newly created pages)
                lvalPage.put((byte) 0); // row number
                ByteUtil.put3ByteInt(lvalPage, nextLvalPageNum); // page number

                // write this page's chunk of data
                buffer.limit(buffer.limit() + chunkLength);
                lvalPage.put(buffer);
                remainingLen -= chunkLength;

                // write new page to database
                getPageChannel().writePage(lvalPage, lvalPageNum);

                if (lvalPageNum == firstLvalPageNum) {
                    // save initial row info
                    firstLvalRow = lvalRow;
                } else {
                    // check assertion that we wrote to row 0 for all subsequent pages
                    if (lvalRow != (byte) 0) {
                        throw new IllegalStateException("Expected row 0, but was " + lvalRow);
                    }
                }

                // move to next page
                lvalPage = nextLvalPage;
                lvalPageNum = nextLvalPageNum;
            }
            break;

        default:
            throw new IOException("Unrecognized long value type: " + type);
        }

        // update def
        def.put(firstLvalRow);
        ByteUtil.put3ByteInt(def, firstLvalPageNum);
        def.putInt(0); //Unknown

    }

    def.flip();
    return def;
}
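
The LONG_VALUE_TYPE_OTHER_PAGES branch feeds the wrapped value to the pages chunk by chunk: it starts with limit(0), so remaining() is zero, then widens the limit before each put. A self-contained demo of that limit-windowing trick:

import java.nio.ByteBuffer;

public class LimitWindowing {
    public static void main(String[] args) {
        byte[] value = new byte[10];
        ByteBuffer buffer = ByteBuffer.wrap(value);
        int remainingLen = buffer.remaining(); // 10: the whole wrapped array
        buffer.limit(0);                       // but expose none of it yet

        while (remainingLen > 0) {
            int chunkLength = Math.min(4, remainingLen); // 4-byte "pages" in this demo
            buffer.limit(buffer.limit() + chunkLength);  // widen the window by one chunk
            System.out.println("chunk of " + buffer.remaining() + " byte(s)"); // 4, 4, 2
            buffer.position(buffer.limit()); // stands in for lvalPage.put(buffer)
            remainingLen -= chunkLength;
        }
    }
}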

From source file:com.healthmarketscience.jackcess.Table.java

/**
 * Update the row on which the given rowState is currently positioned.
 * <p>
 * Note, this method is not generally meant to be used directly.  You should
 * use the {@link #updateCurrentRow} method or use the Cursor class, which
 * allows for more complex table interactions, e.g.
 * {@link Cursor#setCurrentRowValue} and {@link Cursor#updateCurrentRow}.
 * @usage _advanced_method_
 */
public void updateRow(RowState rowState, RowId rowId, Object... row) throws IOException {
    requireValidRowId(rowId);

    // ensure that the relevant row state is up-to-date
    ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
    int oldRowSize = rowBuffer.remaining();

    requireNonDeletedRow(rowState, rowId);

    // we need to make sure the row is the right length & type (fill with
    // null if too short).
    if ((row.length < _columns.size()) || (row.getClass() != Object[].class)) {
        row = dupeRow(row, _columns.size());
    }

    // fill in any auto-numbers (we don't allow autonumber values to be
    // modified)
    handleAutoNumbersForUpdate(row, rowBuffer, rowState);

    // hang on to the raw values of var length columns we are "keeping".  this
    // will allow us to re-use pre-written var length data, which can save
    // space for things like long value columns.
    Map<Column, byte[]> rawVarValues = (!_varColumns.isEmpty() ? new HashMap<Column, byte[]>() : null);

    // fill in any "keep value" fields
    for (Column column : _columns) {
        if (column.getRowValue(row) == Column.KEEP_VALUE) {
            column.setRowValue(row, getRowColumn(getFormat(), rowBuffer, column, rowState, rawVarValues));
        }
    }

    // generate new row bytes
    ByteBuffer newRowData = createRow(row, _singleRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
            rawVarValues);

    if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
        throw new IOException("Row size " + newRowData.limit() + " is too large");
    }

    if (!_indexDatas.isEmpty()) {
        Object[] oldRowValues = rowState.getRowValues();

        // delete old values from indexes
        for (IndexData indexData : _indexDatas) {
            indexData.deleteRow(oldRowValues, rowId);
        }
    }

    // see if we can squeeze the new row data into the existing row
    rowBuffer.reset();
    int rowSize = newRowData.remaining();

    ByteBuffer dataPage = null;
    int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

    if (oldRowSize >= rowSize) {

        // awesome, slap it in!
        rowBuffer.put(newRowData);

        // grab the page we just updated
        dataPage = rowState.getFinalPage();
        pageNumber = rowState.getFinalRowId().getPageNumber();

    } else {

        // bummer, need to find a new page for the data
        dataPage = findFreeRowSpace(rowSize, null, PageChannel.INVALID_PAGE_NUMBER);
        pageNumber = _addRowBufferH.getPageNumber();

        RowId headerRowId = rowState.getHeaderRowId();
        ByteBuffer headerPage = rowState.getHeaderPage();
        if (pageNumber == headerRowId.getPageNumber()) {
            // new row is on the same page as header row, share page
            dataPage = headerPage;
        }

        // write out the new row data (set the deleted flag on the new data row
        // so that it is ignored during normal table traversal)
        int rowNum = addDataPageRow(dataPage, rowSize, getFormat(), DELETED_ROW_MASK);
        dataPage.put(newRowData);

        // write the overflow info into the header row and clear out the
        // remaining header data
        rowBuffer = PageChannel.narrowBuffer(headerPage,
                findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
                findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
        rowBuffer.put((byte) rowNum);
        ByteUtil.put3ByteInt(rowBuffer, pageNumber);
        ByteUtil.clearRemaining(rowBuffer);

        // set the overflow flag on the header row
        int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(), getFormat());
        headerPage.putShort(headerRowIndex, (short) (headerPage.getShort(headerRowIndex) | OVERFLOW_ROW_MASK));
        if (pageNumber != headerRowId.getPageNumber()) {
            writeDataPage(headerPage, headerRowId.getPageNumber());
        }
    }

    // update the indexes
    for (IndexData indexData : _indexDatas) {
        indexData.addRow(row, rowId);
    }

    writeDataPage(dataPage, pageNumber);

    updateTableDefinition(0);
}
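
Here remaining() serves as a length measure: oldRowSize is the bytes left in the positioned row buffer, rowSize is the flipped new row's length, and comparing them decides whether the update fits in place. A toy version of that decision:

import java.nio.ByteBuffer;

public class InPlaceOrRelocate {
    public static void main(String[] args) {
        ByteBuffer oldRow = ByteBuffer.allocate(12); // pretend the old row occupies 12 bytes
        ByteBuffer newRowData = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 });

        int oldRowSize = oldRow.remaining();  // bytes available in the existing slot
        int rowSize = newRowData.remaining(); // length of the replacement row

        if (oldRowSize >= rowSize) {
            oldRow.put(newRowData); // the new row fits into the old slot
            System.out.println("updated in place, " + oldRow.remaining() + " bytes of slack");
        } else {
            System.out.println("row grew; would relocate to a new data page");
        }
    }
}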

From source file:hivemall.GeneralLearnerBaseUDTF.java

protected void recordTrainSampleToTempFile(@Nonnull final FeatureValue[] featureVector, final float target)
        throws HiveException {
    if (iterations == 1) {
        return;
    }

    ByteBuffer buf = inputBuf;
    NioStatefulSegment dst = fileIO;

    if (buf == null) {
        final File file;
        try {
            file = File.createTempFile("hivemall_general_learner", ".sgmt");
            file.deleteOnExit();
            if (!file.canWrite()) {
                throw new UDFArgumentException("Cannot write a temporary file: " + file.getAbsolutePath());
            }
            logger.info("Record training samples to a file: " + file.getAbsolutePath());
        } catch (IOException ioe) {
            throw new UDFArgumentException(ioe);
        } catch (Throwable e) {
            throw new UDFArgumentException(e);
        }
        this.inputBuf = buf = ByteBuffer.allocateDirect(1024 * 1024); // 1 MB
        this.fileIO = dst = new NioStatefulSegment(file, false);
    }

    int featureVectorBytes = 0;
    for (FeatureValue f : featureVector) {
        if (f == null) {
            continue;
        }
        int featureLength = f.getFeatureAsString().length();

        // feature as String (even if it is Text or Integer)
        featureVectorBytes += SizeOf.CHAR * featureLength;

        // NIOUtils.putString() first puts the length of string before string itself
        featureVectorBytes += SizeOf.INT;

        // value
        featureVectorBytes += SizeOf.DOUBLE;
    }

    // feature length, feature 1, feature 2, ..., feature n, target
    int recordBytes = SizeOf.INT + featureVectorBytes + SizeOf.FLOAT;
    int requiredBytes = SizeOf.INT + recordBytes; // need to allocate space for "recordBytes" itself

    int remain = buf.remaining();
    if (remain < requiredBytes) {
        writeBuffer(buf, dst);
    }

    buf.putInt(recordBytes);
    buf.putInt(featureVector.length);
    for (FeatureValue f : featureVector) {
        writeFeatureValue(buf, f);
    }
    buf.putFloat(target);
}
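
The remain < requiredBytes check flushes the staging buffer before writing a record that would not fit. A self-contained sketch of the same pattern, with a ByteArrayOutputStream standing in for the NioStatefulSegment sink:

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;

public class FlushBeforeOverflow {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);                 // small staging buffer
        ByteArrayOutputStream sink = new ByteArrayOutputStream(); // stands in for the file segment

        for (int i = 0; i < 10; i++) {
            int requiredBytes = Integer.BYTES + Integer.BYTES; // record length + payload
            if (buf.remaining() < requiredBytes) {
                buf.flip();                          // flush what is buffered so far
                byte[] out = new byte[buf.remaining()];
                buf.get(out);
                sink.write(out, 0, out.length);
                buf.clear();
            }
            buf.putInt(Integer.BYTES); // record length prefix
            buf.putInt(i);             // record payload
        }
        System.out.println("flushed " + sink.size() + " bytes; " + buf.position() + " still buffered"); // 64; 16
    }
}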