Example usage for java.nio ByteBuffer flip

List of usage examples for java.nio ByteBuffer flip

Introduction

In this page you can find the example usage for java.nio ByteBuffer flip.

Prototype

public final Buffer flip() 

Source Link

Document

Flips this buffer: the limit is set to the current position and then the position is set to zero, preparing the buffer for a sequence of reads (or writes) after a sequence of writes (or reads).

Usage

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java

/**
 * Writes a V2-format journal containing {@code numEntries} length-prefixed
 * data entries (1 KB of 'X' bytes each) and stamps the file with the V2
 * version marker.
 */
private JournalChannel writeV2Journal(File journalDir, int numEntries) throws Exception {
    // A fresh journal file keyed by the current wall-clock time.
    long journalId = System.currentTimeMillis();
    JournalChannel journal = new JournalChannel(journalDir, journalId);

    moveToPosition(journal, JournalChannel.VERSION_HEADER_SIZE);

    BufferedChannel channel = journal.getBufferedChannel();

    byte[] payload = new byte[1024];
    Arrays.fill(payload, (byte) 'X');

    long lac = LedgerHandle.INVALID_ENTRY_ID;
    for (int entryId = 1; entryId <= numEntries; entryId++) {
        // Build the entry packet, then prefix it with its 4-byte length.
        ByteBuffer entry = ClientUtil
                .generatePacket(1, entryId, lac, entryId * payload.length, payload)
                .toByteBuffer();
        lac = entryId;

        ByteBuffer lengthPrefix = ByteBuffer.allocate(4);
        lengthPrefix.putInt(entry.remaining());
        lengthPrefix.flip();

        channel.write(lengthPrefix);
        channel.write(entry);
    }
    channel.flush(true);

    updateJournalVersion(journal, JournalChannel.V2);

    return journal;
}

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java

/**
 * Writes a V3-format journal: a master-key meta entry (entry 0) followed by
 * {@code numEntries} length-prefixed data entries, then stamps the file with
 * the V3 version marker.
 */
private JournalChannel writeV3Journal(File journalDir, int numEntries, byte[] masterKey) throws Exception {
    // A fresh journal file keyed by the current wall-clock time.
    long journalId = System.currentTimeMillis();
    JournalChannel journal = new JournalChannel(journalDir, journalId);

    moveToPosition(journal, JournalChannel.VERSION_HEADER_SIZE);

    BufferedChannel channel = journal.getBufferedChannel();

    byte[] payload = new byte[1024];
    Arrays.fill(payload, (byte) 'X');

    long lac = LedgerHandle.INVALID_ENTRY_ID;
    for (int entryId = 0; entryId <= numEntries; entryId++) {
        // Entry 0 is the ledger's master-key meta entry; the rest are data packets.
        ByteBuffer entry = (entryId == 0)
                ? generateMetaEntry(1, masterKey)
                : ClientUtil.generatePacket(1, entryId, lac, entryId * payload.length, payload).toByteBuffer();
        lac = entryId;

        // Length-prefix each entry with a 4-byte size.
        ByteBuffer lengthPrefix = ByteBuffer.allocate(4);
        lengthPrefix.putInt(entry.remaining());
        lengthPrefix.flip();

        channel.write(lengthPrefix);
        channel.write(entry);
    }
    channel.flush(true);

    updateJournalVersion(journal, JournalChannel.V3);

    return journal;
}

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java

/**
 * Generate fence entry/*from  www.  jav  a2 s .c  o m*/
 */
private ByteBuffer generateFenceEntry(long ledgerId) {
    ByteBuffer bb = ByteBuffer.allocate(8 + 8);
    bb.putLong(ledgerId);
    bb.putLong(Bookie.METAENTRY_ID_FENCE_KEY);
    bb.flip();
    return bb;
}

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java

/**
 * Writes a V4-format journal: a master-key meta entry (entry 0),
 * {@code numEntries} length-prefixed data entries, and a trailing fence-key
 * entry, then stamps the file with the V4 version marker.
 */
private JournalChannel writeV4Journal(File journalDir, int numEntries, byte[] masterKey) throws Exception {
    // A fresh journal file keyed by the current wall-clock time.
    long journalId = System.currentTimeMillis();
    JournalChannel journal = new JournalChannel(journalDir, journalId);

    moveToPosition(journal, JournalChannel.VERSION_HEADER_SIZE);

    BufferedChannel channel = journal.getBufferedChannel();

    byte[] payload = new byte[1024];
    Arrays.fill(payload, (byte) 'X');

    long lac = LedgerHandle.INVALID_ENTRY_ID;
    for (int entryId = 0; entryId <= numEntries; entryId++) {
        // Entry 0 is the ledger's master-key meta entry; the rest are data packets.
        ByteBuffer entry = (entryId == 0)
                ? generateMetaEntry(1, masterKey)
                : ClientUtil.generatePacket(1, entryId, lac, entryId * payload.length, payload).toByteBuffer();
        lac = entryId;

        ByteBuffer lengthPrefix = ByteBuffer.allocate(4);
        lengthPrefix.putInt(entry.remaining());
        lengthPrefix.flip();
        channel.write(lengthPrefix);
        channel.write(entry);
    }

    // Append the fence-key marker entry, length-prefixed like any other record.
    ByteBuffer fence = generateFenceEntry(1);
    ByteBuffer fenceLen = ByteBuffer.allocate(4);
    fenceLen.putInt(fence.remaining());
    fenceLen.flip();
    channel.write(fenceLen);
    channel.write(fence);

    channel.flush(true);
    updateJournalVersion(journal, JournalChannel.V4);
    return journal;
}

From source file:edu.hawaii.soest.pacioos.text.SocketTextSource.java

/**
 * Reads the instrument's TCP byte stream, splits it into samples on the
 * configured record delimiter byte(s), and sends each validated sample to
 * the DataTurbine via sendSample(). Returns true when the stream ends
 * normally, false on an I/O or RBNB error (prompting the caller to retry).
 */
@Override
protected boolean execute() {

    log.debug("SocketTextSource.execute() called.");
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;

    boolean failed = false;

    /* Get a connection to the instrument */
    SocketChannel socket = getSocketConnection();
    if (socket == null)
        return false;

    // while data are being sent, read them into the buffer
    try {
        // create four byte placeholders used to evaluate up to a four-byte 
        // window.  The FIFO layout looks like:
        //           -------------------------
        //   in ---> | One | Two |Three|Four |  ---> out
        //           -------------------------
        byte byteOne = 0x00, // set initial placeholder values
                byteTwo = 0x00, byteThree = 0x00, byteFour = 0x00;

        // Create a buffer that will store the sample bytes as they are read
        ByteBuffer sampleBuffer = ByteBuffer.allocate(getBufferSize());

        // create a byte buffer to store bytes from the TCP stream
        ByteBuffer buffer = ByteBuffer.allocateDirect(getBufferSize());

        // while there are bytes to read from the socket, or unprocessed
        // bytes left over from a previous read ...
        while (socket.read(buffer) != -1 || buffer.position() > 0) {

            // prepare the buffer for reading
            buffer.flip();

            // while there are unread bytes in the ByteBuffer
            while (buffer.hasRemaining()) {
                byteOne = buffer.get();

                // log the byte stream; CR/LF are rendered as hex so the
                // debug log stays one line per byte
                String character = new String(new byte[] { byteOne });
                if (log.isDebugEnabled()) {
                    List<Byte> whitespaceBytes = new ArrayList<Byte>();
                    whitespaceBytes.add(new Byte((byte) 0x0A));
                    whitespaceBytes.add(new Byte((byte) 0x0D));
                    if (whitespaceBytes.contains(new Byte(byteOne))) {
                        character = new String(Hex.encodeHex((new byte[] { byteOne })));

                    }
                }
                log.debug("char: " + character + "\t" + "b1: "
                        + new String(Hex.encodeHex((new byte[] { byteOne }))) + "\t" + "b2: "
                        + new String(Hex.encodeHex((new byte[] { byteTwo }))) + "\t" + "b3: "
                        + new String(Hex.encodeHex((new byte[] { byteThree }))) + "\t" + "b4: "
                        + new String(Hex.encodeHex((new byte[] { byteFour }))) + "\t" + "sample pos: "
                        + sampleBuffer.position() + "\t" + "sample rem: " + sampleBuffer.remaining() + "\t"
                        + "sample cnt: " + sampleByteCount + "\t" + "buffer pos: " + buffer.position() + "\t"
                        + "buffer rem: " + buffer.remaining() + "\t" + "state: " + state);

                // evaluate each byte to find the record delimiter(s), and when found, validate and
                // send the sample to the DataTurbine.
                int numberOfChannelsFlushed = 0;

                if (getRecordDelimiters().length == 2) {
                    // two-byte delimiter: fires when the previous byte matched
                    // the first delimiter byte and this byte matches the second
                    if (byteTwo == getFirstDelimiterByte() && byteOne == getSecondDelimiterByte()) {
                        sampleBuffer.put(byteOne);
                        sampleByteCount++;
                        // extract just the length of the sample bytes out of the
                        // sample buffer, and place it in the channel map as a 
                        // byte array.  Then, send it to the DataTurbine.
                        log.debug("Sample byte count: " + sampleByteCount);
                        byte[] sampleArray = new byte[sampleByteCount];
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);
                        String sampleString = new String(sampleArray, "US-ASCII");

                        if (validateSample(sampleString)) {
                            numberOfChannelsFlushed = sendSample(sampleString);

                        }

                        // reset all per-sample state for the next record
                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        log.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");

                    } else {
                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found

                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);

                        } else {
                            // NOTE(review): compact() on a full write-mode buffer
                            // (position == limit == capacity) resets position to 0,
                            // which appears to discard the accumulated sample bytes
                            // rather than make room — confirm intended behavior.
                            sampleBuffer.compact();
                            log.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);

                        }

                    }

                } else if (getRecordDelimiters().length == 1) {
                    // single-byte delimiter: fires on a direct match
                    if (byteOne == getFirstDelimiterByte()) {
                        sampleBuffer.put(byteOne);
                        sampleByteCount++;
                        // extract just the length of the sample bytes out of the
                        // sample buffer, and place it in the channel map as a 
                        // byte array.  Then, send it to the DataTurbine.
                        byte[] sampleArray = new byte[sampleByteCount];
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);
                        String sampleString = new String(sampleArray, "US-ASCII");

                        if (validateSample(sampleString)) {
                            numberOfChannelsFlushed = sendSample(sampleString);

                        }

                        // reset all per-sample state for the next record
                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        log.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");

                    } else {
                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found

                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);

                        } else {
                            // NOTE(review): see the compact() concern above — this
                            // branch appears to discard accumulated sample bytes.
                            sampleBuffer.compact();
                            log.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);

                        }

                    }

                } // end getRecordDelimiters().length
                // NOTE(review): a delimiter count other than 1 or 2 silently
                // drops the byte — confirm that configuration is impossible.

                // shift the bytes in the FIFO window
                byteFour = byteThree;
                byteThree = byteTwo;
                byteTwo = byteOne;

            } //end while (more unread bytes)

            // prepare the buffer to read in more bytes from the stream
            buffer.compact();

        } // end while (more socket bytes to read)
        socket.close();

    } catch (IOException e) {
        // handle exceptions
        // In the event of an i/o exception, log the exception, and allow execute()
        // to return false, which will prompt a retry.
        failed = true;
        log.error("There was a communication error in sending the data sample. The message was: "
                + e.getMessage());
        if (log.isDebugEnabled()) {
            e.printStackTrace();
        }
        return !failed;

    } catch (SAPIException sapie) {
        // In the event of an RBNB communication  exception, log the exception, 
        // and allow execute() to return false, which will prompt a retry.
        failed = true;
        log.error("There was an RBNB error while sending the data sample. The message was: "
                + sapie.getMessage());
        if (log.isDebugEnabled()) {
            sapie.printStackTrace();
        }
        return !failed;
    }

    return !failed;

}

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java

/**
 * Writes a V5-format journal: a master-key meta entry (entry 0),
 * {@code numEntries} length-prefixed data entries, and a trailing fence-key
 * entry, padding every record to the sector boundary, then stamps the file
 * with the V5 version marker.
 */
private JournalChannel writeV5Journal(File journalDir, int numEntries, byte[] masterKey) throws Exception {
    // A fresh journal file keyed by the current wall-clock time.
    long journalId = System.currentTimeMillis();
    JournalChannel journal = new JournalChannel(journalDir, journalId);

    BufferedChannel channel = journal.getBufferedChannel();

    // V5 journals pad every record out to the sector boundary.
    ByteBuffer paddingBuff = ByteBuffer.allocateDirect(2 * JournalChannel.SECTOR_SIZE);
    ZeroBuffer.put(paddingBuff);

    byte[] payload = new byte[4 * 1024 * 1024];
    Arrays.fill(payload, (byte) 'X');

    long lac = LedgerHandle.INVALID_ENTRY_ID;
    long length = 0;
    for (int entryId = 0; entryId <= numEntries; entryId++) {
        // Entry 0 is the ledger's master-key meta entry; entry i carries i payload bytes.
        ByteBuffer entry = (entryId == 0)
                ? generateMetaEntry(1, masterKey)
                : ClientUtil.generatePacket(1, entryId, lac, length, payload, 0, entryId).toByteBuffer();
        lac = entryId;
        length += entryId;

        ByteBuffer lengthPrefix = ByteBuffer.allocate(4);
        lengthPrefix.putInt(entry.remaining());
        lengthPrefix.flip();
        channel.write(lengthPrefix);
        channel.write(entry);
        Journal.writePaddingBytes(journal, paddingBuff, JournalChannel.SECTOR_SIZE);
    }

    // Append the fence-key marker entry, padded like any other record.
    ByteBuffer fence = generateFenceEntry(1);
    ByteBuffer fenceLen = ByteBuffer.allocate(4);
    fenceLen.putInt(fence.remaining());
    fenceLen.flip();
    channel.write(fenceLen);
    channel.write(fence);
    Journal.writePaddingBytes(journal, paddingBuff, JournalChannel.SECTOR_SIZE);

    channel.flush(true);
    updateJournalVersion(journal, JournalChannel.V5);
    return journal;
}

From source file:org.alfresco.contentstore.ChecksumTest.java

/**
 * Reconstructs content by interleaving matched blocks read from
 * {@code inChannel} with the literal patch data carried by
 * {@code patchDocument}, writing the result to {@code outChannel}.
 *
 * @return the total number of bytes written to {@code outChannel}
 */
protected int applyPatch(ReadableByteChannel inChannel, WritableByteChannel outChannel,
        PatchDocument patchDocument) throws IOException {
    InChannel source = new InChannel(inChannel, patchDocument.getMatchedBlocks(), patchDocument.getBlockSize());

    int totalWritten = 0;
    int blockIndex = source.nextBlock();

    if (blockIndex > -1) {
        for (Patch patch : patchDocument.getPatches()) {
            int lastMatchIndex = patch.getLastMatchIndex();

            // Copy matched blocks up to (and including) the patch's anchor block.
            while (blockIndex != -1 && blockIndex <= lastMatchIndex) {
                int written = outChannel.write(source.currentBlock);
                totalWritten += written;
                if (written != source.blockSize) {
                    throw new RuntimeException("Wrote too few bytes, " + source.blockSize + ", " + written);
                }

                blockIndex = source.nextBlock();
                if (blockIndex == -1) {
                    break;
                }
            }

            // Splice in the literal bytes carried by this patch.
            int patchSize = patch.getSize();
            ReadableByteChannel patchChannel = Channels.newChannel(patch.getStream());
            ByteBuffer patchBuffer = ByteBuffer.allocate(patchSize);
            int read = patchChannel.read(patchBuffer);
            patchBuffer.flip();
            int written = outChannel.write(patchBuffer);
            totalWritten += written;
            if (written != read) {
                throw new RuntimeException(
                        "Wrote too few bytes, expected " + read + ", got " + written);
            }
        }

        // No patches remain; stream out whatever matched blocks are left.
        while (blockIndex != -1) {
            int written = outChannel.write(source.currentBlock);
            totalWritten += written;
            if (written != source.bytesRead) {
                throw new RuntimeException("Wrote too few bytes");
            }

            blockIndex = source.nextBlock();
        }
    }

    return totalWritten;
}

From source file:com.tongbanjie.tarzan.rpc.protocol.RpcCommand.java

/**
 * Encodes this command's header into a flipped, read-ready buffer laid out
 * as: total length (int), protocol type (byte), header length (int), header
 * bytes.  The body itself is written separately by the caller, but its
 * length is counted in the leading total-length field.
 *
 * @param bodyLength number of body bytes that will follow this header
 * @return a buffer positioned at zero, ready to be written to the wire
 * @throws RpcCommandException if header serialization fails
 */
public ByteBuffer encodeHeader(final int bodyLength) throws RpcCommandException {
    byte[] headerData = this.headerEncode();

    // Total length = protocol type + header-length field + header bytes + body bytes.
    int length = Protocol.PROTOCOL_TYPE_SIZE
            + Protocol.HEADER_LENGTH_SIZE
            + headerData.length
            + bodyLength;

    // The buffer holds everything except the body itself.
    ByteBuffer result = ByteBuffer.allocate(Protocol.TOTAL_LENGTH_SIZE + length - bodyLength);

    result.putInt(length);                       // 0: total length
    result.put(markProtocolType(serializeType)); // 1: protocol type
    result.putInt(headerData.length);            // 2: header length
    result.put(headerData);                      // 3: header payload

    result.flip();

    return result;
}

From source file:de.digitalcollections.streaming.euphoria.controller.StreamingController.java

/**
 * Copies all bytes from the given input to the given output via NIO
 * {@link Channels} and a directly allocated NIO {@link ByteBuffer}. Both the
 * input and output streams are implicitly closed after streaming, regardless
 * of whether an exception is thrown.
 *
 * @param input The input stream.
 * @param output The output stream.
 * @return The length of the written bytes.
 * @throws IOException When an I/O error occurs.
 */
private long stream(InputStream input, OutputStream output) throws IOException {
    try (ReadableByteChannel source = Channels.newChannel(input);
            WritableByteChannel sink = Channels.newChannel(output)) {
        ByteBuffer chunk = ByteBuffer.allocateDirect(DEFAULT_STREAM_BUFFER_SIZE);
        long written = 0;

        // Read a chunk, flip it for draining, write it out, reset for the next read.
        while (source.read(chunk) != -1) {
            chunk.flip();
            written += sink.write(chunk);
            chunk.clear();
        }

        return written;
    }
}

From source file:org.alfresco.contentstore.AbstractContentStore.java

protected int applyPatch(ReadableByteChannel inChannel, WritableByteChannel outChannel,
        PatchDocument patchDocument) throws IOException {
    InChannel c = new InChannel(inChannel, patchDocument.getMatchedBlocks(), patchDocument.getBlockSize());

    int totalWritten = 0;

    int blockIndex = -1;

    //        int blockIndex = c.nextBlock();
    //        if(blockIndex > -1)
    //        {/* www  .  j  a v a  2  s .c  o  m*/
    for (Patch patch : patchDocument.getPatches()) {
        int lastMatchingBlockIndex = patch.getLastMatchIndex();

        blockIndex = c.nextBlock();
        while (blockIndex != -1 && blockIndex <= lastMatchingBlockIndex) {
            int bytesWritten = outChannel.write(c.currentBlock);
            totalWritten += bytesWritten;
            if (bytesWritten != c.bytesRead) {
                throw new RuntimeException("Wrote too few bytes, " + c.blockSize + ", " + bytesWritten);
            }

            blockIndex = c.nextBlock();
            if (blockIndex == -1) {
                break;
            }
        }

        // apply patch
        int patchSize = patch.getSize();
        ReadableByteChannel patchChannel = Channels.newChannel(patch.getStream());
        ByteBuffer patchBB = ByteBuffer.allocate(patchSize);
        int bytesRead = patchChannel.read(patchBB);
        patchBB.flip();
        int bytesWritten = outChannel.write(patchBB);
        totalWritten += bytesWritten;
        if (bytesWritten != bytesRead) {
            throw new RuntimeException("Wrote too few bytes, expected " + bytesRead + ", got " + bytesWritten);
        }
    }

    // we're done with all the patches, add the remaining blocks
    while (blockIndex != -1) {
        int bytesWritten = outChannel.write(c.currentBlock);
        totalWritten += bytesWritten;
        if (bytesWritten != c.bytesRead) {
            throw new RuntimeException("Wrote too few bytes");
        }

        blockIndex = c.nextBlock();
    }
    //        }

    return totalWritten;
}