Example usage for io.netty.buffer CompositeByteBuf writerIndex

List of usage examples for io.netty.buffer CompositeByteBuf writerIndex

Introduction

On this page you can find example usages of io.netty.buffer CompositeByteBuf writerIndex.

Prototype

@Override
    public int writerIndex() 

Source Link

Usage

From source file:com.github.sparkfy.network.util.TransportFrameDecoder.java

License:Apache License

/**
 * Decodes the next complete frame from the queued buffers, or returns
 * {@code null} if the length prefix or the frame body has not fully arrived.
 */
private ByteBuf decodeNext() throws Exception {
    long frameSize = decodeFrameSize();
    if (frameSize == UNKNOWN_FRAME_SIZE || totalSize < frameSize) {
        // Not enough data buffered yet; try again after the next read.
        return null;
    }

    // Forget the cached size so the next call decodes a fresh length prefix.
    nextFrameSize = UNKNOWN_FRAME_SIZE;

    Preconditions.checkArgument(frameSize < MAX_FRAME_SIZE, "Too large frame: %s", frameSize);
    Preconditions.checkArgument(frameSize > 0, "Frame length should be positive: %s", frameSize);

    int bytesLeft = (int) frameSize;

    // Fast path: the first queued buffer already contains the whole frame.
    if (buffers.getFirst().readableBytes() >= bytesLeft) {
        return nextBufferForFrame(bytesLeft);
    }

    // Slow path: stitch the frame together from several queued buffers.
    CompositeByteBuf frame = buffers.getFirst().alloc().compositeBuffer();
    while (bytesLeft > 0) {
        ByteBuf piece = nextBufferForFrame(bytesLeft);
        bytesLeft -= piece.readableBytes();
        // addComponent() does not advance the writer index, so bump it
        // manually to make the appended bytes readable.
        frame.addComponent(piece).writerIndex(frame.writerIndex() + piece.readableBytes());
    }
    assert bytesLeft == 0;
    return frame;
}

From source file:io.advantageous.conekt.http.impl.HttpClientRequestImpl.java

License:Open Source License

/**
 * Writes a chunk of the request body, buffering it if the connection is not
 * yet established.
 *
 * @param buff the body chunk to write (may be empty when {@code end} is true)
 * @param end  whether this is the final chunk of the request
 * @throws IllegalStateException if neither chunked encoding nor a
 *         Content-Length header has been set before writing body data
 */
private void write(ByteBuf buff, boolean end) {
    int readableBytes = buff.readableBytes();
    if (readableBytes == 0 && !end) {
        // nothing to write to the connection just return
        return;
    }

    if (end) {
        completed = true;
    }
    if (!end && !chunked && !contentLengthSet()) {
        throw new IllegalStateException(
                "You must set the Content-Length header to be the total size of the message "
                        + "body BEFORE sending any data if you are not using HTTP chunked encoding.");
    }

    written += buff.readableBytes();
    if (conn == null) {
        // Not connected yet: accumulate chunks until the connection is up.
        if (pendingChunks == null) {
            pendingChunks = buff;
        } else {
            CompositeByteBuf pending;
            if (pendingChunks instanceof CompositeByteBuf) {
                pending = (CompositeByteBuf) pendingChunks;
            } else {
                // Lazily upgrade the single pending buffer to a composite so
                // further chunks can be appended without copying.
                pending = Unpooled.compositeBuffer();
                pending.addComponent(pendingChunks).writerIndex(pendingChunks.writerIndex());
                pendingChunks = pending;
            }
            // addComponent() does not advance the writer index, so it is
            // bumped manually here.
            // NOTE(review): this adds buff.writerIndex() rather than
            // buff.readableBytes(); the two differ if buff has a non-zero
            // reader index — confirm chunks always arrive unread.
            pending.addComponent(buff).writerIndex(pending.writerIndex() + buff.writerIndex());
        }
        connect();
    } else {
        if (!headWritten) {
            // First write: the headers go out together with this content.
            writeHeadWithContent(buff, end);
        } else {
            if (end) {
                if (buff.isReadable()) {
                    conn.writeToChannel(new DefaultLastHttpContent(buff, false));
                } else {
                    // Nothing readable: terminate the message with the shared
                    // empty last-content marker.
                    conn.writeToChannel(LastHttpContent.EMPTY_LAST_CONTENT);
                }
            } else {
                conn.writeToChannel(new DefaultHttpContent(buff));
            }
        }
        if (end) {
            conn.reportBytesWritten(written);

            if (respHandler != null) {
                conn.endRequest();
            }
        }
    }
}

From source file:io.jsync.http.impl.DefaultHttpClientRequest.java

License:Open Source License

/**
 * Writes a chunk of the request body, buffering it until a connection is
 * available. Synchronized because chunks may be written from multiple threads.
 *
 * @param buff the body chunk to write (may be empty when {@code end} is true)
 * @param end  whether this is the final chunk of the request
 * @return this request, for fluent chaining
 * @throws IllegalStateException if the request is not raw, not chunked, and
 *         has no Content-Length header set before body data is written
 */
private synchronized DefaultHttpClientRequest write(ByteBuf buff, boolean end) {
    int readableBytes = buff.readableBytes();
    if (readableBytes == 0 && !end) {
        // nothing to write to the connection just return
        return this;
    }

    if (end) {
        completed = true;
    }

    written += buff.readableBytes();

    if (!end && !raw && !chunked && !contentLengthSet()) {
        throw new IllegalStateException(
                "You must set the Content-Length header to be the total size of the message "
                        + "body BEFORE sending any data if you are not using HTTP chunked encoding.");
    }

    if (conn == null) {
        // Not connected yet: accumulate chunks until the connection is up.
        if (pendingChunks == null) {
            pendingChunks = buff;
        } else {
            CompositeByteBuf pending;
            if (pendingChunks instanceof CompositeByteBuf) {
                pending = (CompositeByteBuf) pendingChunks;
            } else {
                // Lazily upgrade the single pending buffer to a composite so
                // further chunks can be appended without copying.
                pending = Unpooled.compositeBuffer();
                pending.addComponent(pendingChunks).writerIndex(pendingChunks.writerIndex());
                pendingChunks = pending;
            }
            // addComponent() does not advance the writer index, so it is
            // bumped manually here.
            // NOTE(review): this adds buff.writerIndex() rather than
            // buff.readableBytes(); the two differ if buff has a non-zero
            // reader index — confirm chunks always arrive unread.
            pending.addComponent(buff).writerIndex(pending.writerIndex() + buff.writerIndex());
        }
        connect();
    } else {
        if (!headWritten) {
            // First write: the headers go out together with this content.
            writeHeadWithContent(buff, end);
        } else {
            if (end) {
                writeEndChunk(buff);
            } else {
                sendChunk(buff);
            }
        }
        if (end) {
            conn.endRequest();
        }
    }
    return this;
}

From source file:io.vertx.core.http.impl.HttpClientRequestImpl.java

License:Open Source License

/**
 * Writes a chunk of the request body, buffering it until a stream is
 * available and caching it when redirects may need to replay the body.
 *
 * @param buff the body chunk to write, or {@code null} when only signalling
 *             the end of the request
 * @param end  whether this is the final chunk of the request
 * @throws IllegalStateException if neither chunked encoding nor a
 *         Content-Length header has been set before writing body data
 */
private void write(ByteBuf buff, boolean end) {
    if (buff == null && !end) {
        // nothing to write to the connection just return
        return;
    }

    if (end) {
        if (buff != null && !chunked && !contentLengthSet()) {
            // Whole body arrived in one shot: derive Content-Length from it.
            headers().set(CONTENT_LENGTH, String.valueOf(buff.writerIndex()));
        }
    } else {
        if (!chunked && !contentLengthSet()) {
            throw new IllegalStateException(
                    "You must set the Content-Length header to be the total size of the message "
                            + "body BEFORE sending any data if you are not using HTTP chunked encoding.");
        }
    }

    if (buff != null) {
        written += buff.readableBytes();
        if (followRedirects > 0) {
            // Keep a copy of the body so it can be replayed on a redirect.
            if (cachedChunks == null) {
                cachedChunks = Unpooled.compositeBuffer();
            }
            // addComponent() does not advance the writer index, so it is
            // bumped manually here.
            // NOTE(review): this adds buff.writerIndex() rather than
            // buff.readableBytes(); the two differ if buff has a non-zero
            // reader index — confirm chunks always arrive unread.
            cachedChunks.addComponent(buff).writerIndex(cachedChunks.writerIndex() + buff.writerIndex());
        }
    }

    if (stream == null) {
        // No stream yet: accumulate chunks until the connection is up.
        if (buff != null) {
            if (pendingChunks == null) {
                pendingChunks = buff;
            } else {
                CompositeByteBuf pending;
                if (pendingChunks instanceof CompositeByteBuf) {
                    pending = (CompositeByteBuf) pendingChunks;
                } else {
                    // Lazily upgrade the single pending buffer to a composite
                    // so further chunks can be appended without copying.
                    pending = Unpooled.compositeBuffer();
                    pending.addComponent(pendingChunks).writerIndex(pendingChunks.writerIndex());
                    pendingChunks = pending;
                }
                pending.addComponent(buff).writerIndex(pending.writerIndex() + buff.writerIndex());
            }
        }
        connect(null);
    } else {
        if (!headWritten) {
            // First write: the headers go out together with this content.
            writeHeadWithContent(buff, end);
        } else {
            stream.writeBuffer(buff, end);
        }
        if (end) {
            stream.connection().reportBytesWritten(written);
            if (respHandler != null) {
                stream.endRequest();
            }
        }
    }

    if (end) {
        completed = true;
        if (completionHandler != null) {
            completionHandler.handle(null);
        }
    }
}

From source file:org.apache.spark.network.util.TransportFrameDecoder.java

License:Apache License

/**
 * Attempts to decode one complete frame from the buffered data.
 *
 * @return the decoded frame, or {@code null} if more data is needed
 */
private ByteBuf decodeNext() {
    final long frameLen = decodeFrameSize();
    final boolean incomplete = frameLen == UNKNOWN_FRAME_SIZE || totalSize < frameLen;
    if (incomplete) {
        return null;
    }

    // Invalidate the cached size; the next call decodes a new prefix.
    nextFrameSize = UNKNOWN_FRAME_SIZE;

    Preconditions.checkArgument(frameLen < MAX_FRAME_SIZE, "Too large frame: %s", frameLen);
    Preconditions.checkArgument(frameLen > 0, "Frame length should be positive: %s", frameLen);

    int toConsume = (int) frameLen;
    ByteBuf head = buffers.getFirst();

    // If the head buffer covers the whole frame, hand out a slice of it.
    if (head.readableBytes() >= toConsume) {
        return nextBufferForFrame(toConsume);
    }

    // Otherwise assemble the frame from multiple buffers without copying.
    CompositeByteBuf assembled = head.alloc().compositeBuffer(Integer.MAX_VALUE);
    while (toConsume > 0) {
        ByteBuf part = nextBufferForFrame(toConsume);
        int consumed = part.readableBytes();
        toConsume -= consumed;
        // Advance the writer index by hand: addComponent() leaves it untouched.
        assembled.addComponent(part).writerIndex(assembled.writerIndex() + consumed);
    }
    assert toConsume == 0;
    return assembled;
}

From source file:org.apache.spark.util.TransportFrameDecoder.java

License:Apache License

/**
 * Decodes a single frame if its length prefix and full body are buffered.
 *
 * @return the next frame, or {@code null} when insufficient data is available
 */
private ByteBuf decodeNext() throws Exception {
    long size = decodeFrameSize();
    if (size == UNKNOWN_FRAME_SIZE || totalSize < size) {
        // Frame prefix or body not fully received yet.
        return null;
    }

    // Drop the memoized size so the following frame is decoded afresh.
    nextFrameSize = UNKNOWN_FRAME_SIZE;

    Preconditions.checkArgument(size < MAX_FRAME_SIZE, "Too large frame: %s", size);
    Preconditions.checkArgument(size > 0, "Frame length should be positive: %s", size);

    int want = (int) size;
    if (buffers.getFirst().readableBytes() >= want) {
        // Single-buffer case: no composite needed.
        return nextBufferForFrame(want);
    }

    // Multi-buffer case: build a zero-copy composite over the pieces.
    CompositeByteBuf out = buffers.getFirst().alloc().compositeBuffer(Integer.MAX_VALUE);
    for (ByteBuf chunk; want > 0; ) {
        chunk = nextBufferForFrame(want);
        want -= chunk.readableBytes();
        // Manually extend the writer index; addComponent() does not do it.
        out.addComponent(chunk).writerIndex(out.writerIndex() + chunk.readableBytes());
    }
    assert want == 0;
    return out;
}

From source file:org.vertx.java.core.http.impl.DefaultHttpClientRequest.java

License:Open Source License

/**
 * Writes a chunk of the request body, buffering it until a connection is
 * available.
 *
 * @param buff the body chunk to write (may be empty when {@code end} is true)
 * @param end  whether this is the final chunk of the request
 * @return this request, for fluent chaining
 * @throws IllegalStateException if the request is not raw, not chunked, and
 *         has no Content-Length header set before body data is written
 */
private DefaultHttpClientRequest write(ByteBuf buff, boolean end) {
    int readableBytes = buff.readableBytes();
    if (readableBytes == 0 && !end) {
        // nothing to write to the connection just return
        return this;
    }

    if (end) {
        completed = true;
    }

    written += buff.readableBytes();

    if (!end && !raw && !chunked && !contentLengthSet()) {
        throw new IllegalStateException(
                "You must set the Content-Length header to be the total size of the message "
                        + "body BEFORE sending any data if you are not using HTTP chunked encoding.");
    }

    if (conn == null) {
        // Not connected yet: accumulate chunks until the connection is up.
        if (pendingChunks == null) {
            pendingChunks = buff;
        } else {
            CompositeByteBuf pending;
            if (pendingChunks instanceof CompositeByteBuf) {
                pending = (CompositeByteBuf) pendingChunks;
            } else {
                // Lazily upgrade the single pending buffer to a composite so
                // further chunks can be appended without copying.
                pending = Unpooled.compositeBuffer();
                pending.addComponent(pendingChunks).writerIndex(pendingChunks.writerIndex());
                pendingChunks = pending;
            }
            // addComponent() does not advance the writer index, so it is
            // bumped manually here.
            // NOTE(review): this adds buff.writerIndex() rather than
            // buff.readableBytes(); the two differ if buff has a non-zero
            // reader index — confirm chunks always arrive unread.
            pending.addComponent(buff).writerIndex(pending.writerIndex() + buff.writerIndex());
        }
        connect();
    } else {
        if (!headWritten) {
            // First write: the headers go out together with this content.
            writeHeadWithContent(buff, end);
        } else {
            if (end) {
                writeEndChunk(buff);
            } else {
                sendChunk(buff);
            }
        }
        if (end) {
            conn.endRequest();
        }
    }
    return this;
}

From source file:org.wso2.carbon.gateway.internal.mediation.camel.CarbonMessageTypeConverter.java

License:Open Source License

@SuppressWarnings("unchecked")

/**
 * Converts a {@link CarbonMessage} payload to the requested target type by
 * aggregating its content chunks and dispatching on {@code type}.
 *
 * @param type     the target type the caller wants
 * @param exchange the Camel exchange providing conversion context
 * @param value    the object to convert; only {@code CarbonMessage} is handled
 * @return the converted value, or {@code null} if {@code value} is not a
 *         CarbonMessage, the content is empty, or the type is unsupported
 */
public <T> T convertTo(Class<T> type, Exchange exchange, Object value) {
    if (value instanceof CarbonMessage) {
        //Retrieving the Pipe from the carbon message
        Pipe pipe = ((CarbonMessage) value).getPipe();
        //Create a composite buffer from content chunks in the pipe
        CompositeByteBuf contentBuf = aggregateChunks(pipe);
        //Check whether we have any content to be processed
        if (contentBuf.capacity() != 0) {
            try {
                // Dispatch in order of specificity; the order matters because
                // isAssignableFrom can match broader types (e.g. Object).
                if (type.isAssignableFrom(Document.class)) {
                    //Convert the input stream into xml dom element
                    return (T) toDocument(contentBuf, exchange);
                } else if (type.isAssignableFrom(DOMSource.class)) {
                    return (T) toDOMSource(contentBuf, exchange);
                } else if (type.isAssignableFrom(SAXSource.class)) {
                    return (T) toSAXSource(contentBuf, exchange);
                } else if (type.isAssignableFrom(StAXSource.class)) {
                    return (T) toStAXSource(contentBuf, exchange);
                } else if (type.isAssignableFrom(StreamSource.class)) {
                    return (T) toStreamSource(contentBuf, exchange);
                } else if (type.isAssignableFrom(InputStream.class)) {
                    return (T) toInputStream(contentBuf, exchange);
                } else if (type.isAssignableFrom(String.class)) {
                    return (T) toString(contentBuf, exchange);
                }
            } catch (UnsupportedEncodingException e) {
                log.error("Error occurred during type conversion", e);
            } finally {
                //Release the buffer if all the content has been consumed
                if (contentBuf.readerIndex() == contentBuf.writerIndex()) {
                    contentBuf.release();
                }
            }
        }

    }
    return null;
}