Example usage for io.netty.buffer ByteBuf retain

Introduction

This page lists example usages of io.netty.buffer.ByteBuf.retain() collected from open-source projects.

Prototype

@Override
public abstract ByteBuf retain();
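
The snippet below is a minimal, self-contained sketch (not taken from the projects listed under Usage) that illustrates the reference-counting contract behind retain(): each call to retain() increments the buffer's reference count and must eventually be balanced by a release(), otherwise the buffer's memory is never returned to the allocator.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class RetainReleaseSketch {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);  // refCnt == 1 on allocation
        buf.writeInt(42);

        buf.retain();                       // refCnt == 2: hand the buffer to a second consumer
        System.out.println(buf.refCnt());   // prints 2

        buf.release();                      // the consumer is done: refCnt == 1
        buf.release();                      // the original owner is done: refCnt == 0, memory reclaimed
        System.out.println(buf.refCnt());   // prints 0
    }
}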

Usage

From source file:HelloWorldHttp2Handler.java

License:Apache License

/**
 * If a frame with end-of-stream set is received, send a pre-canned response.
 */
@Override
public void onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, boolean endOfStream,
        boolean endOfSegment) throws Http2Exception {
    if (endOfStream) {
        sendResponse(ctx(), streamId, data.retain());
    }
}

From source file:IncommingPacketHandler.java

License:Open Source License

@Override
protected void messageReceived(ChannelHandlerContext channelHandlerContext, DatagramPacket packet)
        throws Exception {
    final InetAddress srcAddr = packet.sender().getAddress();
    final ByteBuf buf = packet.content();
    try {

        CapwapEvent event = new CapwapEvent(CapwapEventType.DECODE);
        event.setPacket(packet);
        buf.retain();
        event.setIncomingBuf(buf);
        event.setCtx(channelHandlerContext);
        msgProcessor.tell(event, this.msgProcessor);

    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:alluxio.client.block.stream.UfsFallbackLocalFileDataWriter.java

License:Apache License

@Override
public void writeChunk(ByteBuf chunk) throws IOException {
    if (mIsWritingToLocal) {
        long pos = mLocalFileDataWriter.pos();
        try {
            // chunk.refcount++ to ensure chunk not garbage-collected if writeChunk fails
            chunk.retain();
            // chunk.refcount-- inside regardless of exception
            mLocalFileDataWriter.writeChunk(chunk);
            // chunk.refcount-- on success
            chunk.release();
            return;
        } catch (ResourceExhaustedException e) {
            LOG.warn("Fallback to write to UFS for block {} due to a failure of insufficient space "
                    + "on the local worker: {}", mBlockId, e.getMessage());
            mIsWritingToLocal = false;
        }
        try {
            if (pos == 0) {
                // Nothing has been written to temp block, we can cancel this failed local writer and
                // cleanup the temp block.
                mLocalFileDataWriter.cancel();
            } else {
                // Note that we cannot cancel mLocalFileDataWriter now, as the cancel message may
                // arrive and clean up the temp block before it is written to UFS.
                mLocalFileDataWriter.flush();
            }
            // Close the block writer. We do not close mLocalFileDataWriter to prevent the worker
            // from completing the block, committing it, and removing it.
            //mLocalFileDataWriter.getWriter().close();
            mGrpcDataWriter = GrpcDataWriter.create(mContext, mWorkerNetAddress, mBlockId, mBlockSize,
                    RequestType.UFS_FALLBACK_BLOCK, mOutStreamOptions);
            // Instruct the server to write the previously transferred data from temp block to UFS only
            // when there is data already written.
            if (pos > 0) {
                mGrpcDataWriter.writeFallbackInitRequest(pos);
            }
        } catch (Exception e) {
            // chunk.refcount-- on exception
            chunk.release();
            throw new IOException("Failed to switch to writing block " + mBlockId + " to UFS", e);
        }
    }
    mGrpcDataWriter.writeChunk(chunk); // refcount-- inside to release chunk
}

From source file:alluxio.client.block.stream.UfsFallbackLocalFilePacketWriter.java

License:Apache License

@Override
public void writePacket(ByteBuf packet) throws IOException {
    if (mIsWritingToLocal) {
        long pos = mLocalFilePacketWriter.pos();
        try {
            // packet.refcount++ to ensure packet not garbage-collected if writePacket fails
            packet.retain();
            // packet.refcount-- inside regardless of exception
            mLocalFilePacketWriter.writePacket(packet);
            // packet.refcount-- on success
            packet.release();
            return;
        } catch (ResourceExhaustedException e) {
            LOG.warn("Fallback to write to UFS for block {} due to a failure of insufficient space "
                    + "on the local worker: {}", mBlockId, e.getMessage());
            mIsWritingToLocal = false;
        }
        try {
            if (pos == 0) {
                // Nothing has been written to temp block, we can cancel this failed local writer and
                // cleanup the temp block.
                mLocalFilePacketWriter.cancel();
            } else {
                // Note that we cannot cancel mLocalFilePacketWriter now, as the cancel message may
                // arrive and clean up the temp block before it is written to UFS.
                mLocalFilePacketWriter.flush();
            }
            // Close the block writer. We do not close mLocalFilePacketWriter to prevent the worker
            // from completing the block, committing it, and removing it.
            //mLocalFilePacketWriter.getWriter().close();
            mNettyPacketWriter = NettyPacketWriter.create(mContext, mWorkerNetAddress, mBlockId, mBlockSize,
                    Protocol.RequestType.UFS_FALLBACK_BLOCK, mOutStreamOptions);
            // Instruct the server to write the previously transferred data from temp block to UFS only
            // when there is data already written.
            if (pos > 0) {
                mNettyPacketWriter.writeFallbackInitPacket(pos);
            }
        } catch (Exception e) {
            // packet.refcount-- on exception
            packet.release();
            throw new IOException("Failed to switch to writing block " + mBlockId + " to UFS", e);
        }
    }
    mNettyPacketWriter.writePacket(packet); // refcount-- inside to release packet
}

From source file:alluxio.grpc.GrpcSerializationUtils.java

License:Apache License

/**
 * Add the given buffers directly to the gRPC output stream.
 *
 * @param buffers the buffers to be added
 * @param stream the output stream
 * @return whether the buffers are added successfully
 */
public static boolean addBuffersToStream(ByteBuf[] buffers, OutputStream stream) {
    if (!sZeroCopySendSupported || !stream.getClass().equals(sBufferList.getDeclaringClass())) {
        return false;
    }
    try {
        if (sCurrent.get(stream) != null) {
            return false;
        }
        for (ByteBuf buffer : buffers) {
            Object nettyBuffer = sNettyWritableBufferConstructor.newInstance(buffer);
            List list = (List) sBufferList.get(stream);
            list.add(nettyBuffer);
            buffer.retain();
            sCurrent.set(stream, nettyBuffer);
        }
        return true;
    } catch (Exception e) {
        LOG.warn("Failed to add data buffer to stream: {}.", e.getMessage());
        return false;
    }
}

From source file:alluxio.network.protocol.databuffer.DataNettyBuffer.java

License:Apache License

/**
 * Constructor for creating a DataNettyBuffer by passing a Netty ByteBuf.
 * This way we avoid one copy from the ByteBuf to another ByteBuffer,
 * while making sure the buffer will not be recycled.
 * IMPORTANT: {@link #release()} must be called after reading is finished.
 * Otherwise the memory space for the ByteBuf might never be reclaimed.
 *
 * @param bytebuf the ByteBuf holding the data
 * @param length the length of the underlying ByteBuffer data
 */
public DataNettyBuffer(ByteBuf bytebuf, long length) {
    // throws exception if there are multiple nioBuffers, or reference count is not 1
    Preconditions.checkArgument(bytebuf.nioBufferCount() == 1,
            "Number of nioBuffers of this bytebuf is %s (1 expected).", bytebuf.nioBufferCount());
    Preconditions.checkArgument(bytebuf.refCnt() == 1, "Reference count of this bytebuf is %s (1 expected).",
            bytebuf.refCnt());

    // increase the bytebuf reference count so it would not be recycled by Netty
    bytebuf.retain();
    mNettyBuf = bytebuf;
    mBuffer = bytebuf.nioBuffer();
    mLength = length;
}

From source file:alluxio.network.protocol.RPCProtoMessage.java

License:Apache License

/**
 * Decodes the message from a buffer. This method increments the refcount of the bytebuf passed
 * by 1.
 *
 * @param in the buffer
 * @param prototype a message prototype used to infer the type of the message
 * @return the message decoded
 */
public static RPCProtoMessage decode(ByteBuf in, ProtoMessage.Type prototype) {
    int length = in.readInt();
    byte[] serialized = new byte[length];
    in.readBytes(serialized);
    in.retain();
    return new RPCProtoMessage(serialized, prototype, new DataNettyBufferV2(in));
}

From source file:at.yawk.dbus.protocol.codec.MessageHeaderCodec.java

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf rawBuf, List<Object> out) throws Exception {
    if (toRead != 0) {
        if (rawBuf.readableBytes() < toRead) {
            return;
        }
        ByteBuf slice = rawBuf.slice().order(byteOrder);
        slice.writerIndex(slice.readerIndex() + toRead);
        slice.retain();
        AlignableByteBuf decoding = AlignableByteBuf.decoding(slice);
        log.trace("INBOUND {}", decoding);
        out.add(decoding);

        rawBuf.readerIndex(rawBuf.readerIndex() + toRead);
        toRead = 0;
    }

    if (rawBuf.readableBytes() < MIN_HEADER_LENGTH) {
        return;
    }

    rawBuf.markReaderIndex();
    byte endianness = rawBuf.readByte();
    ByteOrder order;
    switch (endianness) {
    case 'l':
        order = ByteOrder.LITTLE_ENDIAN;
        break;
    case 'B':
        order = ByteOrder.BIG_ENDIAN;
        break;
    default:
        throw new DecoderException("Unknown byte order byte " + endianness);
    }

    AlignableByteBuf buf = AlignableByteBuf.decoding(rawBuf.resetReaderIndex().order(order));

    buf.getBuffer().markReaderIndex();
    buf.readByte(); // skip endianness byte we read above

    @Nullable
    MessageType type = MessageType.byId(buf.readByte());
    byte flags = buf.readByte();
    byte majorProtocolVersion = buf.readByte();
    if (majorProtocolVersion != PROTOCOL_VERSION) {
        throw new DecoderException("Unsupported major protocol version " + majorProtocolVersion);
    }
    long bodyLength = buf.readUnsignedInt();
    int serial = buf.readInt();

    MessageHeader header = new MessageHeader();
    header.setByteOrder(order);
    header.setMessageType(type);
    header.setNoReplyExpected((flags & NO_REPLY_EXPECTED) != 0);
    header.setNoAutoStart((flags & NO_AUTO_START) != 0);
    header.setAllowInteractiveAuthorization((flags & ALLOW_INTERACTIVE_AUTHORIZATION) != 0);
    header.setMajorProtocolVersion(majorProtocolVersion);
    header.setMessageBodyLength(bodyLength);
    header.setSerial(serial);
    header.setHeaderFields(new EnumMap<>(HeaderField.class));

    ArrayObject headers = (ArrayObject) tryDecode(HEADER_FIELD_LIST_TYPE, buf);
    if (headers == null) {
        // not enough data
        buf.getBuffer().resetReaderIndex();
        return;
    }
    for (DbusObject struct : headers.getValues()) {
        HeaderField field = HeaderField.byId(struct.get(0).byteValue());
        if (field != null) {
            DbusObject value = struct.get(1).getValue();
            if (!value.getType().equals(field.getType())) {
                throw new DecoderException("Invalid header type on " + field + ": got " + value.getType()
                        + " but expected " + field.getType());
            }
            header.getHeaderFields().put(field, value);
        }
    }

    if (type != null) {
        checkRequiredHeaderFieldsPresent(header);
    }

    if (!buf.canAlignRead(8)) {
        buf.getBuffer().resetReaderIndex();
        return;
    }
    buf.alignRead(8);

    toRead = Math.toIntExact(header.getMessageBodyLength());
    byteOrder = order;
    out.add(header);
}

From source file:c5db.codec.UdpProtostuffEncoder.java

License:Apache License

@Override
protected void encode(ChannelHandlerContext ctx, UdpProtostuffMessage<T> msg, List<Object> out)
        throws Exception {
    LinkBuffer buffer = new LinkBuffer(bufferAllocSize);
    if (protostuffOutput) {
        LowCopyProtostuffOutput lcpo = new LowCopyProtostuffOutput(buffer);
        schema.writeTo(lcpo, msg.message);
    } else {
        LowCopyProtobufOutput lcpo = new LowCopyProtobufOutput(buffer);
        schema.writeTo(lcpo, msg.message);
    }

    List<ByteBuffer> buffers = buffer.finish();
    ByteBuf data = Unpooled.wrappedBuffer(buffers.toArray(new ByteBuffer[buffers.size()]));
    data.retain();

    DatagramPacket dg = new DatagramPacket(data, msg.remoteAddress);
    dg.retain();
    out.add(dg);
}

From source file:com.barchart.netty.common.pipeline.WebSocketBinaryCodec.java

License:BSD License

@Override
protected void encode(final ChannelHandlerContext ctx, final ByteBuf msg, final List<Object> out)
        throws Exception {
    out.add(new BinaryWebSocketFrame(msg.retain()));
}