Example usage for java.nio ByteBuffer remaining

List of usage examples for java.nio ByteBuffer remaining

Introduction

On this page you can find example usages of java.nio.ByteBuffer.remaining().

Prototype

public final int remaining() 

Document

Returns the number of remaining elements in this buffer, that is, limit - position.
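
For orientation, a minimal sketch (not taken from any of the projects below) showing how remaining() tracks limit - position as the buffer is written, flipped, and read:

public static void remainingDemo() {
    ByteBuffer buf = ByteBuffer.allocate(16);  // position = 0, limit = 16, remaining = 16
    buf.putInt(42).putInt(7);                  // position = 8, remaining = 8
    System.out.println(buf.remaining());       // 8
    buf.flip();                                // position = 0, limit = 8
    System.out.println(buf.remaining());       // 8
    buf.getInt();                              // position = 4
    System.out.println(buf.remaining());       // 4
}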

Usage

From source file:com.dianping.puma.parser.mysql.event.AbstractBinlogEvent.java

public boolean isRemaining(ByteBuffer buf, PumaContext context) {
    return context.isCheckSum() ? buf.remaining() - 4 > 0 : buf.hasRemaining();
}

From source file:com.dianping.puma.parser.mysql.event.AbstractBinlogEvent.java

public int lenRemaining(ByteBuffer buf, PumaContext context) {
    return context.isCheckSum() ? buf.remaining() - 4 : buf.remaining();
}
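
Both helpers subtract 4 because, when binlog checksums are enabled, MySQL appends a 4-byte CRC32 trailer to each event; those bytes sit between position and limit but are not payload. A minimal sketch of the same idea outside Puma (CHECKSUM_LENGTH and payloadRemaining are illustrative names, not part of the project):

private static final int CHECKSUM_LENGTH = 4; // CRC32 trailer when binlog checksums are on

static int payloadRemaining(ByteBuffer buf, boolean checksumEnabled) {
    int remaining = buf.remaining();
    return checksumEnabled ? Math.max(remaining - CHECKSUM_LENGTH, 0) : remaining;
}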

From source file:com.gumgum.kafka.consumer.KafkaTemplate.java

private String convertToUtf8String(ByteBuffer buffer) throws Exception {
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes);
    return new String(bytes, "UTF-8");
}
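
Allocating byte[buffer.remaining()] and draining it with a bulk get() is the standard way to copy exactly the bytes between position and limit. A variant of the same pattern (a sketch, not part of KafkaTemplate) that avoids the checked exception by using StandardCharsets:

private static String drainToUtf8String(ByteBuffer buffer) {
    byte[] bytes = new byte[buffer.remaining()];   // exactly the unread bytes
    buffer.get(bytes);                             // advances position to limit
    return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
}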

From source file:com.openteach.diamond.network.waverider.network.Packet.java

/**
 * Parses one Packet from the network input buffer queue.
 * @param inputBuffer queue of buffers filled by the network reader
 * @param endPoint endpoint used to request more reads on the channel
 * @param channel the socket channel the data arrives on
 * @return the parsed Packet
 * @throws IOException if the socket is closed by another thread
 * @throws InterruptedException if interrupted while waiting for data
 */
public static Packet parse(BlockingQueue<ByteBuffer> inputBuffer, NetWorkEndPoint endPoint,
        SocketChannel channel) throws IOException, InterruptedException {
    // Buffer for packet header
    byte[] tmpBuf = new byte[NetWorkConstants.DEFAULT_NETWORK_BUFFER_SIZE];
    ByteBuffer header = ByteBuffer.allocate(Packet.getHeaderSize());
    ByteBuffer currentBuffer = null;
    int rest = 0;
    boolean isRemove = false;

    // Fill the header buffer from the queued network buffers
    while (true) {
        while ((currentBuffer = inputBuffer.peek()) == null) {
            if (!endPoint.notifyRead(channel)) {
                throw new IOException("Socket closed by other thread");
            }
            // Yield until the reader thread buffers more data
            //endPoint.waitMoreData(5);
            // FIXME 2ms
            //Thread.sleep(1);
            Thread.yield();
        }
        isRemove = false;
        rest = header.capacity() - header.position();
        if (currentBuffer.remaining() >= rest) {
            if (currentBuffer.remaining() == rest) {
                isRemove = true;
            }
            currentBuffer.get(tmpBuf, 0, rest);
            header.put(tmpBuf, 0, rest);
            if (isRemove) {
                inputBuffer.remove();
            }
            break;
        } else {
            header.put(currentBuffer);
            inputBuffer.remove();
        }
    }

    header.flip();

    // The header is complete; read the declared packet size and allocate the body buffer

    // Total packet size from the length field in the header
    Integer size = header.getInt(Packet.getLengthPosition());
    // For test
    /*if(size < 0 || size > 100000) {
       logger.info("Error");
    }*/
    //logger.debug(new StringBuilder("Try to allocate ").append(size).append(" bytes memory"));
    ByteBuffer buffer = ByteBuffer.allocate(size);
    buffer.put(header);
    header.clear();

    // Fill the rest of the packet body from the queued network buffers
    while (true) {
        while ((currentBuffer = inputBuffer.peek()) == null) {
            endPoint.notifyRead(channel);
            Thread.sleep(1000);
        }
        isRemove = false;
        rest = buffer.capacity() - buffer.position();
        if (currentBuffer.remaining() >= rest) {
            if (currentBuffer.remaining() == rest) {
                isRemove = true;
            }
            currentBuffer.get(tmpBuf, 0, rest);
            buffer.put(tmpBuf, 0, rest);
            if (isRemove) {
                inputBuffer.remove();
            }
            break;
        } else {
            buffer.put(currentBuffer);
            inputBuffer.remove();
        }
    }
    //buffer.position(0);
    buffer.flip();
    Packet packet = Packet.unmarshall(buffer);
    //logger.info("Parse one packet from network");
    //packet.dump();
    return packet;
}
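
Both loops above follow one pattern: compare the current chunk's remaining() with the space still needed in the destination buffer, copy the smaller amount, and only dequeue a chunk once it has been fully consumed. A condensed sketch of that pattern (fillFrom is an illustrative helper, not part of the Packet class; it uses duplicate() instead of the intermediate tmpBuf copy):

static void fillFrom(Queue<ByteBuffer> chunks, ByteBuffer dst) {
    while (dst.hasRemaining()) {
        ByteBuffer chunk = chunks.peek();            // inspect the head chunk without removing it
        if (chunk == null) {
            Thread.yield();                          // nothing buffered yet; let the reader thread run
            continue;
        }
        int needed = dst.remaining();
        if (chunk.remaining() <= needed) {
            dst.put(chunk);                          // consume the whole chunk...
            chunks.remove();                         // ...and drop it from the queue
        } else {
            ByteBuffer slice = chunk.duplicate();    // copy only what fits, leave the rest queued
            slice.limit(slice.position() + needed);
            dst.put(slice);
            chunk.position(chunk.position() + needed);
        }
    }
}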

From source file:org.apache.trevni.BZip2Codec.java

@Override
ByteBuffer compress(ByteBuffer uncompressedData) throws IOException {
    ByteArrayOutputStream baos = getOutputBuffer(uncompressedData.remaining());
    BZip2CompressorOutputStream outputStream = new BZip2CompressorOutputStream(baos);

    try {
        outputStream.write(uncompressedData.array());
    } finally {
        outputStream.close();
    }

    ByteBuffer result = ByteBuffer.wrap(baos.toByteArray());
    return result;
}
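
Here remaining() only sizes the output buffer; the actual write(uncompressedData.array()) ignores the buffer's position and limit and would fail for a direct or read-only buffer. A position-aware write would cover exactly the remaining bytes, roughly as in this sketch:

if (uncompressedData.hasArray()) {
    outputStream.write(uncompressedData.array(),
            uncompressedData.arrayOffset() + uncompressedData.position(),
            uncompressedData.remaining());
} else {
    byte[] copy = new byte[uncompressedData.remaining()];
    uncompressedData.duplicate().get(copy);   // duplicate() leaves the caller's position untouched
    outputStream.write(copy);
}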

From source file:com.linkedin.databus.core.DbusEventV1.java

private static int serializeFullEvent(long key, ByteBuffer serializationBuffer, DbusEventInfo eventInfo,
        byte[] attributes) {

    ByteBuffer valueBuffer = eventInfo.getValueByteBuffer();
    int payloadLen = (valueBuffer == null) ? eventInfo.getValueLength() : valueBuffer.remaining();

    int startPosition = serializationBuffer.position();
    serializationBuffer.put(DbusEventFactory.DBUS_EVENT_V1).putInt(HeaderCrcDefault)
            .putInt(LongKeyValueOffset + payloadLen).put(attributes).putLong(eventInfo.getSequenceId())
            .putShort(eventInfo.getpPartitionId()).putShort(eventInfo.getlPartitionId())
            .putLong(eventInfo.getTimeStampInNanos()).putShort(eventInfo.getSrcId())
            .put(eventInfo.getSchemaId(), 0, 16).putInt(HeaderCrcDefault).putLong(key);
    if (valueBuffer != null) {
        // note. put will advance position. In the case of wrapped byte[] it is ok, in the case of
        // ByteBuffer this is actually a read only copy of the buffer passed in.
        serializationBuffer.put(valueBuffer);
    }

    int stopPosition = serializationBuffer.position();

    long valueCrc = ByteBufferCRC32.getChecksum(serializationBuffer, startPosition + LongKeyValueOffset,
            payloadLen);
    Utils.putUnsignedInt(serializationBuffer, startPosition + ValueCrcOffset, valueCrc);

    if (eventInfo.isAutocommit()) {
        //TODO (DDSDBUS-60): Medium : can avoid new here
        DbusEventV1 e = new DbusEventV1(serializationBuffer, startPosition);
        e.applyCrc();
    }

    serializationBuffer.position(stopPosition);
    return (stopPosition - startPosition);
}
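
payloadLen is captured from valueBuffer.remaining() before the bulk put(), since put() advances the source buffer's position to its limit and leaves remaining() at zero afterwards. The comment above notes that getValueByteBuffer() already hands back a read-only copy; if a caller's buffer did need to survive the write, serializing a duplicate would achieve the same thing, as in this sketch reusing the names above:

ByteBuffer value = eventInfo.getValueByteBuffer();
int payloadLen = (value == null) ? eventInfo.getValueLength() : value.remaining();
if (value != null) {
    // duplicate() shares the bytes but has its own position/limit,
    // so the caller's view of the value is not consumed by put()
    serializationBuffer.put(value.duplicate());
}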

From source file:me.carpela.network.pt.cracker.tools.ttorrent.Torrent.java

private static String hashFiles(List<File> files, int pieceLenght)
        throws InterruptedException, IOException, NoSuchAlgorithmException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    return hashes.toString();
}
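
The last-piece handling sets limit to the current position and rewinds to zero by hand, which is exactly what flip() does; after that, remaining() is the length of the final, possibly short, piece. (The clear() inside the read loop works only because the buffer is completely full at that point, so position 0 and limit capacity still span the whole piece.) A sketch of the same final step using flip():

// Hash the trailing partial piece, if any (equivalent to the manual limit/position calls above)
if (buffer.position() > 0) {
    buffer.flip();                                   // limit = bytes read, position = 0
    results.add(executor.submit(new CallableChunkHasher(buffer)));
}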

From source file:com.esri.geoevent.transport.twitter.TwitterOutboundTransport.java

@Override
public void receive(ByteBuffer bb, String channelId) {
    byte[] data = new byte[bb.remaining()];
    bb.get(data);

    postBodyOrg = "status=" + new String(data);
    postBody = OAuth.encodePostBody(postBodyOrg);
    LOGGER.debug(postBody);

    // super.receive(bb, channelId);
    doHttp();
}

From source file:org.apache.avro.file.ZstandardCodec.java

@Override
public ByteBuffer compress(ByteBuffer uncompressedData) throws IOException {
    ByteArrayOutputStream baos = getOutputBuffer(uncompressedData.remaining());
    OutputStream outputStream = new ZstdCompressorOutputStream(baos);
    writeAndClose(uncompressedData, outputStream);
    return ByteBuffer.wrap(baos.toByteArray());
}

From source file:com.linkedin.databus.core.DbusEventV1.java

private static int serializeStringKeyEvent(byte[] key, ByteBuffer serializationBuffer,
        DbusEventInfo eventInfo) {
    ByteBuffer valueBuffer = eventInfo.getValueByteBuffer();
    int payloadLen = (valueBuffer == null) ? eventInfo.getValueLength() : valueBuffer.remaining();

    int startPosition = serializationBuffer.position();
    byte[] attributes = null;

    // Event without explicit opcode specified should always be considered UPSERT or existing code will break
    if (eventInfo.getOpCode() == DbusOpcode.DELETE) {
        if (serializationBuffer.order() == ByteOrder.BIG_ENDIAN)
            attributes = DeleteStringKeyAttributesBigEndian.clone();
        else
            attributes = DeleteStringKeyAttributesLittleEndian.clone();
    } else {
        if (serializationBuffer.order() == ByteOrder.BIG_ENDIAN)
            attributes = UpsertStringKeyAttributesBigEndian.clone();
        else
            attributes = UpsertStringKeyAttributesLittleEndian.clone();
    }

    if (eventInfo.isEnableTracing()) {
        setTraceFlag(attributes, serializationBuffer.order());
    }

    if (eventInfo.isReplicated())
        setExtReplicationFlag(attributes, serializationBuffer.order());

    serializationBuffer.put(DbusEventFactory.DBUS_EVENT_V1).putInt(HeaderCrcDefault)
            .putInt(StringValueOffset(key.length) + payloadLen).put(attributes)
            .putLong(eventInfo.getSequenceId()).putShort(eventInfo.getpPartitionId())
            .putShort(eventInfo.getlPartitionId()).putLong(eventInfo.getTimeStampInNanos())
            .putShort(eventInfo.getSrcId()).put(eventInfo.getSchemaId(), 0, 16).putInt(HeaderCrcDefault)
            .putInt(key.length).put(key);
    if (valueBuffer != null) {
        // note. put will advance position. In the case of wrapped byte[] it is ok, in the case of
        // ByteBuffer this is actually a read only copy of the buffer passed in.
        serializationBuffer.put(valueBuffer);
    }

    int stopPosition = serializationBuffer.position();
    long valueCrc = ByteBufferCRC32.getChecksum(serializationBuffer, startPosition + StringKeyOffset,
            key.length + payloadLen);
    Utils.putUnsignedInt(serializationBuffer, startPosition + ValueCrcOffset, valueCrc);
    if (eventInfo.isAutocommit()) {
        //TODO (DDSDBUS-61): Medium : can avoid new here
        DbusEventV1 e = new DbusEventV1(serializationBuffer, startPosition);
        e.applyCrc();
    }

    serializationBuffer.position(stopPosition);
    return (stopPosition - startPosition);
}