Example usage for java.nio ByteBuffer rewind

List of usage examples for java.nio ByteBuffer rewind

Introduction

On this page you can find usage examples for java.nio.ByteBuffer.rewind(), drawn from several open-source projects.

Prototype

public final Buffer rewind() 

Document

Rewinds this buffer. The position is set to zero and the mark is discarded.
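
To make the effect concrete, here is a minimal, self-contained sketch (not taken from any of the projects below): an int is written, the buffer is rewound, and the same bytes are read back from the start.

import java.nio.ByteBuffer;

public class RewindDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        buf.putInt(42);                   // position is now 4
        buf.rewind();                     // position back to 0, limit unchanged, mark discarded
        System.out.println(buf.getInt()); // prints 42, read from the start of the buffer
    }
}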

Usage

From source file:org.apache.carbondata.core.util.CarbonUtil.java

/**
 * Below method will be used to get the surrogate key
 *
 * @param data   actual data
 * @param buffer byte buffer which will be used to convert the data to integer value
 * @return surrogate key
 */
public static int getSurrogateKey(byte[] data, ByteBuffer buffer) {
    int length = 4 - data.length;
    // left-pad with zero bytes so the value occupies exactly 4 bytes in the buffer
    for (int i = 0; i < length; i++) {
        buffer.put((byte) 0);
    }
    buffer.put(data);
    buffer.rewind();
    int surrogate = buffer.getInt();
    buffer.clear();
    return surrogate;
}
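
A minimal sketch of how this helper might be called; the reusable 4-byte buffer and the sample key bytes below are assumptions for illustration, not taken from CarbonData:

// Hypothetical caller: the buffer needs a capacity of at least 4 bytes and is reused across calls.
ByteBuffer buffer = ByteBuffer.allocate(4);
byte[] data = new byte[] { 0x01, 0x2C };                  // a 2-byte dictionary value
int surrogate = CarbonUtil.getSurrogateKey(data, buffer); // zero-padded to 4 bytes, read as int -> 300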

From source file:com.turn.ttorrent.client.ConnectionHandler.java

/**
 * Validate an expected handshake on a connection.
 *
 * <p>
 * Reads an expected handshake message from the given connected socket,
 * parses it and validates that the torrent info_hash corresponds to the
 * torrent we're sharing, and that the peerId matches the peer ID we expect
 * to see coming from the remote peer.
 * </p>
 *
 * @param channel The connected socket channel to the remote peer.
 * @param peerId The peer ID we expect in the handshake. If <em>null</em>,
 * any peer ID is accepted (this is the case for incoming connections).
 * @return The validated handshake message object.
 */
private Handshake validateHandshake(SocketChannel channel, byte[] peerId) throws IOException, ParseException {
    ByteBuffer len = ByteBuffer.allocate(1);
    ByteBuffer data;

    // Read the handshake from the wire
    logger.trace("Reading handshake size (1 byte) from {}...", this.socketRepr(channel));
    if (channel.read(len) < len.capacity()) {
        throw new IOException("Handshake size read underrrun");
    }

    len.rewind();
    int pstrlen = len.get();

    data = ByteBuffer.allocate(Handshake.BASE_HANDSHAKE_LENGTH + pstrlen);
    data.put((byte) pstrlen);
    int expected = data.remaining();
    int read = channel.read(data);
    if (read < expected) {
        throw new IOException("Handshake data read underrun (" + read + " < " + expected + " bytes)");
    }

    // Parse and check the handshake
    data.rewind();
    Handshake hs = Handshake.parse(data);
    if (!Arrays.equals(hs.getInfoHash(), this.torrent.getInfoHash())) {
        throw new ParseException("Handshake for unknow torrent " + Utils.bytesToHex(hs.getInfoHash()) + " from "
                + this.socketRepr(channel) + ".", pstrlen + 9);
    }

    if (peerId != null && !Arrays.equals(hs.getPeerId(), peerId)) {
        throw new ParseException("Announced peer ID " + Utils.bytesToHex(hs.getPeerId())
                + " did not match expected peer ID " + Utils.bytesToHex(peerId) + ".", pstrlen + 29);
    }

    return hs;
}
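
A note on the two rewind() calls above: each buffer is allocated to the exact number of bytes expected, and the underrun checks guarantee it is completely filled, so rewind() (which only resets the position to zero) is enough to read the message back from the start; flip() would behave identically here because the limit already equals the capacity.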

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlockCompatibility.java

/**
 * Test encoding/decoding data blocks.
 * @throws IOException a bug or a problem with temporary files.
 */
@Test
public void testDataBlockEncoding() throws IOException {
    if (includesTag) {
        TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
    }
    final int numBlocks = 5;
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
                LOG.info("testDataBlockEncoding algo " + algo + " pread = " + pread + " encoding " + encoding);
                Path path = new Path(TEST_UTIL.getDataTestDir(),
                        "blocks_v2_" + algo + "_" + encoding.toString());
                FSDataOutputStream os = fs.create(path);
                HFileDataBlockEncoder dataBlockEncoder = (encoding != DataBlockEncoding.NONE)
                        ? new HFileDataBlockEncoderImpl(encoding)
                        : NoOpDataBlockEncoder.INSTANCE;
                TestHFileBlockCompatibility.Writer hbw = new TestHFileBlockCompatibility.Writer(algo,
                        dataBlockEncoder, includesMemstoreTS, includesTag);
                long totalSize = 0;
                final List<Integer> encodedSizes = new ArrayList<Integer>();
                final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
                for (int blockId = 0; blockId < numBlocks; ++blockId) {
                    hbw.startWriting(BlockType.DATA);
                    TestHFileBlock.writeTestKeyValues(hbw, blockId, pread, includesTag);
                    hbw.writeHeaderAndData(os);
                    int headerLen = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
                    byte[] encodedResultWithHeader = hbw.getUncompressedDataWithHeader();
                    final int encodedSize = encodedResultWithHeader.length - headerLen;
                    if (encoding != DataBlockEncoding.NONE) {
                        // We need to account for the two-byte encoding algorithm ID that
                        // comes after the 24-byte block header but before encoded KVs.
                        headerLen += DataBlockEncoding.ID_SIZE;
                    }
                    byte[] encodedDataSection = new byte[encodedResultWithHeader.length - headerLen];
                    System.arraycopy(encodedResultWithHeader, headerLen, encodedDataSection, 0,
                            encodedDataSection.length);
                    final ByteBuffer encodedBuf = ByteBuffer.wrap(encodedDataSection);
                    encodedSizes.add(encodedSize);
                    encodedBlocks.add(encodedBuf);
                    totalSize += hbw.getOnDiskSizeWithHeader();
                }
                os.close();

                FSDataInputStream is = fs.open(path);
                HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
                        .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag)
                        .withCompression(algo).build();
                HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(new FSDataInputStreamWrapper(is),
                        totalSize, fs, path, meta);
                hbr.setDataBlockEncoder(dataBlockEncoder);
                hbr.setIncludesMemstoreTS(includesMemstoreTS);

                HFileBlock b;
                int pos = 0;
                for (int blockId = 0; blockId < numBlocks; ++blockId) {
                    b = hbr.readBlockData(pos, -1, -1, pread);
                    b.sanityCheck();
                    pos += b.getOnDiskSizeWithHeader();

                    assertEquals((int) encodedSizes.get(blockId), b.getUncompressedSizeWithoutHeader());
                    ByteBuffer actualBuffer = b.getBufferWithoutHeader();
                    if (encoding != DataBlockEncoding.NONE) {
                        // We expect a two-byte big-endian encoding id.
                        assertEquals(0, actualBuffer.get(0));
                        assertEquals(encoding.getId(), actualBuffer.get(1));
                        actualBuffer.position(2);
                        actualBuffer = actualBuffer.slice();
                    }

                    ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
                    expectedBuffer.rewind();

                    // test if content matches, produce nice message
                    TestHFileBlock.assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding, pread);
                }
                is.close();
            }
        }
    }
}
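
The expectedBuffer.rewind() before the comparison simply guarantees that each expected block is read from position 0, regardless of whether anything advanced the buffer's position after it was wrapped; the TestHFileBlock example further down uses the same pattern.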

From source file:com.bittorrent.mpetazzoni.client.ConnectionHandler.java

/**
 * Validate an expected handshake on a connection.
 *
 * <p>
 * Reads an expected handshake message from the given connected socket,
 * parses it and validates that the torrent info_hash corresponds to the
 * torrent we're sharing, and that the peerId matches the peer ID we expect
 * to see coming from the remote peer.
 * </p>
 *
 * @param channel The connected socket channel to the remote peer.
 * @param peerId The peer ID we expect in the handshake. If <em>null</em>,
 * any peer ID is accepted (this is the case for incoming connections).
 * @return The validated handshake message object.
 */
private Handshake validateHandshake(SocketChannel channel, byte[] peerId) throws IOException, ParseException {
    ByteBuffer len = ByteBuffer.allocate(1);
    ByteBuffer data;

    // Read the handshake from the wire
    logger.trace("Reading handshake size (1 byte) from {}...", this.socketRepr(channel));
    if (channel.read(len) < len.capacity()) {
        throw new IOException("Handshake size read underrrun");
    }

    len.rewind();
    int pstrlen = len.get();

    data = ByteBuffer.allocate(Handshake.BASE_HANDSHAKE_LENGTH + pstrlen);
    data.put((byte) pstrlen);
    int expected = data.remaining();
    int read = channel.read(data);
    if (read < expected) {
        throw new IOException("Handshake data read underrun (" + read + " < " + expected + " bytes)");
    }

    // Parse and check the handshake
    data.rewind();
    Handshake hs = Handshake.parse(data);
    if (!Arrays.equals(hs.getInfoHash(), this.torrent.getInfoHash())) {
        throw new ParseException("Handshake for unknow torrent "
                + Torrent.byteArrayToHexString(hs.getInfoHash()) + " from " + this.socketRepr(channel) + ".",
                pstrlen + 9);
    }

    if (peerId != null && !Arrays.equals(hs.getPeerId(), peerId)) {
        throw new ParseException(
                "Announced peer ID " + Torrent.byteArrayToHexString(hs.getPeerId())
                        + " did not match expected peer ID " + Torrent.byteArrayToHexString(peerId) + ".",
                pstrlen + 29);
    }

    return hs;
}

From source file:org.opendaylight.controller.topology.web.Topology.java

/**
 * Add regular hosts to the main topology
 *
 * @param hostEdges - node-nodeconnectors host-specific mapping from topology
 * @param topology - topology instance
 * @param containerName - name of the network container the topology belongs to
 */
private void addHostNodes(Map<Node, Set<NodeConnector>> hostEdges, ITopologyManager topology,
        String containerName) {
    for (Map.Entry<Node, Set<NodeConnector>> e : hostEdges.entrySet()) {
        for (NodeConnector connector : e.getValue()) {
            List<Host> hosts = topology.getHostsAttachedToNodeConnector(connector);
            for (Host host : hosts) {
                EthernetAddress dmac = (EthernetAddress) host.getDataLayerAddress();

                ByteBuffer addressByteBuffer = ByteBuffer.allocate(8);
                addressByteBuffer.putShort((short) 0);
                addressByteBuffer.put(dmac.getValue());
                addressByteBuffer.rewind();

                long hid = addressByteBuffer.getLong();
                String hostId = String.valueOf(hid);

                NodeBean hostBean = new NodeBean(hostId, host.getNetworkAddressAsString(), NodeType.HOST);
                List<Map<String, Object>> adjacencies = new LinkedList<Map<String, Object>>();
                EdgeBean edge = new EdgeBean(connector, hid);
                adjacencies.add(edge.out());
                hostBean.setLinks(adjacencies);

                if (metaCache.get(containerName).containsKey(hostId)) {
                    Map<String, Object> hostEntry = metaCache.get(containerName).get(hostId);
                    hostEntry.put("adjacencies", adjacencies);
                    stagedNodes.put(hostId, hostEntry);
                } else {
                    newNodes.put(String.valueOf(hid), hostBean.out());
                }
            }
        }
    }
}
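
Here rewind() sits between writing and reading the same 8-byte buffer: a zero short plus the 6-byte MAC address are written, the position is reset, and the whole thing is read back as a long host id. The same conversion in isolation (the MAC bytes are made up for illustration):

byte[] mac = new byte[] { 0x00, 0x1B, 0x44, 0x11, 0x3A, (byte) 0xB7 }; // hypothetical MAC address
ByteBuffer addressByteBuffer = ByteBuffer.allocate(8);
addressByteBuffer.putShort((short) 0);  // two leading zero bytes so the 6-byte MAC fills exactly 8 bytes
addressByteBuffer.put(mac);
addressByteBuffer.rewind();             // back to position 0 before reading
long hid = addressByteBuffer.getLong(); // the MAC interpreted as a non-negative 64-bit value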

From source file:org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java

byte[] getBytesFromResponse(ByteString response) {
    ByteBuffer bb = response.asReadOnlyByteBuffer();
    bb.rewind();
    byte[] bytes;
    if (bb.hasArray()) {
        bytes = bb.array();
    } else {
        bytes = response.toByteArray();
    }
    return bytes;
}

From source file:org.apache.hadoop.raid.IADecoder.java

ReadPackage readFromInputs(FSDataInputStream[] inputs, int[] validErasedLocations, IAStreamFactory sf, int seq)
        throws IOException {
    boolean flag = true;
    while (flag) {
        flag = false;
        // For every input, read some data = bufSize
        for (int i = 0, j = 0; i < inputs.length; i++) {
            if (j >= validErasedLocations.length || i != validErasedLocations[j]) {
                try {
                    LOG.info("read input:" + i + " encoded bs:" + encodedBufSize + " " + System.nanoTime());
                    RaidUtils.readTillEnd(inputs[i], readBufs[i], encodedBufSize, true);
                    continue;
                } catch (BlockMissingException e) {
                    LOG.error("Encountered BlockMissingException in stream " + i);
                } catch (ChecksumException e) {
                    LOG.error("Encountered ChecksumException in stream " + i);
                }
            } else {
                j++;
                continue;
            }

            // too many fails
            if (validErasedLocations.length == paritySize) {
                String msg = "Too many read errors";
                LOG.error(msg);
                throw new IOException(msg);
            }

            // read fail, need to rebuild the stream.
            int[] newErasedLocations = new int[validErasedLocations.length + 1];
            for (int k = 0; k < validErasedLocations.length; k++) {
                newErasedLocations[k] = validErasedLocations[k];
            }
            newErasedLocations[newErasedLocations.length - 1] = i;
            int[] temp = new int[stripeSize + paritySize + 1];
            iaValidate(stripeSize, paritySize, newErasedLocations.length, newErasedLocations, temp);
            LOG.info("iaValidate pass 3");
            validErasedLocations = new int[temp[0]];
            encodedBufSize = bufSize * validErasedLocations.length / paritySize;
            System.arraycopy(temp, 1, validErasedLocations, 0, temp[0]);
            Arrays.sort(validErasedLocations);

            sf.closeStreams(inputs);
            sf.buildStream(inputs, validErasedLocations, seq * bufSize);
            //reset
            startTime = System.nanoTime();
            flag = true;
            break;
        }
    }

    int failNum = validErasedLocations.length;
    int bufOffset = encodedBufSize * (stripeSize + paritySize - failNum);
    ByteBuffer buf = ByteBuffer.allocate(bufOffset + 64);
    buf.putInt(bufOffset, seq);
    buf.rewind();

    LOG.info("end read encoded bs:" + encodedBufSize + " " + System.nanoTime());
    for (int i = 0, j = 0; i < inputs.length; i++)
        if (j >= validErasedLocations.length || i != validErasedLocations[j])
            buf.put(readBufs[i], 0, encodedBufSize);
        else
            j++;
    return new ReadPackage(validErasedLocations, buf);
}

From source file:org.apache.carbondata.core.util.CarbonUtil.java

/**
 * This method will form one single byte[] for all the high cardinality (no-dictionary) dimensions.
 * First it will add all the offsets of the variable-length byte[] values and then the
 * actual values.
 *
 * @param byteBufferArr byte buffers holding the individual column values
 * @return byte[] key.
 */
public static byte[] packByteBufferIntoSingleByteArray(ByteBuffer[] byteBufferArr) {
    // an empty array means there is no no-dictionary column data to pack.
    if (null == byteBufferArr || byteBufferArr.length == 0) {
        return null;
    }
    int noOfCol = byteBufferArr.length;
    short offsetLen = (short) (noOfCol * 2);
    int totalBytes = calculateTotalBytes(byteBufferArr) + offsetLen;
    ByteBuffer buffer = ByteBuffer.allocate(totalBytes);
    // writing the offset of the first element.
    buffer.putShort(offsetLen);

    // prepare index for byte []
    for (int index = 0; index < byteBufferArr.length - 1; index++) {
        ByteBuffer individualCol = byteBufferArr[index];
        int noOfBytes = individualCol.capacity();
        buffer.putShort((short) (offsetLen + noOfBytes));
        offsetLen += noOfBytes;
        individualCol.rewind();
    }

    // put actual data.
    for (int index = 0; index < byteBufferArr.length; index++) {
        ByteBuffer individualCol = byteBufferArr[index];
        buffer.put(individualCol.array());
    }

    buffer.rewind();
    return buffer.array();

}
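
A sketch of a hypothetical call, showing the layout this produces; the two sample column values are assumptions for illustration:

// Pack two variable-length column values into one key.
ByteBuffer col1 = ByteBuffer.wrap(new byte[] { 'a', 'b' });      // 2 bytes
ByteBuffer col2 = ByteBuffer.wrap(new byte[] { 'x', 'y', 'z' }); // 3 bytes
byte[] key = CarbonUtil.packByteBufferIntoSingleByteArray(new ByteBuffer[] { col1, col2 });
// Resulting 9-byte layout: short offset of col1 (= 4), short offset of col2 (= 6),
// followed by the bytes 'a' 'b' 'x' 'y' 'z'.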

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java

private void testInternals() throws IOException {
    final int numBlocks = 5;
    if (includesTag) {
        TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
    }
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
                Path path = new Path(TEST_UTIL.getDataTestDir(),
                        "blocks_v2_" + algo + "_" + encoding.toString());
                FSDataOutputStream os = fs.create(path);
                HFileDataBlockEncoder dataBlockEncoder = (encoding != DataBlockEncoding.NONE)
                        ? new HFileDataBlockEncoderImpl(encoding)
                        : NoOpDataBlockEncoder.INSTANCE;
                HFileContext meta = new HFileContextBuilder().withCompression(algo)
                        .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag)
                        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                        .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE).build();
                HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder, meta);
                long totalSize = 0;
                final List<Integer> encodedSizes = new ArrayList<Integer>();
                final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
                for (int blockId = 0; blockId < numBlocks; ++blockId) {
                    hbw.startWriting(BlockType.DATA);
                    writeTestKeyValues(hbw, blockId, includesMemstoreTS, includesTag);
                    hbw.writeHeaderAndData(os);
                    int headerLen = HConstants.HFILEBLOCK_HEADER_SIZE;
                    byte[] encodedResultWithHeader = hbw.getUncompressedBufferWithHeader().array();
                    final int encodedSize = encodedResultWithHeader.length - headerLen;
                    if (encoding != DataBlockEncoding.NONE) {
                        // We need to account for the two-byte encoding algorithm ID that
                        // comes after the 24-byte block header but before encoded KVs.
                        headerLen += DataBlockEncoding.ID_SIZE;
                    }
                    byte[] encodedDataSection = new byte[encodedResultWithHeader.length - headerLen];
                    System.arraycopy(encodedResultWithHeader, headerLen, encodedDataSection, 0,
                            encodedDataSection.length);
                    final ByteBuffer encodedBuf = ByteBuffer.wrap(encodedDataSection);
                    encodedSizes.add(encodedSize);
                    encodedBlocks.add(encodedBuf);
                    totalSize += hbw.getOnDiskSizeWithHeader();
                }
                os.close();

                FSDataInputStream is = fs.open(path);
                meta = new HFileContextBuilder().withHBaseCheckSum(true).withCompression(algo)
                        .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).build();
                HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, totalSize, meta);
                hbr.setDataBlockEncoder(dataBlockEncoder);
                hbr.setIncludesMemstoreTS(includesMemstoreTS);
                HFileBlock b;
                int pos = 0;
                for (int blockId = 0; blockId < numBlocks; ++blockId) {
                    b = hbr.readBlockData(pos, -1, -1, pread);
                    assertEquals(0, HFile.getChecksumFailuresCount());
                    b.sanityCheck();
                    pos += b.getOnDiskSizeWithHeader();
                    assertEquals((int) encodedSizes.get(blockId), b.getUncompressedSizeWithoutHeader());
                    ByteBuffer actualBuffer = b.getBufferWithoutHeader();
                    if (encoding != DataBlockEncoding.NONE) {
                        // We expect a two-byte big-endian encoding id.
                        assertEquals(0, actualBuffer.get(0));
                        assertEquals(encoding.getId(), actualBuffer.get(1));
                        actualBuffer.position(2);
                        actualBuffer = actualBuffer.slice();
                    }

                    ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
                    expectedBuffer.rewind();

                    // test if content matches, produce nice message
                    assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding, pread);
                }
                is.close();
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java

/**
 * It gives the row count, by summing up the individual results obtained from
 * regions. In case the qualifier is null, a FirstKeyOnlyFilter is used to
 * optimise the operation. In case a qualifier is provided, that filter cannot be
 * used, since it may set the flag to skip to the next row even though the value
 * read does not belong to the given qualifier; such a row would then not be
 * counted, producing an incorrect result.
 * @param table table to scan
 * @param ci column interpreter for the family/qualifier being counted
 * @param scan scan describing the rows to count
 * @return the total row count
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(final HTable table,
        final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true);
    class RowNumCallback implements Batch.Callback<Long> {
        private final AtomicLong rowCountL = new AtomicLong(0);

        public long getRowNumCount() {
            return rowCountL.get();
        }

        @Override
        public void update(byte[] region, byte[] row, Long result) {
            rowCountL.addAndGet(result.longValue());
        }
    }
    RowNumCallback rowNum = new RowNumCallback();
    table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<AggregateService, Long>() {
                @Override
                public Long call(AggregateService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<AggregateResponse> rpcCallback = new BlockingRpcCallback<AggregateResponse>();
                    instance.getRowNum(controller, requestArg, rpcCallback);
                    AggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
                    ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
                    bb.rewind();
                    return bb.getLong();
                }
            }, rowNum);
    return rowNum.getRowNumCount();
}
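
The callback decodes each per-region count with the put/rewind/getLong pattern: the response bytes are copied into an 8-byte buffer, rewind() moves the position back to zero, and getLong() reads them as a big-endian long. The same idea in isolation, with made-up payload bytes:

byte[] bytes = new byte[] { 0, 0, 0, 0, 0, 0, 0, 42 }; // hypothetical 8-byte big-endian payload
ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);      // position is now 8
bb.rewind();                                            // back to 0 so the value can be read
long count = bb.getLong();                              // 42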