Example usage for java.nio ByteBuffer arrayOffset

Introduction

This page collects example usages of java.nio.ByteBuffer.arrayOffset().

Prototype

public final int arrayOffset() 

Document

Returns the offset within this buffer's backing array of the first element of the buffer, provided the buffer is backed by an accessible array.
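
Before the usage examples below, here is a minimal, self-contained sketch (not taken from any of the sources on this page) showing what arrayOffset() reports and when it is safe to call:

import java.nio.ByteBuffer;

public class ArrayOffsetDemo {
    public static void main(String[] args) {
        ByteBuffer whole = ByteBuffer.allocate(16); // heap buffer, arrayOffset() == 0
        whole.position(4);
        ByteBuffer slice = whole.slice(); // shares the same backing array

        System.out.println(whole.arrayOffset()); // 0
        System.out.println(slice.arrayOffset()); // 4: slice index 0 maps to array index 4
        System.out.println(slice.array() == whole.array()); // true

        // array() and arrayOffset() throw on buffers without an accessible
        // backing array, so guard with hasArray() when the origin is unknown.
        ByteBuffer direct = ByteBuffer.allocateDirect(16);
        System.out.println(direct.hasArray()); // false
    }
}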

Usage

From source file: org.apache.hadoop.hbase.regionserver.TestHStoreFile.java

private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f) throws IOException {
    f.initReader();
    Cell midkey = f.getReader().midkey();
    KeyValue midKV = (KeyValue) midkey;
    byte[] midRow = CellUtil.cloneRow(midKV);
    // Create top split.
    HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), null, midRow);
    Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
    // Create bottom split.
    HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), midRow, null);
    Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
    // Make readers on top and bottom.
    HStoreFile topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true);
    topF.initReader();
    StoreFileReader top = topF.getReader();
    HStoreFile bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true);
    bottomF.initReader();
    StoreFileReader bottom = bottomF.getReader();
    ByteBuffer previous = null;
    LOG.info("Midkey: " + midKV.toString());
    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midKV.getKey());
    try {
        // Now make two HalfMapFiles and assert they can read the full backing
        // file, one from the top and the other from the bottom.
        // Test bottom half first.
        // Now test reading from the top.
        boolean first = true;
        ByteBuffer key = null;
        HFileScanner topScanner = top.getScanner(false, false);
        while ((!topScanner.isSeeked() && topScanner.seekTo())
                || (topScanner.isSeeked() && topScanner.next())) {
            key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());

            if ((topScanner.getReader().getComparator().compare(midKV, key.array(), key.arrayOffset(),
                    key.limit())) > 0) {
                fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey);
            }
            if (first) {
                first = false;
                LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
            }
        }
        LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));

        first = true;
        HFileScanner bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            if (first) {
                first = false;
                LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
            }
            assertTrue(key.compareTo(bbMidkeyBytes) < 0);
        }
        if (previous != null) {
            LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);

        // Next test using a midkey that does not exist in the file.
        // First, do a key that is < than first key. Ensure splits behave
        // properly.
        byte[] badmidkey = Bytes.toBytes("  .");
        assertTrue(fs.exists(f.getPath()));
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);

        assertNull(bottomPath);

        topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true);
        topF.initReader();
        top = topF.getReader();
        // Now read from the top.
        first = true;
        topScanner = top.getScanner(false, false);
        KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue();
        while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
            key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
            keyOnlyKV.setKey(key.array(), key.arrayOffset(), key.limit());
            assertTrue(topScanner.getReader().getComparator().compare(keyOnlyKV, badmidkey, 0,
                    badmidkey.length) >= 0);
            if (first) {
                first = false;
                KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
                LOG.info("First top when key < bottom: " + keyKV);
                String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
        LOG.info("Last top when key < bottom: " + keyKV);
        String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'z');
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);

        // Test when badmidkey is greater than the last key in the file ("|||" > "zz").
        badmidkey = Bytes.toBytes("|||");
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        assertNull(topPath);

        bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true);
        bottomF.initReader();
        bottom = bottomF.getReader();
        first = true;
        bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            if (first) {
                first = false;
                keyKV = KeyValueUtil.createKeyValueFromKey(key);
                LOG.info("First bottom when key > top: " + keyKV);
                tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        keyKV = KeyValueUtil.createKeyValueFromKey(key);
        LOG.info("Last bottom when key > top: " + keyKV);
        // Recompute the row string for the last key; reusing the earlier tmp
        // (still holding the first key's row) would give a stale loop bound.
        tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'z');
        }
    } finally {
        if (top != null) {
            top.close(true); // evict since we are about to delete the file
        }
        if (bottom != null) {
            bottom.close(true); // evict since we are about to delete the file
        }
        fs.delete(f.getPath(), true);
    }
}

From source file: org.apache.hadoop.hbase.KeyValue.java

/**
 * @param bb
 * @return A KeyValue made of a byte buffer that holds the key-only part.
 * Needed to convert hfile index members to KeyValues.
 */
public static KeyValue createKeyValueFromKey(final ByteBuffer bb) {
    return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit());
}
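
A caveat on this idiom: bb.limit() is passed as the key length, which is only correct when the buffer's position is 0, as it is for a freshly wrapped buffer. A minimal sketch of the general form, using a hypothetical helper (not part of the HBase API), for a buffer whose position may be nonzero:

static byte[] copyRemaining(ByteBuffer bb) {
    // Start at the absolute index of the next readable byte and copy
    // remaining() bytes; this is correct for any position/limit combination.
    byte[] out = new byte[bb.remaining()];
    System.arraycopy(bb.array(), bb.arrayOffset() + bb.position(), out, 0, bb.remaining());
    return out;
}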

From source file: de.hpi.fgis.hdrs.Triple.java

public void readFields(ByteBuffer header, ByteBuffer data) {
    // read header
    Slen = header.getShort();
    Plen = header.getShort();
    Olen = header.getInt();
    multiplicity = header.getInt();
    // read data
    this.buffer = data.array();
    int size = (int) Slen + (int) Plen + Olen;
    offset = data.arrayOffset() + data.position();
    data.position(data.position() + size);
}

From source file: de.hpi.fgis.hdrs.Triple.java

public void readFields(ByteBuffer buffer) {
    // read header
    Slen = buffer.getShort();
    Plen = buffer.getShort();
    Olen = buffer.getInt();
    multiplicity = buffer.getInt();
    // read data
    this.buffer = buffer.array();
    int size = (int) Slen + (int) Plen + Olen;
    offset = buffer.arrayOffset() + buffer.position();
    buffer.position(buffer.position() + size);
}
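
Both readFields overloads rely on the same identity: for a heap buffer, arrayOffset() + position() is the backing-array index of the next byte to be read. A standalone sketch with demo values (not part of the Triple class):

import java.nio.ByteBuffer;

public class AbsoluteIndexDemo {
    public static void main(String[] args) {
        byte[] backing = { 10, 20, 30, 40 };
        ByteBuffer data = ByteBuffer.wrap(backing, 1, 3).slice(); // arrayOffset() == 1
        data.get();                                               // position() == 1
        int absolute = data.arrayOffset() + data.position();      // 1 + 1 == 2
        System.out.println(backing[absolute]); // 30: the next byte data.get() would return
    }
}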

From source file: org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext.java

@Override
public void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
        ByteBuffer blockBufferWithoutHeader, byte[] onDiskBlock, int offset) throws IOException {
    InputStream in = new DataInputStream(
            new ByteArrayInputStream(onDiskBlock, offset, onDiskSizeWithoutHeader));

    Encryption.Context cryptoContext = fileContext.getEncryptionContext();
    if (cryptoContext != Encryption.Context.NONE) {

        Cipher cipher = cryptoContext.getCipher();
        Decryptor decryptor = cipher.getDecryptor();
        decryptor.setKey(cryptoContext.getKey());

        // Encrypted block format:
        // +--------------------------+
        // | byte iv length           |
        // +--------------------------+
        // | iv data ...              |
        // +--------------------------+
        // | encrypted block data ... |
        // +--------------------------+

        // All encrypted blocks will have a nonzero IV length. If we see an IV
        // length of zero, it means the encoding context had 0 bytes of
        // plaintext to encode, so there is nothing to decrypt.
        int ivLength = in.read();
        if (ivLength > 0) {
            byte[] iv = new byte[ivLength];
            IOUtils.readFully(in, iv);
            decryptor.setIv(iv);
            decryptor.reset();
            in = decryptor.createDecryptionStream(in);
        }
        onDiskSizeWithoutHeader -= Bytes.SIZEOF_BYTE + ivLength;
    }

    Compression.Algorithm compression = fileContext.getCompression();
    if (compression != Compression.Algorithm.NONE) {
        Compression.decompress(blockBufferWithoutHeader.array(), blockBufferWithoutHeader.arrayOffset(), in,
                onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, compression);
    } else {
        IOUtils.readFully(in, blockBufferWithoutHeader.array(), blockBufferWithoutHeader.arrayOffset(),
                onDiskSizeWithoutHeader);
    }
}
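
The block framing sketched in the comment above can be illustrated with a small, hypothetical writer (illustrative only; the real stream handling is done by HBase's Encryption and Cipher classes):

static byte[] frame(byte[] iv, byte[] ciphertext) {
    // [ 1-byte IV length | IV bytes | encrypted block data ]
    ByteBuffer out = ByteBuffer.allocate(1 + iv.length + ciphertext.length);
    out.put((byte) iv.length).put(iv).put(ciphertext);
    return out.array(); // freshly allocated heap buffer: arrayOffset() == 0
}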

From source file: org.apache.hadoop.hbase.io.hfile.HFileBlock.java

/**
 * Always allocates a new buffer of the correct size. Copies header bytes
 * from the existing buffer. Does not change header fields.
 * Reserve room to keep checksum bytes too.
 */
private void allocateBuffer() {
    int cksumBytes = totalChecksumBytes();
    int headerSize = headerSize();
    int capacityNeeded = headerSize + uncompressedSizeWithoutHeader + cksumBytes;

    // TODO: consider allocating this buffer off-heap.
    ByteBuffer newBuf = ByteBuffer.allocate(capacityNeeded);

    // Copy header bytes into newBuf.
    // newBuf is a heap ByteBuffer, so calling array() here is safe
    buf.position(0);
    buf.get(newBuf.array(), newBuf.arrayOffset(), headerSize);

    buf = new SingleByteBuff(newBuf);
    // set limit to exclude next block's header
    buf.limit(headerSize + uncompressedSizeWithoutHeader + cksumBytes);
}

From source file: com.blm.orc.RecordReaderImpl.java

OrcProto.StripeFooter readStripeFooter(StripeInformation stripe) throws IOException {
    long offset = stripe.getOffset() + stripe.getIndexLength() + stripe.getDataLength();
    int tailLength = (int) stripe.getFooterLength();

    // read the footer
    ByteBuffer tailBuf = ByteBuffer.allocate(tailLength);
    file.seek(offset);
    file.readFully(tailBuf.array(), tailBuf.arrayOffset(), tailLength);
    return OrcProto.StripeFooter.parseFrom(InStream.create("footer", new ByteBuffer[] { tailBuf },
            new long[] { 0 }, tailLength, codec, bufferSize));
}

From source file: org.apache.hadoop.hdfs.server.datanode.DWRRBlockReceiver.java

/**
 * Receives and processes a packet. It can contain many chunks.
 * returns the number of data bytes that the packet has.
 */
private int receivePacket() throws IOException {
    // read the next packet
    packetReceiver.receiveNextPacket(in);

    PacketHeader header = packetReceiver.getHeader();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Receiving one packet for block " + block + ": " + header);
    }

    // Sanity check the header
    if (header.getOffsetInBlock() > replicaInfo.getNumBytes()) {
        throw new IOException("Received an out-of-sequence packet for " + block + "from " + inAddr
                + " at offset " + header.getOffsetInBlock() + ". Expecting packet starting at "
                + replicaInfo.getNumBytes());
    }
    if (header.getDataLen() < 0) {
        throw new IOException("Got wrong length during writeBlock(" + block + ") from " + inAddr + " at offset "
                + header.getOffsetInBlock() + ": " + header.getDataLen());
    }

    long offsetInBlock = header.getOffsetInBlock();
    long seqno = header.getSeqno();
    boolean lastPacketInBlock = header.isLastPacketInBlock();
    int len = header.getDataLen();
    boolean syncBlock = header.getSyncBlock();

    // avoid double sync'ing on close
    if (syncBlock && lastPacketInBlock) {
        this.syncOnClose = false;
    }

    // update received bytes
    long firstByteInBlock = offsetInBlock;
    offsetInBlock += len;
    if (replicaInfo.getNumBytes() < offsetInBlock) {
        replicaInfo.setNumBytes(offsetInBlock);
    }

    // put in queue for pending acks, unless sync was requested
    if (responder != null && !syncBlock && !shouldVerifyChecksum()) {
        ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock, offsetInBlock,
                Status.SUCCESS);
    }

    //First write the packet to the mirror:
    if (mirrorOut != null && !mirrorError) {
        try {
            long begin = Time.monotonicNow();
            packetReceiver.mirrorPacketTo(mirrorOut);
            mirrorOut.flush();
            long duration = Time.monotonicNow() - begin;
            if (duration > datanodeSlowLogThresholdMs) {
                LOG.warn("Slow DWRRBlockReceiver write packet to mirror took " + duration + "ms (threshold="
                        + datanodeSlowLogThresholdMs + "ms)");
            }
        } catch (IOException e) {
            handleMirrorOutError(e);
        }
    }

    ByteBuffer dataBuf = packetReceiver.getDataSlice();
    ByteBuffer checksumBuf = packetReceiver.getChecksumSlice();

    if (lastPacketInBlock || len == 0) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Receiving an empty packet or the end of the block " + block);
        }
        // sync block if requested
        //      if (syncBlock) {
        //        flushOrSync(true);
        //      }
    } else {
        int checksumLen = ((len + bytesPerChecksum - 1) / bytesPerChecksum) * checksumSize;

        if (checksumBuf.capacity() != checksumLen) {
            throw new IOException("Length of checksums in packet " + checksumBuf.capacity()
                    + " does not match calculated checksum " + "length " + checksumLen);
        }

        if (shouldVerifyChecksum()) {
            try {
                verifyChunks(dataBuf, checksumBuf);
            } catch (IOException ioe) {
                // checksum error detected locally. there is no reason to continue.
                if (responder != null) {
                    try {
                        ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock,
                                offsetInBlock, Status.ERROR_CHECKSUM);
                        // Wait until the responder sends back the response
                        // and interrupt this thread.
                        Thread.sleep(3000);
                    } catch (InterruptedException e) {
                        // ignore; this thread is about to terminate with an IOException
                    }
                }
                throw new IOException("Terminating due to a checksum error." + ioe);
            }

            if (needsChecksumTranslation) {
                // overwrite the checksums in the packet buffer with the
                // appropriate polynomial for the disk storage.
                translateChunks(dataBuf, checksumBuf);
            }
        }

        // by this point, the data in the buffer uses the disk checksum

        try {
            long onDiskLen = replicaInfo.getBytesOnDisk();
            if (onDiskLen < offsetInBlock) {
                //finally write to the disk :

                //          if (onDiskLen % bytesPerChecksum != 0) {
                //            // prepare to overwrite last checksum
                //            adjustCrcFilePosition();
                //          }

                // If this is a partial chunk, then read in pre-existing checksum
                if (firstByteInBlock % bytesPerChecksum != 0) {
                    LOG.info("Packet starts at " + firstByteInBlock + " for " + block
                            + " which is not a multiple of bytesPerChecksum " + bytesPerChecksum);
                    long offsetInChecksum = BlockMetadataHeader.getHeaderSize()
                            + onDiskLen / bytesPerChecksum * checksumSize;
                    computePartialChunkCrc(onDiskLen, offsetInChecksum, bytesPerChecksum);
                }

                int startByteToDisk = (int) (onDiskLen - firstByteInBlock) + dataBuf.arrayOffset()
                        + dataBuf.position();

                int numBytesToDisk = (int) (offsetInBlock - onDiskLen);

                toBeWritten.add(new DWRRWriteRequest(dataBuf.array().clone(), startByteToDisk, numBytesToDisk,
                        checksumBuf.duplicate(), checksumBuf.array().clone(), checksumLen, len, offsetInBlock,
                        syncBlock, lastPacketInBlock));

                ChunkChecksum last = replicaInfo.getLastChecksumAndDataLen();

                if (offsetInBlock % bytesPerChecksum != 0) {
                    LOG.error("CAMAMILLA " + this + "  mod onDiskLen petara offsetInBlock " + offsetInBlock
                            + " bytesPerChecksum " + bytesPerChecksum); // TODO TODO log
                }
                replicaInfo.setLastChecksumAndDataLen(offsetInBlock, last.getChecksum());

            }
        } catch (IOException iex) {
            datanode.checkDiskError();
            throw iex;
        }
    }

    // if sync was requested, put in queue for pending acks here
    // (after the fsync finished)
    if (responder != null && (syncBlock || shouldVerifyChecksum())) {
        LOG.info("CAMAMILLA " + this + "  PacketResponder enqueue ack al llegir de xarxa 1"); // TODO TODO log
        ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock, offsetInBlock,
                Status.SUCCESS);
    }

    if (throttler != null) { // throttle I/O
        throttler.throttle(len);
    }

    return lastPacketInBlock ? -1 : len;
}
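
The startByteToDisk computation above combines two offsets: the packet payload's start inside the shared receive buffer (dataBuf.arrayOffset() + dataBuf.position()) and the number of payload bytes already persisted (onDiskLen - firstByteInBlock). A standalone sketch of that arithmetic with hypothetical numbers:

import java.nio.ByteBuffer;

public class StartByteDemo {
    public static void main(String[] args) {
        ByteBuffer recvBuf = ByteBuffer.allocate(64);
        recvBuf.position(16);
        ByteBuffer dataBuf = recvBuf.slice(); // payload slice, arrayOffset() == 16

        long firstByteInBlock = 1024; // block offset where this packet starts
        long onDiskLen = 1536;        // 512 packet bytes are already on disk
        long offsetInBlock = 2048;    // block length once this packet is written

        // Skip the already-persisted prefix of the payload within the backing array.
        int startByteToDisk = (int) (onDiskLen - firstByteInBlock)
                + dataBuf.arrayOffset() + dataBuf.position();
        int numBytesToDisk = (int) (offsetInBlock - onDiskLen);
        System.out.println(startByteToDisk + " " + numBytesToDisk); // 528 512
    }
}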