Example usage for java.nio ByteBuffer arrayOffset

Introduction

This page collects usage examples for the java.nio.ByteBuffer.arrayOffset() method.

Prototype

public final int arrayOffset() 

Document

Returns the offset, within the byte array backing this buffer, of the buffer's first element, if the buffer has an accessible backing array.
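
A quick, self-contained sketch (not taken from any of the projects below) of what arrayOffset() reports: after slice(), the view's index 0 maps to a non-zero offset in the shared backing array, which is why callers combine arrayOffset() with position() to compute an absolute array index.

import java.nio.ByteBuffer;

public class ArrayOffsetDemo {
    public static void main(String[] args) {
        ByteBuffer whole = ByteBuffer.allocate(16);
        whole.position(4);
        ByteBuffer view = whole.slice(); // shares the backing array of 'whole'
        System.out.println(whole.arrayOffset()); // 0
        System.out.println(view.arrayOffset());  // 4
        // Absolute index of the view's current position in the shared array:
        System.out.println(view.arrayOffset() + view.position()); // 4
    }
}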

Usage

From source file:co.cask.cdap.client.rest.RestStreamWriter.java

@Override
public ListenableFuture<Void> write(ByteBuffer buffer, Map<String, String> headers)
        throws IllegalArgumentException {
    Preconditions.checkArgument(buffer != null, "ByteBuffer parameter is null.");
    HttpEntity content;
    if (buffer.hasArray()) {
        content = new ByteArrayEntity(buffer.array(), buffer.arrayOffset() + buffer.position(),
                buffer.remaining());
    } else {
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes);
        content = new ByteArrayEntity(bytes);
    }
    return write(content, headers);
}
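
The hasArray() check above is the standard guard: direct (off-heap) buffers have no accessible backing array, and calling array() or arrayOffset() on one throws UnsupportedOperationException. A minimal standalone sketch of the distinction (hypothetical class name, not from the project above):

import java.nio.ByteBuffer;

public class DirectBufferCheck {
    public static void main(String[] args) {
        ByteBuffer heap = ByteBuffer.allocate(16);         // backed by a byte[]
        ByteBuffer direct = ByteBuffer.allocateDirect(16); // off-heap, no byte[]
        System.out.println(heap.hasArray());   // true
        System.out.println(direct.hasArray()); // false
        // direct.array() would throw UnsupportedOperationException here,
        // hence the copy fallback in the write() method above.
    }
}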

From source file:org.apache.hadoop.hbase.io.HalfHFileReader.java

@Override
public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread) {
    final HFileScanner s = super.getScanner(cacheBlocks, pread);
    return new HFileScanner() {
        final HFileScanner delegate = s;
        public boolean atEnd = false;

        public ByteBuffer getKey() {
            if (atEnd)
                return null;
            return delegate.getKey();
        }

        public String getKeyString() {
            if (atEnd)
                return null;

            return delegate.getKeyString();
        }

        public ByteBuffer getValue() {
            if (atEnd)
                return null;

            return delegate.getValue();
        }

        public String getValueString() {
            if (atEnd)
                return null;

            return delegate.getValueString();
        }

        public KeyValue getKeyValue() {
            if (atEnd)
                return null;

            return delegate.getKeyValue();
        }

        public boolean next() throws IOException {
            if (atEnd)
                return false;

            boolean b = delegate.next();
            if (!b) {
                return b;
            }
            // constrain the bottom.
            if (!top) {
                ByteBuffer bb = getKey();
                if (getComparator().compare(bb.array(), bb.arrayOffset(), bb.limit(), splitkey, 0,
                        splitkey.length) >= 0) {
                    atEnd = true;
                    return false;
                }
            }
            return true;
        }

        public boolean seekBefore(byte[] key) throws IOException {
            return seekBefore(key, 0, key.length);
        }

        public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
            if (top) {
                if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) < 0) {
                    return false;
                }
            } else {
                if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) >= 0) {
                    return seekBefore(splitkey, 0, splitkey.length);
                }
            }
            return this.delegate.seekBefore(key, offset, length);
        }

        public boolean seekTo() throws IOException {
            if (top) {
                int r = this.delegate.seekTo(splitkey);
                if (r < 0) {
                    // midkey is < first key in file
                    return this.delegate.seekTo();
                }
                if (r > 0) {
                    return this.delegate.next();
                }
                return true;
            }

            boolean b = delegate.seekTo();
            if (!b) {
                return b;
            }
            // Check key.
            ByteBuffer k = this.delegate.getKey();
            return this.delegate.getReader().getComparator().compare(k.array(), k.arrayOffset(), k.limit(),
                    splitkey, 0, splitkey.length) < 0;
        }

        public int seekTo(byte[] key) throws IOException {
            return seekTo(key, 0, key.length);
        }

        public int seekTo(byte[] key, int offset, int length) throws IOException {
            if (top) {
                if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) < 0) {
                    return -1;
                }
            } else {
                if (getComparator().compare(key, offset, length, splitkey, 0, splitkey.length) >= 0) {
                    // we would place the scanner in the second half.
                    // it might be an error to return false here ever...
                    boolean res = delegate.seekBefore(splitkey, 0, splitkey.length);
                    if (!res) {
                        throw new IOException(
                                "Seeking for a key in bottom of file, but key exists in top of file, failed on seekBefore(midkey)");
                    }
                    return 1;
                }
            }
            return delegate.seekTo(key, offset, length);
        }

        public Reader getReader() {
            return this.delegate.getReader();
        }

        public boolean isSeeked() {
            return this.delegate.isSeeked();
        }
    };
}

From source file:com.facebook.hive.orc.ReaderImpl.java

public ReaderImpl(FileSystem fs, Path path, Configuration conf) throws IOException {
    try {
        this.fileSystem = fs;
        this.path = path;
        this.conf = conf;
        FSDataInputStream file = fs.open(path);
        long size = fs.getFileStatus(path).getLen();
        int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
        ByteBuffer buffer = ByteBuffer.allocate(readSize);
        InStream.read(file, size - readSize, buffer.array(), buffer.arrayOffset() + buffer.position(),
                buffer.remaining());
        int psLen = buffer.get(readSize - 1);
        int psOffset = readSize - 1 - psLen;
        CodedInputStream in = CodedInputStream.newInstance(buffer.array(), buffer.arrayOffset() + psOffset,
                psLen);
        OrcProto.PostScript ps = OrcProto.PostScript.parseFrom(in);
        int footerSize = (int) ps.getFooterLength();
        bufferSize = (int) ps.getCompressionBlockSize();
        switch (ps.getCompression()) {
        case NONE:
            compressionKind = CompressionKind.NONE;
            break;
        case ZLIB:
            compressionKind = CompressionKind.ZLIB;
            break;
        case SNAPPY:
            compressionKind = CompressionKind.SNAPPY;
            break;
        case LZO:
            compressionKind = CompressionKind.LZO;
            break;
        default:
            throw new IllegalArgumentException("Unknown compression");
        }
        codec = WriterImpl.createCodec(compressionKind);

        InputStream instream = InStream.create("footer", file, size - 1 - psLen - footerSize, footerSize, codec,
                bufferSize);
        footer = OrcProto.Footer.parseFrom(instream);
        inspector = new OrcLazyRowObjectInspector(0, footer.getTypesList());
        file.close();
    } catch (IndexOutOfBoundsException e) {
        /**
         * When a non-ORC file is read by the ORC reader, an IndexOutOfBoundsException is thrown
         * while creating the reader. Catch that exception and check the file header to see
         * whether the input file is ORC. If it is not, throw a NotAnORCFileException naming the
         * file being read (thus helping to figure out which table partition was being read).
         */
        checkIfORC(fs, path);
        throw new IOException("Failed to create record reader for file " + path, e);
    } catch (IOException e) {
        throw new IOException("Failed to create record reader for file " + path, e);
    }
}

From source file:org.eclipse.jgit.lfs.server.fs.LfsServerTest.java

private void checkResponseStatus(HttpResponse response) {
    StatusLine statusLine = response.getStatusLine();
    int status = statusLine.getStatusCode();
    if (statusLine.getStatusCode() >= 400) {
        String error;
        try {
            ByteBuffer buf = IO.readWholeStream(new BufferedInputStream(response.getEntity().getContent()),
                    1024);
            if (buf.hasArray()) {
                error = new String(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining(), UTF_8);
            } else {
                final byte[] b = new byte[buf.remaining()];
                buf.duplicate().get(b);
                error = new String(b, UTF_8);
            }
        } catch (IOException e) {
            error = statusLine.getReasonPhrase();
        }
        throw new RuntimeException("Status: " + status + " " + error);
    }
    assertEquals(200, status);
}

From source file:org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.java

/**
 * To check whether a trailer is present in a WAL, this seeks to position (fileLength -
 * PB_WAL_COMPLETE_MAGIC.size() - Bytes.SIZEOF_INT). It reads the int value to learn the size of
 * the trailer, and checks whether the trailer is present at the end by comparing the last
 * PB_WAL_COMPLETE_MAGIC.size() bytes. If the trailer is not present, it returns false;
 * otherwise, it sets the trailer and sets this.walEditsStopOffset to the point just
 * before the trailer.
 * <ul>
 * The trailer is ignored in case:
 * <li>fileLength is 0 or not correct (when the file is under recovery, etc.).
 * <li>the trailer size is negative.
 * </ul>
 * <p>
 * In case the trailer size > this.trailerWarnSize, it is read after a WARN message.
 * @return true if a valid trailer is present
 */
private boolean setTrailerIfPresent() {
    try {
        long trailerSizeOffset = this.fileLength - (PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT);
        if (trailerSizeOffset <= 0)
            return false;// no trailer possible.
        this.seekOnFs(trailerSizeOffset);
        // read the int as trailer size.
        int trailerSize = this.inputStream.readInt();
        ByteBuffer buf = ByteBuffer.allocate(ProtobufLogReader.PB_WAL_COMPLETE_MAGIC.length);
        this.inputStream.readFully(buf.array(), buf.arrayOffset(), buf.capacity());
        if (!Arrays.equals(buf.array(), PB_WAL_COMPLETE_MAGIC)) {
            LOG.trace("No trailer found.");
            return false;
        }
        if (trailerSize < 0) {
            LOG.warn("Invalid trailer Size " + trailerSize + ", ignoring the trailer");
            return false;
        } else if (trailerSize > this.trailerWarnSize) {
            // continue reading after warning the user.
            LOG.warn("Please investigate WALTrailer usage. Trailer size > maximum configured size : "
                    + trailerSize + " > " + this.trailerWarnSize);
        }
        // seek to the position where trailer starts.
        long positionOfTrailer = trailerSizeOffset - trailerSize;
        this.seekOnFs(positionOfTrailer);
        // read the trailer.
        buf = ByteBuffer.allocate(trailerSize);// for trailer.
        this.inputStream.readFully(buf.array(), buf.arrayOffset(), buf.capacity());
        trailer = WALTrailer.parseFrom(buf.array());
        this.walEditsStopOffset = positionOfTrailer;
        return true;
    } catch (IOException ioe) {
        LOG.warn("Got IOE while reading the trailer. Continuing as if no trailer is present.", ioe);
    }
    return false;
}

From source file:io.protostuff.JsonOutput.java

/**
 * Writes a ByteBuffer field.
 */
@Override
public void writeBytes(int fieldNumber, ByteBuffer value, boolean repeated) throws IOException {
    writeByteRange(false, fieldNumber, value.array(), value.arrayOffset() + value.position(), value.remaining(),
            repeated);
}

From source file:org.apache.hadoop.hbase.io.hfile.TestChecksum.java

protected void testChecksumCorruptionInternals(boolean useTags) throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            LOG.info("testChecksumCorruption: Compression algorithm: " + algo + ", pread=" + pread);
            Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo);
            FSDataOutputStream os = fs.create(path);
            HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true)
                    .withIncludesTags(useTags).withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
                    .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build();
            HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
            long totalSize = 0;
            for (int blockId = 0; blockId < 2; ++blockId) {
                DataOutputStream dos = hbw.startWriting(BlockType.DATA);
                for (int i = 0; i < 1234; ++i)
                    dos.writeInt(i);
                hbw.writeHeaderAndData(os);
                totalSize += hbw.getOnDiskSizeWithHeader();
            }
            os.close();

            // Use hbase checksums. 
            assertEquals(true, hfs.useHBaseChecksum());

            // Do a read that purposely introduces checksum verification failures.
            FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
            meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true)
                    .withIncludesTags(useTags).withHBaseCheckSum(true).build();
            HFileBlock.FSReader hbr = new FSReaderV2Test(is, totalSize, fs, path, meta);
            HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // read data back from the hfile, exclude header and checksum
            ByteBuffer bb = b.getBufferWithoutHeader(); // read back data
            DataInputStream in = new DataInputStream(
                    new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));

            // assert that we encountered hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(1, HFile.getChecksumFailuresCount());
            validateData(in);

            // A single instance of hbase checksum failure causes the reader to
            // switch off hbase checksum verification for the next 100 read
            // requests. Verify that this is correct.
            for (int i = 0; i < HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
                b = hbr.readBlockData(0, -1, -1, pread);
                assertEquals(0, HFile.getChecksumFailuresCount());
            }
            // The next read should have hbase checksum verification re-enabled;
            // we verify this by asserting that there was an hbase-checksum failure.
            b = hbr.readBlockData(0, -1, -1, pread);
            assertEquals(1, HFile.getChecksumFailuresCount());

            // Since the above encountered a checksum failure, we switch
            // back to not checking hbase checksums.
            b = hbr.readBlockData(0, -1, -1, pread);
            assertEquals(0, HFile.getChecksumFailuresCount());
            is.close();

            // Now, use a completely new reader. Switch off hbase checksums in 
            // the configuration. In this case, we should not detect
            // any retries within hbase. 
            HFileSystem newfs = new HFileSystem(TEST_UTIL.getConfiguration(), false);
            assertEquals(false, newfs.useHBaseChecksum());
            is = new FSDataInputStreamWrapper(newfs, path);
            hbr = new FSReaderV2Test(is, totalSize, newfs, path, meta);
            b = hbr.readBlockData(0, -1, -1, pread);
            is.close();
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // read data back from the hfile, exclude header and checksum
            bb = b.getBufferWithoutHeader(); // read back data
            in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));

            // assert that we did not encounter hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(0, HFile.getChecksumFailuresCount());
            validateData(in);
        }
    }
}

From source file:edu.umn.cs.spatialHadoop.core.ZCurvePartitioner.java

@Override
public void write(DataOutput out) throws IOException {
    mbr.write(out);
    out.writeInt(zSplits.length);
    ByteBuffer bbuffer = ByteBuffer.allocate(zSplits.length * 8);
    for (long zSplit : zSplits)
        bbuffer.putLong(zSplit);
    if (bbuffer.hasRemaining())
        throw new RuntimeException("Did not calculate buffer size correctly");
    out.write(bbuffer.array(), bbuffer.arrayOffset(), bbuffer.position());
}
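
For context, a hedged sketch of the matching deserialization (not shown in the project source above; it assumes a readFields counterpart in the Hadoop Writable style and that mbr is itself a Writable):

@Override
public void readFields(DataInput in) throws IOException {
    mbr.readFields(in); // assumes mbr implements Writable
    int count = in.readInt();
    zSplits = new long[count];
    byte[] packed = new byte[count * 8];
    in.readFully(packed); // write() emitted exactly count * 8 bytes
    ByteBuffer bbuffer = ByteBuffer.wrap(packed);
    for (int i = 0; i < count; i++)
        zSplits[i] = bbuffer.getLong();
}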

From source file:edu.umn.cs.spatialHadoop.indexing.BTRPartitioner.java

@Override
public void write(DataOutput out) throws IOException {
    mbr.write(out);
    out.writeInt(columns);
    out.writeInt(rows);
    ByteBuffer bbuffer = ByteBuffer.allocate((xSplits.length + ySplits.length) * 8);
    for (double xSplit : xSplits)
        bbuffer.putDouble(xSplit);
    for (double ySplit : ySplits)
        bbuffer.putDouble(ySplit);
    if (bbuffer.hasRemaining())
        throw new RuntimeException("Did not calculate buffer size correctly");
    out.write(bbuffer.array(), bbuffer.arrayOffset(), bbuffer.position());
}

From source file:org.apache.hadoop.hbase.io.ValueSplitHalfStoreFileReader.java

@Override
public HFileScanner getScanner(final boolean cacheBlocks, final boolean pread, final boolean isCompaction) {
    final HFileScanner s = getHFileReader().getScanner(cacheBlocks, pread, isCompaction);
    return new HFileScanner() {
        final HFileScanner delegate = s;

        @Override
        public ByteBuffer getKey() {
            return delegate.getKey();
        }

        @Override
        public String getKeyString() {
            return delegate.getKeyString();
        }

        @Override
        public ByteBuffer getValue() {
            return delegate.getValue();
        }

        @Override
        public String getValueString() {
            return delegate.getValueString();
        }

        @Override
        public KeyValue getKeyValue() {
            return delegate.getKeyValue();
        }

        @Override
        public boolean next() throws IOException {
            while (delegate.next()) {
                if (isCurrentKVValid()) {
                    return true;
                }
            }
            return false;
        }

        @Override
        public boolean seekBefore(byte[] key) throws IOException {
            return seekBefore(key, 0, key.length);
        }

        @Override
        public boolean seekBefore(byte[] key, int offset, int length) throws IOException {
            byte[] seekKey = key;
            int seekKeyOffset = offset;
            int seekKeyLength = length;
            while (delegate.seekBefore(seekKey, seekKeyOffset, seekKeyLength)) {
                if (isCurrentKVValid()) {
                    return true;
                }
                ByteBuffer curKey = getKey();
                if (curKey == null)
                    return false;
                seekKey = curKey.array();
                seekKeyOffset = curKey.arrayOffset();
                seekKeyLength = curKey.limit();
            }
            return false;
        }

        private boolean isCurrentKVValid() {
            ByteBuffer value = getValue();
            if (!top) {
                // Current value < split key, it belongs to bottom, return true
                if (Bytes.compareTo(value.array(), value.arrayOffset(), value.limit(), splitvalue, 0,
                        splitvalue.length) < 0) {
                    return true;
                }
            } else {
                if (Bytes.compareTo(value.array(), value.arrayOffset(), value.limit(), splitvalue, 0,
                        splitvalue.length) >= 0) {
                    return true;
                }
            }
            return false;
        }

        @Override
        public boolean seekTo() throws IOException {
            boolean b = delegate.seekTo();
            if (!b) {
                return b;
            }

            if (isCurrentKVValid()) {
                return true;
            }

            return next();
        }

        @Override
        public int seekTo(byte[] key) throws IOException {
            return seekTo(key, 0, key.length);
        }

        public int seekTo(byte[] key, int offset, int length) throws IOException {
            int b = delegate.seekTo(key, offset, length);
            if (b < 0) {
                return b;
            } else {
                if (isCurrentKVValid()) {
                    return b;
                } else {
                    boolean existBefore = seekBefore(key, offset, length);
                    if (existBefore) {
                        return 1;
                    }
                    return -1;
                }
            }
        }

        @Override
        public int reseekTo(byte[] key) throws IOException {
            return reseekTo(key, 0, key.length);
        }

        @Override
        public int reseekTo(byte[] key, int offset, int length) throws IOException {
            int b = delegate.reseekTo(key, offset, length);
            if (b < 0) {
                return b;
            } else {
                if (isCurrentKVValid()) {
                    return b;
                } else {
                    boolean existBefore = seekBefore(key, offset, length);
                    if (existBefore) {
                        return 1;
                    }
                    return -1;
                }
            }
        }

        public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
            return this.delegate.getReader();
        }

        public boolean isSeeked() {
            return this.delegate.isSeeked();
        }
    };
}