Example usage for java.nio ByteBuffer remaining

List of usage examples for java.nio ByteBuffer remaining

Introduction

On this page you can find example usage for java.nio ByteBuffer remaining.

Prototype

public final int remaining() 

Document

Returns the number of remaining elements in this buffer, that is, limit - position.
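
A minimal, self-contained sketch (not taken from any of the sources below) illustrating the invariant remaining() == limit - position:

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.put((byte) 1).put((byte) 2).put((byte) 3);
        // After three relative puts: position = 3, limit = 16.
        System.out.println(buf.remaining());              // 13
        buf.flip();
        // After flip(): position = 0, limit = 3.
        System.out.println(buf.remaining());              // 3
        System.out.println(buf.limit() - buf.position()); // 3, by definition
    }
}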

Usage

From source file:org.apache.hadoop.hive.ql.io.orc.ExternalCache.java

private static OrcTail createOrcTailFromMs(HdfsFileStatusWithId file, ByteBuffer bb) throws IOException {
    if (bb == null)
        return null;
    FileStatus fs = file.getFileStatus();
    ByteBuffer copy = bb.duplicate();
    try {
        OrcTail orcTail = ReaderImpl.extractFileTail(copy, fs.getLen(), fs.getModificationTime());
        // trigger lazy read of metadata to make sure serialized data is not corrupted and readable
        orcTail.getStripeStatistics();
        return orcTail;
    } catch (Exception ex) {
        // Copy the remaining bytes out of the heap-backed buffer so they can
        // be hex-dumped in the error message below.
        byte[] data = new byte[bb.remaining()];
        System.arraycopy(bb.array(), bb.arrayOffset() + bb.position(), data, 0, data.length);
        String msg = "Failed to parse the footer stored in cache for file ID " + file.getFileId() + " " + bb
                + " [ " + Hex.encodeHexString(data) + " ]";
        LOG.error(msg, ex);
        return null;
    }
}

From source file:LamportBasicVersion.java

private static String byteToString(ByteBuffer byteBufferFromNeighbor, MessageInfo messageInfoFromNeighbor) {
    // Rewind and bound the buffer to the received message length before copying.
    byteBufferFromNeighbor.position(0);
    byteBufferFromNeighbor.limit(messageInfoFromNeighbor.bytes());
    byte[] bufArr = new byte[byteBufferFromNeighbor.remaining()];
    byteBufferFromNeighbor.get(bufArr);
    return new String(bufArr); // note: decodes with the platform default charset
}

From source file:backtype.storm.utils.Utils.java

public static byte[] toByteArray(ByteBuffer buffer) {
    byte[] ret = new byte[buffer.remaining()];
    buffer.get(ret, 0, ret.length);
    return ret;
}
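
One detail worth noting: the relative bulk get consumes the source buffer. A small hypothetical sketch (not from the source file) of that behavior:

ByteBuffer buf = ByteBuffer.wrap(new byte[] { 10, 20, 30 });
byte[] bytes = Utils.toByteArray(buf); // copies all 3 remaining bytes
System.out.println(buf.remaining());   // 0; the bulk get advanced position to limit
// Pass buf.duplicate() instead if the original position must be preserved.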

From source file:org.apache.hadoop.fs.TestEnhancedByteBufferAccess.java

private static void testFallbackImpl(InputStream stream, byte original[]) throws Exception {
    RestrictedAllocatingByteBufferPool bufferPool = new RestrictedAllocatingByteBufferPool(
            stream instanceof ByteBufferReadable);

    ByteBuffer result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
    Assert.assertEquals(10, result.remaining());
    Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 10), byteBufferToArray(result));

    result = ByteBufferUtil.fallbackRead(stream, bufferPool, 5000);
    Assert.assertEquals(5000, result.remaining());
    Assert.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010), byteBufferToArray(result));

    result = ByteBufferUtil.fallbackRead(stream, bufferPool, 9999999);
    Assert.assertEquals(11375, result.remaining());
    Assert.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385), byteBufferToArray(result));

    result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
    Assert.assertNull(result);
}

From source file:io.mycat.util.ByteBufferUtil.java

public static InputStream inputStream(ByteBuffer bytes) {
    final ByteBuffer copy = bytes.duplicate();

    return new InputStream() {
        public int read() {
            if (!copy.hasRemaining()) {
                return -1;
            }

            return copy.get() & 0xFF;
        }

        @Override
        public int read(byte[] bytes, int off, int len) {
            if (!copy.hasRemaining()) {
                return -1;
            }

            len = Math.min(len, copy.remaining());
            copy.get(bytes, off, len);
            return len;
        }

        @Override
        public int available() {
            return copy.remaining();
        }
    };
}
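
A short, hypothetical usage sketch (not part of the source file; assume a context that handles IOException): the stream reads through a duplicate, so the caller's buffer position is never moved.

ByteBuffer data = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5 });
InputStream in = ByteBufferUtil.inputStream(data);
System.out.println(in.available());  // 5, i.e. copy.remaining()
in.read();                           // consumes one byte of the duplicate
System.out.println(in.available());  // 4
System.out.println(data.position()); // still 0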

From source file:byps.test.TestUtils.java

public static String bufferToString(ByteBuffer buf) {
    try {
        boolean isString = true;
        byte[] arr = buf.array();
        for (int i = 0; i < buf.remaining(); i++) {
            // Mask to an unsigned value: as a signed byte, c <= 127 is always
            // true and the UTF-8 heuristic below could never reject anything.
            int c = arr[i] & 0xFF;
            if (c <= 127 || ((c & 0xC0) == 0x80) || ((c & 0xE0) == 0xC0))
                continue;
            isString = false;
            break;
        }
        if (isString) {
            return new String(buf.array(), 0, buf.remaining(), "UTF-8");
        } else {
            StringBuilder sbuf = new StringBuilder();
            sbuf.append("byte[] bytes = new byte[] {");
            for (int i = 0; i < buf.remaining(); i++) {
                if (i != 0)
                    sbuf.append(", ");
                if ((i % 10) == 0)
                    sbuf.append("\r\n");
                sbuf.append("(byte)0x");
                int c = arr[i] & 0xFF;
                String s = Integer.toHexString(c);
                if (s.length() < 2)
                    sbuf.append("0");
                sbuf.append(s);
            }
            sbuf.append("};");
            return sbuf.toString();
        }
    } catch (UnsupportedEncodingException e) {
        throw new IllegalStateException(e);
    }
}

From source file:org.apache.orc.impl.mask.RedactMaskFactory.java

/**
 * Get the next code point from the ByteBuffer. Moves the position in the
 * ByteBuffer forward to the next code point.
 * @param param the source of bytes
 * @param defaultValue if there are no bytes left, use this value
 * @return the code point that was found at the front of the buffer.
 */
static int getNextCodepoint(ByteBuffer param, int defaultValue) {
    if (param.remaining() == 0) {
        return defaultValue;
    } else {
        return Text.bytesToCodePoint(param);
    }
}
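
A hypothetical driver loop (not in the source) that drains a buffer code point by code point; the -1 default doubles as an end-of-input sentinel here, Text is org.apache.hadoop.io.Text, and java.nio.charset.StandardCharsets is assumed imported:

ByteBuffer utf8 = ByteBuffer.wrap("héllo".getBytes(StandardCharsets.UTF_8));
int cp;
while ((cp = getNextCodepoint(utf8, -1)) != -1) {
    System.out.printf("U+%04X%n", cp); // U+0068, U+00E9, U+006C, U+006C, U+006F
}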

From source file:com.gamesalutes.utils.ByteUtils.java

/**
 * Reads all the bytes from the given input stream and stores them in the specified buffer.
 * If the input buffer is <code>null</code> or does not have the capacity to store all the input, a 
 * new buffer is created and returned.  The input stream is closed regardless of whether an
 * <code>IOException</code> is thrown.
 *
 * @param in the <code>InputStream</code> to read
 * @param buf a <code>ByteBuffer</code> to use for storage or <code>null</code> to just allocate a new one
 *        If <code>buf</code> is not large enough it will be expanded using {@link #growBuffer(ByteBuffer, int)}
 * @return the buffer containing the read data
 * @throws IOException
 */
public static ByteBuffer readBytes(InputStream in, ByteBuffer buf) throws IOException {
    try {
        if (buf == null)
            buf = ByteBuffer.allocate(READ_BUFFER_SIZE);

        // note the input position
        int startPos = buf.position();

        byte[] tmp = new byte[NETWORK_BYTE_SIZE];
        int read;
        // read until end of file
        while ((read = in.read(tmp)) > 0) {
            if (buf.remaining() < read) {
                buf = ByteUtils.growBuffer(buf, buf.limit() + (read - buf.remaining()));
            }
            buf.put(tmp, 0, read);
        }

        buf.flip();
        // reset starting position to be that of input buffer
        buf.position(startPos);
        return buf;
    } finally {
        MiscUtils.closeStream(in);
    }

}
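
A hypothetical call (READ_BUFFER_SIZE and NETWORK_BYTE_SIZE are ByteUtils constants; the file name is made up): read an entire stream, then use remaining() to size the destination array.

ByteBuffer buf = ByteUtils.readBytes(new FileInputStream("payload.bin"), null);
byte[] all = new byte[buf.remaining()];
buf.get(all); // drains the returned, already-flipped buffer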

From source file:com.turn.ttorrent.common.TorrentCreator.java

/**
 * Return the concatenation of the SHA-1 hashes of the given files' pieces.
 *
 * <p>
 * Hashes the given files piece by piece using the supplied piece length
 * and returns the concatenation of these hashes as a byte array.
 * </p>
 *
 * <p>
 * This is used for creating Torrent meta-info structures from a file.
 * </p>
 *
 * @param executor The executor on which the per-piece hashing tasks run.
 * @param files The files to hash, in order.
 * @param nbytes The total number of bytes across all files.
 * @param pieceLength The piece length, in bytes.
 */
public /* for testing */ static byte[] hashFiles(Executor executor, List<File> files, long nbytes,
        int pieceLength) throws InterruptedException, IOException {
    int npieces = (int) Math.ceil((double) nbytes / pieceLength);
    byte[] out = new byte[Torrent.PIECE_HASH_SIZE * npieces];
    CountDownLatch latch = new CountDownLatch(npieces);

    ByteBuffer buffer = ByteBuffer.allocate(pieceLength);

    long start = System.nanoTime();
    int piece = 0;
    for (File file : files) {
        logger.info("Hashing data from {} ({} pieces)...",
                new Object[] { file.getName(), (int) Math.ceil((double) file.length() / pieceLength) });

        FileInputStream fis = FileUtils.openInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.flip();
                    executor.execute(new ChunkHasher(out, piece, latch, buffer));
                    buffer = ByteBuffer.allocate(pieceLength);
                    piece++;
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.flip();
        executor.execute(new ChunkHasher(out, piece, latch, buffer));
        piece++;
    }

    // Wait for hashing tasks to complete.
    latch.await();
    long elapsed = System.nanoTime() - start;

    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.",
            new Object[] { files.size(), nbytes, piece, npieces, String.format("%.1f", elapsed / 1e6) });

    return out;
}
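
A hypothetical invocation (pool size, file name, and piece length are arbitrary choices; hashFiles throws InterruptedException and IOException, and java.util.concurrent.Executors is assumed imported):

ExecutorService pool = Executors.newFixedThreadPool(2);
try {
    List<File> files = Collections.singletonList(new File("payload.bin"));
    long nbytes = files.get(0).length();
    byte[] pieceHashes = TorrentCreator.hashFiles(pool, files, nbytes, 512 * 1024);
    // Torrent.PIECE_HASH_SIZE (20) bytes per piece, concatenated in order.
} finally {
    pool.shutdown();
}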

From source file:org.apache.orc.impl.RecordReaderUtils.java

/**
 * Read the list of ranges from the file.
 * @param file the file to read
 * @param base the base of the stripe
 * @param range the disk ranges within the stripe to read
 * @return the bytes read for each disk range, which is the same length as
 *    ranges
 * @throws IOException
 */
static DiskRangeList readDiskRanges(FSDataInputStream file, HadoopShims.ZeroCopyReaderShim zcr, long base,
        DiskRangeList range, boolean doForceDirect) throws IOException {
    if (range == null)
        return null;
    DiskRangeList prev = range.prev;
    if (prev == null) {
        prev = new MutateHelper(range);
    }
    while (range != null) {
        if (range.hasData()) {
            range = range.next;
            continue;
        }
        int len = (int) (range.getEnd() - range.getOffset());
        long off = range.getOffset();
        if (zcr != null) {
            file.seek(base + off);
            boolean hasReplaced = false;
            while (len > 0) {
                ByteBuffer partial = zcr.readBuffer(len, false);
                BufferChunk bc = new BufferChunk(partial, off);
                if (!hasReplaced) {
                    range.replaceSelfWith(bc);
                    hasReplaced = true;
                } else {
                    range.insertAfter(bc);
                }
                range = bc;
                int read = partial.remaining();
                len -= read;
                off += read;
            }
        } else {
            // Don't use HDFS ByteBuffer API because it has no readFully, and is buggy and pointless.
            byte[] buffer = new byte[len];
            file.readFully((base + off), buffer, 0, buffer.length);
            ByteBuffer bb = null;
            if (doForceDirect) {
                bb = ByteBuffer.allocateDirect(len);
                bb.put(buffer);
                bb.position(0);
                bb.limit(len);
            } else {
                bb = ByteBuffer.wrap(buffer);
            }
            range = range.replaceSelfWith(new BufferChunk(bb, range.getOffset()));
        }
        range = range.next;
    }
    return prev.next;
}