Example usage for java.nio.ByteBuffer.limit(int)

Introduction

This page collects usage examples for java.nio.ByteBuffer.limit(int), drawn from open-source projects.

Prototype

public final Buffer limit(int newLimit) 

Document

Sets this buffer's limit. If the position is larger than the new limit then it is set to the new limit; if the mark is defined and larger than the new limit then it is discarded.
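Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of how limit(int) bounds a buffer's readable and writable window; the standing invariant is 0 <= mark <= position <= limit <= capacity:

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16); // capacity 16, limit 16, position 0
        buf.limit(8);                             // only the first 8 bytes are now accessible
        System.out.println(buf.remaining());      // 8
        buf.put(new byte[8]);                     // fills up to the limit; position == limit
        System.out.println(buf.hasRemaining());   // false
        buf.limit(4);                             // a limit below the position pulls the position back to 4
        System.out.println(buf.position());       // 4
    }
}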

Usage

From source file:org.apache.hadoop.hdfs.BlockReaderLocalLegacy.java

/**
 * Tries to read as many bytes as possible into supplied buffer, checksumming
 * each chunk if needed.
 *
 * <b>Preconditions:</b>
 * <ul>
 * <li>
 * If checksumming is enabled, buf.remaining must be a multiple of
 * bytesPerChecksum. Note that this is not a requirement for clients of
 * read(ByteBuffer) - in the case of non-checksum-sized read requests,
 * read(ByteBuffer) will substitute a suitably sized buffer to pass to this
 * method.
 * </li>
 * </ul>
 * <b>Postconditions:</b>
 * <ul>
 * <li>buf.limit and buf.mark are unchanged.</li>
 * <li>buf.position += min(offsetFromChunkBoundary, totalBytesRead) - so the
 * requested bytes can be read straight from the buffer</li>
 * </ul>
 *
 * @param buf
 *          byte buffer to write bytes to. If checksums are not required, buf
 *          can have any number of bytes remaining, otherwise there must be a
 *          multiple of the checksum chunk size remaining.
 * @return <tt>max(min(totalBytesRead, len) - offsetFromChunkBoundary, 0)</tt>
 *         that is, the number of useful bytes (up to the amount
 *         requested) readable from the buffer by the client.
 */
private synchronized int doByteBufferRead(ByteBuffer buf) throws IOException {
    if (verifyChecksum) {
        assert buf.remaining() % bytesPerChecksum == 0;
    }
    int dataRead = -1;

    int oldpos = buf.position();
    // Read as much as we can into the buffer.
    dataRead = fillBuffer(dataIn, buf);

    if (dataRead == -1) {
        return -1;
    }

    if (verifyChecksum) {
        ByteBuffer toChecksum = buf.duplicate();
        toChecksum.position(oldpos);
        toChecksum.limit(oldpos + dataRead);

        checksumBuff.clear();
        // Equivalent to (int)Math.ceil(toChecksum.remaining() * 1.0 / bytesPerChecksum );
        int numChunks = (toChecksum.remaining() + bytesPerChecksum - 1) / bytesPerChecksum;
        checksumBuff.limit(checksumSize * numChunks);

        fillBuffer(checksumIn, checksumBuff);
        checksumBuff.flip();

        checksum.verifyChunkedSums(toChecksum, checksumBuff, filename, this.startOffset);
    }

    if (dataRead >= 0) {
        buf.position(oldpos + Math.min(offsetFromChunkBoundary, dataRead));
    }

    if (dataRead < offsetFromChunkBoundary) {
        // yikes, didn't even get enough bytes to honour offset. This can happen
        // even if we are verifying checksums if we are at EOF.
        offsetFromChunkBoundary -= dataRead;
        dataRead = 0;
    } else {
        dataRead -= offsetFromChunkBoundary;
        offsetFromChunkBoundary = 0;
    }

    return dataRead;
}
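The duplicate()/limit() pattern above deserves a note: a duplicate shares its content with the original buffer but carries independent position, limit, and mark values, so a sub-range can be checksummed without disturbing the caller's indices. A minimal sketch of the pattern (the helper name is illustrative, not from the HDFS source):

import java.nio.ByteBuffer;

public class BufferWindow {
    // Returns a view of buf covering [start, start + length) with independent
    // indices; the original buffer's position, limit and mark are untouched.
    static ByteBuffer window(ByteBuffer buf, int start, int length) {
        ByteBuffer view = buf.duplicate();
        view.limit(start + length); // set the limit first so position(start) is always legal
        view.position(start);
        return view;
    }
}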

From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java

private void commitBucketTableToDisk() throws BucketTableManagerException {
    File currentFile = null;
    FileChannel fileChannel = null;
    ByteBuffer headerBuffer = null;
    try {
        logger.warn("Start commit bucket table...");
        if (bucketTable.getRequestedCheckPoint() == null || bucketTable.getRequestedCheckPoint().isEmpty())
            throw new BucketTableManagerException("commit requested while there is no requested checkpoint");
        currentFile = getLatestCommitedFile();
        File nextFile = getNextFile(getLatestCommitedFile());
        fileChannel = (new RandomAccessFile(nextFile, "rw")).getChannel();
        // Write header with empty checkpoint 
        headerBuffer = ByteBuffer.allocate(HEADERSIZE);
        fileChannel.position(0L);
        headerBuffer.putInt(MAGICSTART);
        headerBuffer.putLong(mapSize);
        // NeedlePointer lastCheckPoint = bucketTable.getLastCheckPoint() ; // Reset checkpoint to no checkpoint done
        NeedlePointer lastCheckPoint = new NeedlePointer(); // Empty needle
        lastCheckPoint.putNeedlePointerToBuffer(headerBuffer);
        headerBuffer.putInt(MAGICEND);
        headerBuffer.flip(); // limit = bytes written, position = 0: only the header bytes get written
        fileChannel.write(headerBuffer);
        // Now writes buffers
        for (int i = 0; i < nbBuffers; i++) {
            bucketTable.prepareBufferForWriting(i);
            int written = fileChannel.write(bucketTable.getBuffer(i));
            if (written < bucketTable.getBuffer(i).limit())
                throw new BucketTableManagerException("Incomplete write for bucket table file "
                        + nextFile.getName() + ", expected " + (mapSize + HEADERSIZE));
            // else
            // logger.info("Bucket table commit: written "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            try {
                Thread.sleep(10);
            } catch (Throwable th) {
                // ignored: the pause between buffer writes is best-effort
            }
        }
        // Writes second magic number
        ByteBuffer buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
        buffer.rewind();
        buffer.limit(INTSIZE);
        buffer.putInt(MAGICSTART);
        buffer.rewind();
        fileChannel.write(buffer);
        // Write Needle Log Info
        Iterator<NeedleLogInfo> it = logInfoPerLogNumber.values().iterator();
        while (it.hasNext()) {
            buffer.rewind();
            buffer.limit(NeedleLogInfo.INFOSIZE);
            NeedleLogInfo nli = it.next();
            nli.putNeedleLogInfo(buffer, true);
            int written = fileChannel.write(buffer);
            if (written < NeedleLogInfo.INFOSIZE)
                throw new BucketTableManagerException(
                        "Incomplete write for bucket table file, writing log infos " + nextFile.getName());
        }
        // Writes checkpoint
        headerBuffer = ByteBuffer.allocate(NeedlePointer.POINTERSIZE);
        headerBuffer.rewind();
        headerBuffer.limit(NeedlePointer.POINTERSIZE);
        // System.out.println("Writing checkpoint in index "+bucketTable.getRequestedCheckPoint()) ;
        bucketTable.getRequestedCheckPoint().putNeedlePointerToBuffer(headerBuffer, true); // Flip buffer after write
        headerBuffer.rewind();
        // fileChannel.force(false) ;
        if (fileChannel.write(headerBuffer, CHECKPOINTOFFSET) < NeedlePointer.POINTERSIZE) {
            throw new BucketTableManagerException("Could not write checkpoint to " + nextFile.getName());
        }
        fileChannel.force(true);
        fileChannel.close();
        if (!nextFile.renameTo(getCommittedFile(nextFile)))
            throw new BucketTableManagerException(
                    "Could not rename " + nextFile.getName() + " to " + getCommittedFile(nextFile).getName());

        logger.warn("Committed bucket table.");
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed writting bucket table", ie);
    } finally {
        headerBuffer = null; //May ease garbage collection
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (Exception ex) {
                throw new BucketTableManagerException("Failed to close file channel", ex);
            }
        }
    }
    try {
        if (currentFile != null) {
            if (!currentFile.delete())
                logger.error("Failed deleting previous bucket table" + currentFile.getName());
        }
    } catch (Throwable th) {
        logger.error("Failed deleting previous bucket table" + currentFile.getName(), th);
    }
}
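Note how this method reuses one buffer for a run of fixed-size records: a channel write leaves the position at the limit, so each iteration calls rewind() to reset the position and limit(NeedleLogInfo.INFOSIZE) to restore the full window before refilling. A condensed sketch of that reuse pattern (the record layout and channel are placeholders):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

public class RecordWriter {
    static void writeRecords(WritableByteChannel ch, Iterable<byte[]> records, int recordSize)
            throws IOException {
        ByteBuffer buf = ByteBuffer.allocate(recordSize);
        for (byte[] record : records) {
            buf.rewind();          // position = 0 (the limit is unchanged)
            buf.limit(recordSize); // restore the full window before refilling
            buf.put(record, 0, recordSize);
            buf.flip();            // limit = position, position = 0: ready to drain
            while (buf.hasRemaining()) {
                ch.write(buf);
            }
        }
    }
}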

From source file:org.apache.hadoop.mapred.FadvisedFileRegion.java

/**
 * This method transfers data using a local buffer. It reads data from
 * the disk into a local in-memory buffer, and then writes it from that
 * buffer to the target. It is used only when transferTo is disallowed in
 * the configuration file. super.transferTo does not perform well on Windows
 * because of the small IO requests it generates; customShuffleTransfer can
 * control the size of the IO requests by changing the size of the
 * intermediate buffer.
 */
@VisibleForTesting
long customShuffleTransfer(WritableByteChannel target, long position) throws IOException {
    long actualCount = this.count - position;
    if (actualCount < 0 || position < 0) {
        throw new IllegalArgumentException(
                "position out of range: " + position + " (expected: 0 - " + (this.count - 1) + ')');
    }
    if (actualCount == 0) {
        return 0L;
    }

    long trans = actualCount;
    int readSize;
    ByteBuffer byteBuffer = ByteBuffer.allocate(this.shuffleBufferSize);

    while (trans > 0L && (readSize = fileChannel.read(byteBuffer, this.position + position)) > 0) {
        //adjust counters and buffer limit
        if (readSize < trans) {
            trans -= readSize;
            position += readSize;
            byteBuffer.flip();
        } else {
            //We can read more than we need if actualCount is not a multiple
            //of the byteBuffer size and the file is big enough. In that case we
            //cannot use flip(); we must set the buffer limit to trans manually.
            byteBuffer.limit((int) trans);
            byteBuffer.position(0);
            position += trans;
            trans = 0;
        }

        //write data to the target
        while (byteBuffer.hasRemaining()) {
            target.write(byteBuffer);
        }

        byteBuffer.clear();
    }

    return actualCount - trans;
}
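The interesting detail is the two ways the buffer is prepared for draining: flip() behaves like limit(position()) followed by position(0) (flip() additionally always discards the mark), which is why the final, possibly over-full read sets the limit manually to trans instead of flipping. A minimal sketch of the equivalence:

import java.nio.ByteBuffer;

public class FlipEquivalence {
    public static void main(String[] args) {
        ByteBuffer a = ByteBuffer.allocate(64);
        a.put(new byte[10]);
        a.flip();              // limit = 10, position = 0

        ByteBuffer b = ByteBuffer.allocate(64);
        b.put(new byte[10]);
        b.limit(b.position()); // flip(), spelled out...
        b.position(0);         // ...in two explicit steps

        System.out.println(a.limit() == b.limit() && a.position() == b.position()); // true
    }
}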

From source file:org.apache.tajo.pullserver.FadvisedFileRegion.java

/**
 * This method transfers data using local buffer. It transfers data from
 * a disk to a local buffer in memory, and then it transfers data from the
 * buffer to the target. This is used only if transferTo is disallowed in
 * the configuration file. super.TransferTo does not perform well on Windows
 * due to a small IO request generated. customShuffleTransfer can control
 * the size of the IO requests by changing the size of the intermediate
 * buffer.//from   ww  w .  ja  v  a2  s  .c o  m
 */
@VisibleForTesting
long customShuffleTransfer(WritableByteChannel target, long position) throws IOException {
    long actualCount = this.count - position;
    if (actualCount < 0 || position < 0) {
        throw new IllegalArgumentException(
                "position out of range: " + position + " (expected: 0 - " + (this.count - 1) + ')');
    }
    if (actualCount == 0) {
        return 0L;
    }

    long trans = actualCount;
    int readSize;
    ByteBuffer byteBuffer = ByteBuffer.allocate(this.shuffleBufferSize);

    while (trans > 0L && (readSize = fileChannel.read(byteBuffer, this.position + position)) > 0) {
        //adjust counters and buffer limit
        if (readSize < trans) {
            trans -= readSize;
            position += readSize;
            byteBuffer.flip();
        } else {
            //We can read more than we need if actualCount is not a multiple
            //of the byteBuffer size and the file is big enough. In that case we
            //cannot use flip(); we must set the buffer limit to trans manually.
            byteBuffer.limit((int) trans);
            byteBuffer.position(0);
            position += trans;
            trans = 0;
        }

        //write data to the target
        while (byteBuffer.hasRemaining()) {
            target.write(byteBuffer);
        }

        byteBuffer.clear();
    }

    return actualCount - trans;
}
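This is the same transfer loop as the Hadoop example above; the flip() versus manual limit(trans)/position(0) distinction discussed there applies identically.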

From source file:org.apache.tajo.storage.orc.OrcScanner.java

private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, Path path, long maxFileLength)
        throws IOException {
    FSDataInputStream file = fs.open(path);

    // figure out the size of the file using the option or filesystem
    long size;
    if (maxFileLength == Long.MAX_VALUE) {
        size = fs.getFileStatus(path).getLen();
    } else {
        size = maxFileLength;
    }

    //read last bytes into buffer to get PostScript
    int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
    ByteBuffer buffer = ByteBuffer.allocate(readSize);
    assert buffer.position() == 0;
    file.readFully((size - readSize), buffer.array(), buffer.arrayOffset(), readSize);
    buffer.position(0);

    //read the PostScript
    //get length of PostScript
    int psLen = buffer.get(readSize - 1) & 0xff;
    ensureOrcFooter(file, path, psLen, buffer);
    int psOffset = readSize - 1 - psLen;
    OrcProto.PostScript ps = extractPostScript(buffer, path, psLen, psOffset);

    int footerSize = (int) ps.getFooterLength();
    int metadataSize = (int) ps.getMetadataLength();

    //check if extra bytes need to be read
    ByteBuffer fullFooterBuffer = null;
    int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
    if (extra > 0) {
        //more bytes need to be read, seek back to the right place and read extra bytes
        ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
        file.readFully((size - readSize - extra), extraBuf.array(),
                extraBuf.arrayOffset() + extraBuf.position(), extra);
        extraBuf.position(extra);
        //append with already read bytes
        extraBuf.put(buffer);
        buffer = extraBuf;
        buffer.position(0);
        fullFooterBuffer = buffer.slice();
        buffer.limit(footerSize + metadataSize);
    } else {
        //footer is already in the bytes in buffer, just adjust position, length
        buffer.position(psOffset - footerSize - metadataSize);
        fullFooterBuffer = buffer.slice();
        buffer.limit(psOffset);
    }

    // remember position for later
    buffer.mark();

    file.close();

    return new FileMetaInfo(ps.getCompression().toString(), (int) ps.getCompressionBlockSize(),
            (int) ps.getMetadataLength(), buffer, ps.getVersionList(),
            org.apache.orc.OrcFile.WriterVersion.FUTURE, fullFooterBuffer);
}
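Two details here interact with limit(): slice() creates a new buffer covering the bytes between the current position and limit, with independent indices, and the limit(...) call that follows narrows only the original buffer, never the slice. A small sketch (the offsets are illustrative):

import java.nio.ByteBuffer;

public class SliceDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(100);
        buf.position(20);                     // the slice will start here
        ByteBuffer tail = buf.slice();        // independent view of bytes 20..99
        buf.limit(60);                        // narrows buf only
        System.out.println(buf.remaining());  // 40
        System.out.println(tail.remaining()); // still 80
    }
}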

From source file:com.github.ambry.utils.UtilsTest.java

@Test
public void testReadFileToByteBuffer() throws IOException {
    File file = File.createTempFile("test", "1");
    file.deleteOnExit();
    FileChannel fileChannel = Utils.openChannel(file, false);
    byte[] referenceBytes = new byte[20];
    new Random().nextBytes(referenceBytes);
    FileUtils.writeByteArrayToFile(file, referenceBytes);

    // fill up fresh byteBuffer
    ByteBuffer buffer = ByteBuffer.allocate(20);
    Utils.readFileToByteBuffer(fileChannel, 0, buffer);
    assertArrayEquals("Data mismatch", referenceBytes, buffer.array());

    // write to byteBuffer based on buffer remaining
    buffer.limit(10);
    buffer.position(0);
    assertEquals("buffer remaining should be 10", 10, buffer.remaining());
    Utils.readFileToByteBuffer(fileChannel, 10, buffer);
    assertEquals("buffer remaining should be 0", 0, buffer.remaining());
    for (int i = 0; i < 10; i++) {
        assertEquals("First 10 bytes in buffer should match last 10 bytes in file", buffer.array()[i],
                referenceBytes[i + 10]);
    }

    // byteBuffer.remaining() + starting offset > file size, exception is expected.
    buffer.clear();
    assertEquals("buffer remaining should be 20", 20, buffer.remaining());
    try {
        Utils.readFileToByteBuffer(fileChannel, 1, buffer);
        fail("Should fail");
    } catch (IOException e) {
        // expected
    }

    // starting offset exceeds file size, exception is expected.
    buffer.clear();
    assertEquals("buffer remaining should be 20", 20, buffer.remaining());
    try {
        Utils.readFileToByteBuffer(fileChannel, 21, buffer);
        fail("Should fail");
    } catch (IOException e) {
        // expected
    }
}
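Note the interplay with clear() throughout the test: clear() resets the position to 0 and the limit back to the capacity (it does not erase data), so each negative case starts from a full 20-byte window, while the earlier limit(10)/position(0) pair deliberately restricts how many bytes readFileToByteBuffer may deposit.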

From source file:me.carpela.network.pt.cracker.tools.ttorrent.Torrent.java

private static String hashFiles(List<File> files, int pieceLength)
        throws InterruptedException, IOException, NoSuchAlgorithmException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLength);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    // A full piece has been read: clear() re-exposes it
                    // (position = 0, limit = capacity) to CallableChunkHasher,
                    // which is expected to copy the contents before the next
                    // read refills the buffer.
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    step += 10; // progress logging from the original source was elided in this excerpt
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    // elapsed and expectedPieces fed timing/progress logging in the original
    // source; that logging was elided in this excerpt.
    int expectedPieces = (int) (Math.ceil((double) length / pieceLength));
    return hashes.toString();
}
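The final block is the manual-flip idiom again: limit(buffer.position()) followed by position(0) exposes exactly the bytes accumulated for the last, partial piece, which is shorter than pieceLength and therefore never triggers the remaining() == 0 check inside the loop.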

From source file:com.sonymobile.android.media.internal.VUParser.java

protected ByteBuffer parseAvccForMarlin(byte[] buffer) {
    int currentBufferOffset = 0;
    if (buffer[0] != 1) { // configurationVersion
        return null;
    }

    int numSPS = buffer[5] & 31; // numOfSequenceParameterSets
    currentBufferOffset = 6;
    byte[] csdArray = new byte[1024];
    int csdArrayOffset = 0;
    for (int i = 0; i < numSPS; i++) {
        int spsLength = ((buffer[currentBufferOffset++] & 0xFF) << 8 | buffer[currentBufferOffset++] & 0xFF)
                & 0x0000FFFF;

        csdArray[csdArrayOffset++] = 0;
        csdArray[csdArrayOffset++] = 0;
        csdArray[csdArrayOffset++] = (byte) ((spsLength >> 8) & 0xFF);
        csdArray[csdArrayOffset++] = (byte) (spsLength & 0xFF);
        for (int j = 0; j < spsLength; j++) {
            csdArray[csdArrayOffset++] = buffer[currentBufferOffset + j];
        }
        currentBufferOffset += spsLength;
    }
    int numPPS = buffer[currentBufferOffset++]; // numOfPictureParameterSets
    for (int i = 0; i < numPPS; i++) {
        int ppsLength = ((buffer[currentBufferOffset++] & 0xFF) << 8 | buffer[currentBufferOffset++] & 0xFF)
                & 0x0000FFFF;
        csdArray[csdArrayOffset++] = 0;
        csdArray[csdArrayOffset++] = 0;
        csdArray[csdArrayOffset++] = (byte) ((ppsLength >> 8) & 0xFF);
        csdArray[csdArrayOffset++] = (byte) (ppsLength & 0xFF);
        for (int j = 0; j < ppsLength; j++) {
            csdArray[csdArrayOffset++] = buffer[currentBufferOffset + j];
        }
        currentBufferOffset += ppsLength;
    }
    ByteBuffer csdData = ByteBuffer.wrap(csdArray);
    csdData.limit(csdArrayOffset);
    return csdData;
}
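The closing wrap()/limit() pair is the standard way to expose only the filled prefix of an array: wrap() makes the whole array visible, and limit() trims the view back to the bytes actually written. A minimal sketch:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class WrapPrefix {
    public static void main(String[] args) {
        byte[] scratch = new byte[1024];
        byte[] payload = "hello".getBytes(StandardCharsets.US_ASCII);
        System.arraycopy(payload, 0, scratch, 0, payload.length);

        ByteBuffer view = ByteBuffer.wrap(scratch); // position 0, limit 1024
        view.limit(payload.length);                 // expose only the 5 valid bytes
        System.out.println(view.remaining());       // 5
    }
}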

From source file:ome.io.nio.RomioPixelBuffer.java

/**
 * Implemented as specified by {@link PixelBuffer} I/F.
 * @see PixelBuffer#setRegion(Integer, Long, byte[])
 */
public void setRegion(Integer size, Long offset, byte[] buffer) throws IOException {
    throwIfReadOnly();
    ByteBuffer buf = MappedByteBuffer.wrap(buffer);
    buf.limit(size);
    setRegion(size, offset, buf);
}
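A small wrinkle here: MappedByteBuffer.wrap resolves to the static ByteBuffer.wrap inherited by the subclass, so the result is an ordinary heap buffer, not a memory-mapped one; ByteBuffer.wrap(buffer) would be the clearer spelling. The limit(size) call then caps how many of the array's bytes the delegated setRegion overload may consume.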

From source file:eu.faircode.netguard.SinkholeService.java

private void startDebug(final ParcelFileDescriptor pfd) {
    if (!debug)
        return;

    thread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                FileInputStream in = new FileInputStream(pfd.getFileDescriptor());
                FileOutputStream out = new FileOutputStream(pfd.getFileDescriptor());

                ByteBuffer buffer = ByteBuffer.allocate(32767);
                buffer.order(ByteOrder.BIG_ENDIAN);

                Log.i(TAG, "Start receiving");
                while (!Thread.currentThread().isInterrupted() && pfd.getFileDescriptor() != null
                        && pfd.getFileDescriptor().valid())
                    try {
                        buffer.clear();
                        int length = in.read(buffer.array());
                        if (length > 0) {
                            buffer.limit(length);
                            Packet pkt = new Packet(buffer);

                            if (pkt.IPv4.protocol == Packet.IPv4Header.TCP && pkt.TCP.SYN) {
                                int uid = pkt.getUid4();
                                if (uid < 0)
                                    Log.w(TAG, "uid not found");

                                String[] pkg = getPackageManager().getPackagesForUid(uid);
                                if (pkg == null)
                                    pkg = new String[] { uid == 0 ? "root" : "unknown" };

                                Log.i(TAG, "Connect " + pkt.IPv4.destinationAddress + ":"
                                        + pkt.TCP.destinationPort + " uid=" + uid + " pkg=" + pkg[0]);

                                // Send RST
                                pkt.swapAddresses();
                                pkt.TCP.clearFlags();
                                pkt.TCP.RST = true;
                                long ack = pkt.TCP.acknowledgementNumber;
                                pkt.TCP.acknowledgementNumber = (pkt.TCP.sequenceNumber + 1) & 0xFFFFFFFFL;
                                pkt.TCP.sequenceNumber = (ack + 1) & 0xFFFFFFFFL;
                                pkt.send(out);
                            }
                        }
                    } catch (Throwable ex) {
                        Log.e(TAG, ex.toString());
                    }
                Log.i(TAG, "End receiving");
            } catch (Throwable ex) {
                Log.e(TAG, ex.toString() + "\n" + Log.getStackTraceString(ex));
            }
        }
    });
    thread.start();
}
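Reading through the backing array (in.read(buffer.array())) bypasses the buffer's own indices, so the position stays at 0 and only the limit needs adjusting: after clear(), limit(length) marks exactly the freshly read bytes as available to the Packet parser. A condensed sketch of that read pattern (the stream and buffer are placeholders):

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class RawRead {
    // Returns the number of readable bytes, or -1 at end of stream.
    static int readInto(FileInputStream in, ByteBuffer buffer) throws IOException {
        buffer.clear();                       // position = 0, limit = capacity
        int length = in.read(buffer.array()); // fills the backing array directly;
                                              // the buffer's position does not move
        if (length > 0) {
            buffer.limit(length);             // expose exactly the bytes just read
        }
        return length;
    }
}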