Example usage for java.nio ByteBuffer limit

List of usage examples for java.nio ByteBuffer limit

Introduction

On this page you can find usage examples for java.nio.ByteBuffer.limit().

Prototype

public final int limit() 

Document

Returns the limit of this buffer.
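
Below is a minimal, hypothetical sketch (the class name LimitDemo is made up) showing how limit() relates to position() and capacity() on a plain JDK ByteBuffer:

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16); // capacity = 16, limit = 16, position = 0
        buf.put(new byte[] { 1, 2, 3, 4 });       // position advances to 4, limit stays 16
        buf.flip();                               // limit becomes the old position (4), position resets to 0
        System.out.println(buf.limit());          // prints 4
        System.out.println(buf.capacity());       // prints 16
    }
}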

Usage

From source file:com.glaf.core.util.ByteBufferUtils.java

public static int space(ByteBuffer buffer) {
    if (buffer == null) {
        return 0;
    }
    return buffer.capacity() - buffer.limit();
}
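
A hedged usage sketch for the space() helper above, which reports the bytes between limit and capacity. The class name SpaceDemo is hypothetical, and the snippet assumes com.glaf.core.util.ByteBufferUtils from the example above is on the classpath.

import java.nio.ByteBuffer;

import com.glaf.core.util.ByteBufferUtils; // the utility class shown above

public class SpaceDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.put(new byte[] { 1, 2, 3 });
        buf.flip();                                      // limit = 3, capacity = 8
        System.out.println(ByteBufferUtils.space(buf));  // prints 5 (capacity - limit)
    }
}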

From source file:com.glaf.core.util.ByteBufferUtils.java

public static boolean compact(ByteBuffer buffer) {
    if (buffer.position() == 0) {
        return false;
    }
    boolean full = buffer.limit() == buffer.capacity();
    buffer.compact().flip();
    return full && buffer.limit() < buffer.capacity();
}
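
A possible usage of the compact() helper above (CompactDemo is a hypothetical name, and the snippet assumes the ByteBufferUtils class above is available): after one byte of a full buffer has been read, compacting frees room, so the method returns true.

import java.nio.ByteBuffer;

import com.glaf.core.util.ByteBufferUtils; // the utility class shown above

public class CompactDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        buf.put(new byte[] { 1, 2, 3, 4 });   // buffer is full: limit == capacity
        buf.flip();
        buf.get();                            // consume one byte, position = 1
        // compact() moves the three unread bytes to the front and flip() makes them
        // readable again, so the new limit (3) is below capacity (4): prints true
        System.out.println(ByteBufferUtils.compact(buf));
    }
}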

From source file:org.lealone.cluster.utils.ByteBufferUtil.java

/**
 * Compare two ByteBuffers at the specified offsets for the given length.
 * Compares non-equal bytes as unsigned.
 * @param bytes1 First byte buffer to compare.
 * @param offset1 Position to start the comparison at in the first buffer.
 * @param bytes2 Second byte buffer to compare.
 * @param offset2 Position to start the comparison at in the second buffer.
 * @param length The number of bytes to compare.
 * @return -1 if bytes1 is less than bytes2, 1 if bytes2 is less than bytes1, or 0 if equal.
 */
public static int compareSubArrays(ByteBuffer bytes1, int offset1, ByteBuffer bytes2, int offset2, int length) {
    if (bytes1 == null)
        return bytes2 == null ? 0 : -1;
    if (bytes2 == null)
        return 1;

    assert bytes1.limit() >= offset1
            + length : "The first byte array isn't long enough for the specified offset and length.";
    assert bytes2.limit() >= offset2
            + length : "The second byte array isn't long enough for the specified offset and length.";
    for (int i = 0; i < length; i++) {
        byte byte1 = bytes1.get(offset1 + i);
        byte byte2 = bytes2.get(offset2 + i);
        if (byte1 == byte2)
            continue;
        // compare non-equal bytes as unsigned
        return (byte1 & 0xFF) < (byte2 & 0xFF) ? -1 : 1;
    }
    return 0;
}
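
A hedged usage sketch for compareSubArrays() (CompareDemo is a hypothetical name; it assumes org.lealone.cluster.utils.ByteBufferUtil from the snippet above is on the classpath). Note that bytes are compared as unsigned values:

import java.nio.ByteBuffer;

import org.lealone.cluster.utils.ByteBufferUtil; // the utility class shown above

public class CompareDemo {
    public static void main(String[] args) {
        ByteBuffer a = ByteBuffer.wrap(new byte[] { 0x01, 0x7F, (byte) 0x80 });
        ByteBuffer b = ByteBuffer.wrap(new byte[] { 0x01, 0x7F, 0x10 });
        // 0x80 is 128 when treated as unsigned, so it sorts after 0x10 (16): prints 1
        System.out.println(ByteBufferUtil.compareSubArrays(a, 0, b, 0, 3));
    }
}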

From source file:org.apache.tajo.storage.orc.OrcScanner.java

/**
 * Ensure this is an ORC file to prevent users from trying to read text
 * files or RC files as ORC files.
 * @param in the file being read
 * @param path the filename for error messages
 * @param psLen the postscript length
 * @param buffer the tail of the file
 * @throws IOException
 */
static void ensureOrcFooter(FSDataInputStream in, Path path, int psLen, ByteBuffer buffer) throws IOException {
    int len = OrcFile.MAGIC.length();
    if (psLen < len + 1) {
        throw new IOException("Malformed ORC file " + path + ". Invalid postscript length " + psLen);
    }
    int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - 1 - len;
    byte[] array = buffer.array();
    // now look for the magic string at the end of the postscript.
    if (!Text.decode(array, offset, len).equals(OrcFile.MAGIC)) {
        // If it isn't there, this may be the 0.11.0 version of ORC.
        // Read the first 3 bytes of the file to check for the header
        byte[] header = new byte[len];
        in.readFully(0, header, 0, len);
        // if it isn't there, this isn't an ORC file
        if (!Text.decode(header, 0, len).equals(OrcFile.MAGIC)) {
            throw new IOException("Malformed ORC file " + path + ". Invalid postscript.");
        }
    }
}

From source file:guru.benson.pinch.Pinch.java

/**
 * Extract all ZipEntries from the ZIP central directory.
 *
 * @param buf
 *     The byte buffer containing the ZIP central directory.
 *
 * @return A list with all ZipEntries.
 */
private static ArrayList<ExtendedZipEntry> parseHeaders(ByteBuffer buf) {
    ArrayList<ExtendedZipEntry> zeList = new ArrayList<ExtendedZipEntry>();

    buf.order(ByteOrder.LITTLE_ENDIAN);

    int offset = 0;

    while (offset < buf.limit() - ZipConstants.CENHDR) {
        short fileNameLen = buf.getShort(offset + ZipConstants.CENNAM);
        short extraFieldLen = buf.getShort(offset + ZipConstants.CENEXT);
        short fileCommentLen = buf.getShort(offset + ZipConstants.CENCOM);

        String fileName = new String(buf.array(), offset + ZipConstants.CENHDR, fileNameLen);

        ExtendedZipEntry zeGermans = new ExtendedZipEntry(fileName);

        zeGermans.setMethod(buf.getShort(offset + ZipConstants.CENHOW));

        CRC32 crc = new CRC32();
        crc.update(buf.getInt(offset + ZipConstants.CENCRC));
        zeGermans.setCrc(crc.getValue());

        zeGermans.setCompressedSize(buf.getInt(offset + ZipConstants.CENSIZ));
        zeGermans.setSize(buf.getInt(offset + ZipConstants.CENLEN));
        zeGermans.setInternalAttr(buf.getShort(offset + ZipConstants.CENATT));
        zeGermans.setExternalAttr(buf.getShort(offset + ZipConstants.CENATX));
        zeGermans.setOffset((long) buf.getInt(offset + ZipConstants.CENOFF));

        zeGermans.setExtraLength(extraFieldLen);

        zeList.add(zeGermans);
        offset += ZipConstants.CENHDR + fileNameLen + extraFieldLen + fileCommentLen;
    }

    return zeList;
}

From source file:org.apache.cassandra.utils.ByteBufferUtil.java

/**
 * Transfer bytes from one ByteBuffer to another.
 * This function acts as System.arraycopy() but for ByteBuffers.
 *
 * @param src the source ByteBuffer
 * @param srcPos starting position in the source ByteBuffer
 * @param dst the destination ByteBuffer
 * @param dstPos starting position in the destination ByteBuffer
 * @param length the number of bytes to copy
 */
public static void arrayCopy(ByteBuffer src, int srcPos, ByteBuffer dst, int dstPos, int length) {
    if (src.hasArray() && dst.hasArray()) {
        System.arraycopy(src.array(), src.arrayOffset() + srcPos, dst.array(), dst.arrayOffset() + dstPos,
                length);
    } else {
        if (src.limit() - srcPos < length || dst.limit() - dstPos < length)
            throw new IndexOutOfBoundsException();

        for (int i = 0; i < length; i++) {
            dst.put(dstPos++, src.get(srcPos++));
        }
    }
}
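
A possible usage of arrayCopy() (ArrayCopyDemo is a hypothetical name; it assumes org.apache.cassandra.utils.ByteBufferUtil from the snippet above is on the classpath). Both buffers are array-backed here, so the fast System.arraycopy() path is taken:

import java.nio.ByteBuffer;

import org.apache.cassandra.utils.ByteBufferUtil; // the utility class shown above

public class ArrayCopyDemo {
    public static void main(String[] args) {
        ByteBuffer src = ByteBuffer.wrap(new byte[] { 10, 20, 30, 40 });
        ByteBuffer dst = ByteBuffer.allocate(4);
        ByteBufferUtil.arrayCopy(src, 1, dst, 0, 3); // copies 20, 30, 40 into dst[0..2]
        System.out.println(dst.get(0));              // prints 20
    }
}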

From source file:com.glaf.core.util.ByteBufferUtils.java

/**
 * ByteBuffer adaptation of org.apache.commons.lang3.ArrayUtils.lastIndexOf
 * method
 * 
 * @param buffer
 *            the array to traverse for looking for the object, may be
 *            <code>null</code>
 * @param valueToFind
 *            the value to find
 * @param startIndex
 *            the start index (i.e. BB position) to traverse backwards from
 * @return the last index (i.e. BB position) of the value within the array
 *         [between buffer.position() and buffer.limit()]; <code>-1</code>
 *         if not found.
 */
public static int lastIndexOf(ByteBuffer buffer, byte valueToFind, int startIndex) {
    assert buffer != null;

    if (startIndex < buffer.position()) {
        return -1;
    } else if (startIndex >= buffer.limit()) {
        startIndex = buffer.limit() - 1;
    }

    for (int i = startIndex; i >= buffer.position(); i--) {
        if (valueToFind == buffer.get(i))
            return i;
    }

    return -1;
}
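
A hedged usage sketch for lastIndexOf() (LastIndexOfDemo is a hypothetical name; it assumes the com.glaf.core.util.ByteBufferUtils class above is available). The search runs backwards from the start index and only within [position, limit):

import java.nio.ByteBuffer;

import com.glaf.core.util.ByteBufferUtils; // the utility class shown above

public class LastIndexOfDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] { 5, 9, 5, 9 });
        // searches backwards from index 3 and finds the value 5 at index 2: prints 2
        System.out.println(ByteBufferUtils.lastIndexOf(buf, (byte) 5, 3));
    }
}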

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java

/**
 * Convert the next few bytes in the given buffer at the given position to a
 * string. Used for error messages.
 */
private static String nextBytesToStr(ByteBuffer buf, int pos) {
    int maxBytes = buf.limit() - pos;
    int numBytes = Math.min(16, maxBytes);
    return Bytes.toStringBinary(buf.array(), buf.arrayOffset() + pos, numBytes)
            + (numBytes < maxBytes ? "..." : "");
}

From source file:com.amazonaws.services.kinesis.clientlibrary.types.UserRecord.java

/**
 * This method deaggregates the given list of Amazon Kinesis records into a
 * list of KPL user records. Any KPL user records whose explicit hash key or
 * partition key falls outside the range of the startingHashKey and the
 * endingHashKey are discarded from the resulting list. This method will
 * then return the resulting list of KPL user records.
 *
 * @param records
 *            A list of Amazon Kinesis records, each possibly aggregated.
 * @param startingHashKey
 *            A BigInteger representing the starting hash key that the
 *            explicit hash keys or partition keys of retained resulting KPL
 *            user records must be greater than or equal to.
 * @param endingHashKey
 *            A BigInteger representing the ending hash key that the
 *            explicit hash keys or partition keys of retained resulting KPL
 *            user records must be smaller than or equal to.
 * @return A resulting list of KPL user records whose explicit hash keys or
 *          partition keys fall within the range of the startingHashKey and
 *          the endingHashKey.
 */
// CHECKSTYLE:OFF NPathComplexity
public static List<UserRecord> deaggregate(List<Record> records, BigInteger startingHashKey,
        BigInteger endingHashKey) {
    List<UserRecord> result = new ArrayList<>();
    byte[] magic = new byte[AGGREGATED_RECORD_MAGIC.length];
    byte[] digest = new byte[DIGEST_SIZE];

    for (Record r : records) {
        boolean isAggregated = true;
        long subSeqNum = 0;
        ByteBuffer bb = r.getData();

        if (bb.remaining() >= magic.length) {
            bb.get(magic);
        } else {
            isAggregated = false;
        }

        if (!Arrays.equals(AGGREGATED_RECORD_MAGIC, magic) || bb.remaining() <= DIGEST_SIZE) {
            isAggregated = false;
        }

        if (isAggregated) {
            int oldLimit = bb.limit();
            bb.limit(oldLimit - DIGEST_SIZE);
            byte[] messageData = new byte[bb.remaining()];
            bb.get(messageData);
            bb.limit(oldLimit);
            bb.get(digest);
            byte[] calculatedDigest = md5(messageData);

            if (!Arrays.equals(digest, calculatedDigest)) {
                isAggregated = false;
            } else {
                try {
                    Messages.AggregatedRecord ar = Messages.AggregatedRecord.parseFrom(messageData);
                    List<String> pks = ar.getPartitionKeyTableList();
                    List<String> ehks = ar.getExplicitHashKeyTableList();
                    long aat = r.getApproximateArrivalTimestamp() == null ? -1
                            : r.getApproximateArrivalTimestamp().getTime();
                    try {
                        int recordsInCurrRecord = 0;
                        for (Messages.Record mr : ar.getRecordsList()) {
                            String explicitHashKey = null;
                            String partitionKey = pks.get((int) mr.getPartitionKeyIndex());
                            if (mr.hasExplicitHashKeyIndex()) {
                                explicitHashKey = ehks.get((int) mr.getExplicitHashKeyIndex());
                            }

                            BigInteger effectiveHashKey = explicitHashKey != null
                                    ? new BigInteger(explicitHashKey)
                                    : new BigInteger(1, md5(partitionKey.getBytes("UTF-8")));

                            if (effectiveHashKey.compareTo(startingHashKey) < 0
                                    || effectiveHashKey.compareTo(endingHashKey) > 0) {
                                for (int toRemove = 0; toRemove < recordsInCurrRecord; ++toRemove) {
                                    result.remove(result.size() - 1);
                                }
                                break;
                            }

                            ++recordsInCurrRecord;
                            Record record = new Record().withData(ByteBuffer.wrap(mr.getData().toByteArray()))
                                    .withPartitionKey(partitionKey).withSequenceNumber(r.getSequenceNumber())
                                    .withApproximateArrivalTimestamp(aat < 0 ? null : new Date(aat));
                            result.add(new UserRecord(true, record, subSeqNum++, explicitHashKey));
                        }
                    } catch (Exception e) {
                        StringBuilder sb = new StringBuilder();
                        sb.append("Unexpected exception during deaggregation, record was:\n");
                        sb.append("PKS:\n");
                        for (String s : pks) {
                            sb.append(s).append("\n");
                        }
                        sb.append("EHKS: \n");
                        for (String s : ehks) {
                            sb.append(s).append("\n");
                        }
                        for (Messages.Record mr : ar.getRecordsList()) {
                            sb.append("Record: [hasEhk=").append(mr.hasExplicitHashKeyIndex()).append(", ")
                                    .append("ehkIdx=").append(mr.getExplicitHashKeyIndex()).append(", ")
                                    .append("pkIdx=").append(mr.getPartitionKeyIndex()).append(", ")
                                    .append("dataLen=").append(mr.getData().toByteArray().length).append("]\n");
                        }
                        sb.append("Sequence number: ").append(r.getSequenceNumber()).append("\n")
                                .append("Raw data: ")
                                .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData))
                                .append("\n");
                        LOG.error(sb.toString(), e);
                    }
                } catch (InvalidProtocolBufferException e) {
                    isAggregated = false;
                }
            }
        }

        if (!isAggregated) {
            bb.rewind();
            result.add(new UserRecord(r));
        }
    }
    return result;
}
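
A hedged usage sketch: to keep every deaggregated record, the method above can be called with bounds spanning the full 128-bit Kinesis hash key range. The class and helper names (DeaggregateAllDemo, deaggregateAll) are hypothetical; only the deaggregate() signature shown above is assumed.

import java.math.BigInteger;
import java.util.List;

import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
import com.amazonaws.services.kinesis.model.Record;

public class DeaggregateAllDemo {
    // 0 .. 2^128 - 1 covers the whole Kinesis hash key range, so no user record is filtered out
    public static List<UserRecord> deaggregateAll(List<Record> records) {
        BigInteger start = BigInteger.ZERO;
        BigInteger end = BigInteger.valueOf(2).pow(128).subtract(BigInteger.ONE);
        return UserRecord.deaggregate(records, start, end);
    }
}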

From source file:com.unister.semweb.drums.TestUtils.java

/**
 * This function checks whether the file with the given filename contains exactly the given LinkData objects.
 *
 * @param dbFileName
 *            the name of the file
 * @param linkDataList
 *            the array, containing LinkData
 * @throws IOException
 * @throws FileLockException
 */
public static boolean checkContentFile(String dbFileName, DummyKVStorable[] linkDataList)
        throws IOException, FileLockException {
    // load file
    DummyKVStorable prototype = gp.getPrototype();
    HeaderIndexFile<DummyKVStorable> dbfile = new HeaderIndexFile<DummyKVStorable>(dbFileName, 1, TestUtils.gp);
    ByteBuffer buffer = ByteBuffer.allocate(prototype.getSize());
    long offset = 0;
    int k = 0;
    while (offset < dbfile.getFilledUpFromContentStart()) {
        dbfile.read(offset, buffer);
        buffer.flip();
        DummyKVStorable newLinkData = (DummyKVStorable) prototype.fromByteBuffer(buffer);
        if (!newLinkData.equals(linkDataList[k])) {
            return false;
        }
        k++;
        offset += buffer.limit();
        buffer.clear();
    }
    dbfile.close();
    return true;
}