Example usage for java.nio ByteBuffer get

List of usage examples for java.nio ByteBuffer get

Introduction

On this page you can find example usages of java.nio.ByteBuffer.get.

Prototype

public abstract byte get(int index);

Source Link

Document

Returns the byte at the specified index and does not change the position.

Usage

From source file:io.mycat.util.ByteBufferUtil.java

/**
 * Reads a big-endian unsigned 16-bit length starting at {@code position}
 * using absolute gets, so the buffer's position is never moved.
 *
 * @param bb       buffer to read from
 * @param position absolute index of the high-order length byte
 * @return the decoded length, in the range 0..65535
 */
public static int getShortLength(ByteBuffer bb, int position) {
    int high = bb.get(position) & 0xFF;
    int low = bb.get(position + 1) & 0xFF;
    return (high << 8) | low;
}

From source file:com.icloud.framework.core.nio.ByteBufferUtil.java

/**
 * Returns a new heap buffer containing a copy of the readable bytes of
 * {@code o} (position..limit), positioned at 0. The source buffer's
 * position, limit and mark are left untouched. Works for both direct and
 * heap source buffers.
 *
 * @param o buffer to copy; must not be null
 * @return an independent buffer holding the same readable bytes
 */
public static ByteBuffer clone(ByteBuffer o) {
    assert o != null;

    if (o.remaining() == 0)
        return ByteBuffer.wrap(new byte[0]);

    ByteBuffer clone = ByteBuffer.allocate(o.remaining());

    // Bulk-copy through a duplicate: the duplicate shares content but has its
    // own position/limit, so the source is not disturbed. This replaces the
    // previous per-byte loop used for direct buffers.
    clone.put(o.duplicate());
    clone.flip();

    return clone;
}

From source file:net.onrc.openvirtex.packet.OVXLLDP.java

/**
 * Checks whether the given raw packet looks like an LLDP/BDDP frame: it must
 * meet the minimum size, carry one of the known LLDP/BDDP destination MAC
 * addresses, and have an LLDP or BSN ethertype (a single VLAN tag, if
 * present, is skipped). The packet is assumed to start with an Ethernet
 * header.
 *
 * @param packet raw Ethernet frame bytes, may be null
 * @return true if the packet is LLDP/BDDP, false otherwise
 */
public static boolean isLLDP(final byte[] packet) {
    // Reject null or undersized packets up front.
    if (packet == null || packet.length < MINIMUM_LLDP_SIZE) {
        return false;
    }

    // Destination MAC is the first 6 bytes of the Ethernet header.
    final ByteBuffer bb = ByteBuffer.wrap(packet);
    final byte[] dst = new byte[6];
    bb.get(dst);

    final boolean knownDestination = Arrays.equals(dst, OVXLLDP.LLDP_NICIRA)
            || Arrays.equals(dst, OVXLLDP.LLDP_MULTICAST)
            || Arrays.equals(dst, OVXLLDP.BDDP_MULTICAST);
    if (!knownDestination) {
        return false;
    }

    // Fetch the ethertype; if the frame is VLAN-tagged, the real ethertype
    // sits 4 bytes further in.
    short etherType = bb.getShort(ETHERTYPE_OFFSET);
    if (etherType == ETHERTYPE_VLAN) {
        etherType = bb.getShort(ETHERTYPE_OFFSET + 4);
    }

    return etherType == Ethernet.TYPE_LLDP || etherType == Ethernet.TYPE_BSN;
}

From source file:com.googlecode.jcimd.TextMessageUserDataFactory.java

/**
 * Encodes {@code textMessage} into bytes using the given charset.
 *
 * Uses {@link Charset#encode(String)} rather than driving a raw
 * {@link java.nio.charset.CharsetEncoder} by hand: the previous version
 * ignored the {@code CoderResult} of {@code encode(...)} and never called
 * {@code flush(...)}, which could silently truncate output on malformed
 * input and drop trailing bytes for stateful charsets. Charset.encode
 * completes the encode (including the final flush) and substitutes the
 * charset's replacement sequence for unmappable characters.
 *
 * @param charset     target charset
 * @param textMessage text to encode
 * @return the encoded bytes, sized exactly to the output
 */
private static byte[] encodeAs(Charset charset, String textMessage) {
    ByteBuffer byteBuffer = charset.encode(textMessage);
    byte[] bytes = new byte[byteBuffer.remaining()];
    byteBuffer.get(bytes);
    return bytes;
}

From source file:com.amazonaws.services.kinesis.clientlibrary.types.UserRecord.java

/**
 * This method deaggregates the given list of Amazon Kinesis records into a
 * list of KPL user records. Any KPL user records whose explicit hash key or
 * partition key falls outside the range of the startingHashKey and the
 * endingHashKey are discarded from the resulting list. This method will
 * then return the resulting list of KPL user records.
 *
 * @param records
 *            A list of Amazon Kinesis records, each possibly aggregated.
 * @param startingHashKey
 *            A BigInteger representing the starting hash key that the
 *            explicit hash keys or partition keys of retained resulting KPL
 *            user records must be greater than or equal to.
 * @param endingHashKey
 *            A BigInteger representing the ending hash key that the
 *            explicit hash keys or partition keys of retained resulting KPL
 *            user records must be smaller than or equal to.
 * @return A resulting list of KPL user records whose explicit hash keys or
 *          partition keys fall within the range of the startingHashKey and
 *          the endingHashKey.
 */
// CHECKSTYLE:OFF NPathComplexity
public static List<UserRecord> deaggregate(List<Record> records, BigInteger startingHashKey,
        BigInteger endingHashKey) {
    List<UserRecord> result = new ArrayList<>();
    byte[] magic = new byte[AGGREGATED_RECORD_MAGIC.length];
    byte[] digest = new byte[DIGEST_SIZE];

    for (Record r : records) {
        boolean isAggregated = true;
        long subSeqNum = 0;
        ByteBuffer bb = r.getData();

        // An aggregated record begins with the KPL magic prefix; anything
        // too short to hold it cannot be aggregated.
        if (bb.remaining() >= magic.length) {
            bb.get(magic);
        } else {
            isAggregated = false;
        }

        // After the magic there must be a payload plus a trailing digest.
        if (!Arrays.equals(AGGREGATED_RECORD_MAGIC, magic) || bb.remaining() <= DIGEST_SIZE) {
            isAggregated = false;
        }

        if (isAggregated) {
            // Layout: [magic][protobuf message][DIGEST_SIZE-byte MD5 of the
            // message]. Temporarily shrink the limit to read just the message.
            int oldLimit = bb.limit();
            bb.limit(oldLimit - DIGEST_SIZE);
            byte[] messageData = new byte[bb.remaining()];
            bb.get(messageData);
            bb.limit(oldLimit);
            bb.get(digest);
            byte[] calculatedDigest = md5(messageData);

            if (!Arrays.equals(digest, calculatedDigest)) {
                // Checksum mismatch: fall back to treating this as a plain
                // (non-aggregated) record.
                isAggregated = false;
            } else {
                try {
                    Messages.AggregatedRecord ar = Messages.AggregatedRecord.parseFrom(messageData);
                    List<String> pks = ar.getPartitionKeyTableList();
                    List<String> ehks = ar.getExplicitHashKeyTableList();
                    // -1 encodes "no arrival timestamp"; mapped back to null
                    // when the sub-record is built below.
                    long aat = r.getApproximateArrivalTimestamp() == null ? -1
                            : r.getApproximateArrivalTimestamp().getTime();
                    try {
                        int recordsInCurrRecord = 0;
                        for (Messages.Record mr : ar.getRecordsList()) {
                            String explicitHashKey = null;
                            String partitionKey = pks.get((int) mr.getPartitionKeyIndex());
                            if (mr.hasExplicitHashKeyIndex()) {
                                explicitHashKey = ehks.get((int) mr.getExplicitHashKeyIndex());
                            }

                            // Shard routing uses the explicit hash key when
                            // present, otherwise the MD5 of the partition key.
                            BigInteger effectiveHashKey = explicitHashKey != null
                                    ? new BigInteger(explicitHashKey)
                                    : new BigInteger(1, md5(partitionKey.getBytes("UTF-8")));

                            if (effectiveHashKey.compareTo(startingHashKey) < 0
                                    || effectiveHashKey.compareTo(endingHashKey) > 0) {
                                // Out-of-range sub-record: roll back every
                                // sub-record already emitted for this aggregate
                                // and skip the rest of it.
                                for (int toRemove = 0; toRemove < recordsInCurrRecord; ++toRemove) {
                                    result.remove(result.size() - 1);
                                }
                                break;
                            }

                            ++recordsInCurrRecord;
                            Record record = new Record().withData(ByteBuffer.wrap(mr.getData().toByteArray()))
                                    .withPartitionKey(partitionKey).withSequenceNumber(r.getSequenceNumber())
                                    .withApproximateArrivalTimestamp(aat < 0 ? null : new Date(aat));
                            result.add(new UserRecord(true, record, subSeqNum++, explicitHashKey));
                        }
                    } catch (Exception e) {
                        // Dump the full aggregate contents for debugging; the
                        // record is dropped (not re-added as non-aggregated).
                        StringBuilder sb = new StringBuilder();
                        sb.append("Unexpected exception during deaggregation, record was:\n");
                        sb.append("PKS:\n");
                        for (String s : pks) {
                            sb.append(s).append("\n");
                        }
                        sb.append("EHKS: \n");
                        for (String s : ehks) {
                            sb.append(s).append("\n");
                        }
                        for (Messages.Record mr : ar.getRecordsList()) {
                            sb.append("Record: [hasEhk=").append(mr.hasExplicitHashKeyIndex()).append(", ")
                                    .append("ehkIdx=").append(mr.getExplicitHashKeyIndex()).append(", ")
                                    .append("pkIdx=").append(mr.getPartitionKeyIndex()).append(", ")
                                    .append("dataLen=").append(mr.getData().toByteArray().length).append("]\n");
                        }
                        sb.append("Sequence number: ").append(r.getSequenceNumber()).append("\n")
                                .append("Raw data: ")
                                .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData))
                                .append("\n");
                        LOG.error(sb.toString(), e);
                    }
                } catch (InvalidProtocolBufferException e) {
                    // Magic and digest matched but the payload is not a valid
                    // protobuf message; treat as non-aggregated.
                    isAggregated = false;
                }
            }
        }

        if (!isAggregated) {
            // Plain Kinesis record: undo the bytes consumed by the magic
            // probe, then wrap the record unchanged.
            bb.rewind();
            result.add(new UserRecord(r));
        }
    }
    return result;
}

From source file:Main.java

/**
 * Splits a no-dictionary (high-cardinality) column block into its row values.
 * Each value in {@code columnarKeyBlockData} is laid out as a 2-byte
 * big-endian length followed by that many data bytes; the length prefixes
 * are stripped and only the data bytes are returned, in order.
 *
 * @param columnarKeyBlockData concatenated length-prefixed row values
 * @return the row values without their length prefixes; empty for empty input
 */
public static List<byte[]> readColumnarKeyBlockDataForNoDictionaryCols(byte[] columnarKeyBlockData) {
    List<byte[]> columnarKeyBlockDataList = new ArrayList<byte[]>(50);
    // Wrap the input directly instead of allocate+put+flip: reads below only
    // copy bytes out, so the extra full-array copy was unnecessary.
    ByteBuffer noDictionaryValKeyStoreDataHolder = ByteBuffer.wrap(columnarKeyBlockData);
    while (noDictionaryValKeyStoreDataHolder.hasRemaining()) {
        short dataLength = noDictionaryValKeyStoreDataHolder.getShort();
        byte[] noDictionaryValKeyData = new byte[dataLength];
        noDictionaryValKeyStoreDataHolder.get(noDictionaryValKeyData);
        columnarKeyBlockDataList.add(noDictionaryValKeyData);
    }
    return columnarKeyBlockDataList;
}

From source file:com.palantir.atlasdb.keyvalue.cassandra.CassandraKeyValueServices.java

/**
 * Splits a composite cell name into its column-name bytes and the
 * bit-inverted 8-byte value that follows. Expected layout (big-endian):
 * [2-byte name length][name bytes][2-byte 0 marker][length byte == 8][long].
 * The caller's buffer is untouched; all reads happen on a slice.
 *
 * @param composite composite cell-name buffer
 * @return the column name paired with the complement of the trailing long
 */
static Pair<byte[], Long> decompose(ByteBuffer composite) {
    // Slice so the caller's position/limit survive; force big-endian reads.
    composite = composite.slice().order(ByteOrder.BIG_ENDIAN);

    // First component: 2-byte length followed by the column-name bytes.
    byte[] colName = new byte[composite.getShort()];
    composite.get(colName);

    // End-of-component marker must be zero.
    Validate.isTrue(composite.getShort() == 0);

    // Second component: a single long, announced by a length byte of 8.
    Validate.isTrue(composite.get() == 8);
    long ts = composite.getLong();

    return Pair.create(colName, (~ts));
}

From source file:com.github.blindpirate.gogradle.util.IOUtils.java

/**
 * Copies the entire content of {@code buf} — from index 0 up to its limit —
 * into a fresh array. Side effect: the buffer's position is left at its
 * limit after the call.
 *
 * @param buf buffer to read from the beginning
 * @return a new array holding bytes [0, limit)
 */
public static byte[] toByteArray(ByteBuffer buf) {
    // Cast to Buffer so the call binds to Buffer.position(int) — presumably
    // to avoid the Java 9 covariant-override linkage issue when running on
    // older JREs.
    ((Buffer) buf).position(0);
    byte[] result = new byte[buf.remaining()];
    buf.get(result, 0, result.length);
    return result;
}

From source file:com.glaf.core.util.BinaryUtils.java

/**
 * Returns a copy of all the bytes from the given <code>ByteBuffer</code>,
 * from the beginning to the buffer's limit; or null if the input is null.
 * <p>
 * The internal states of the given byte buffer will be restored when this
 * method completes execution.
 * <p>
 * When handling <code>ByteBuffer</code> from user's input, it's typical to
 * call the {@link #copyBytesFrom(ByteBuffer)} instead of
 * {@link #copyAllBytesFrom(ByteBuffer)} so as to account for the position
 * of the input <code>ByteBuffer</code>. The opposite is typically true,
 * however, when handling <code>ByteBuffer</code> from within the
 * unmarshallers of the low-level clients.
 */
public static byte[] copyAllBytesFrom(ByteBuffer bb) {
    if (bb == null)
        return null;
    if (bb.hasArray())
        // Honor arrayOffset(): for a sliced/offset heap buffer, logical index
        // 0 lives at array()[arrayOffset()], not array()[0]. The previous
        // Arrays.copyOf(bb.array(), bb.limit()) returned the wrong bytes for
        // such buffers.
        return Arrays.copyOfRange(bb.array(), bb.arrayOffset(), bb.arrayOffset() + bb.limit());
    // Non-accessible-array buffer (direct or read-only): bulk-read from the
    // start, then restore the caller's position. Buffer.mark()/reset() can't
    // be used because rewind() discards the mark, so save the position
    // manually. (The old code also called bb.mark() here, which was dead —
    // the mark was discarded by the rewind before it could be used.)
    final int marked = bb.position();
    try {
        byte[] dst = new byte[bb.rewind().remaining()];
        bb.get(dst);
        return dst;
    } finally {
        bb.position(marked);
    }
}

From source file:edu.stanford.mobisocial.dungbeetle.model.DbObject.java

/**
 * Maps a feed hash to a display color: the hash seeds a PRNG that picks one
 * of the base hues (jittered slightly) plus bright saturation/value.
 *
 * NOTE(review): stability of the color for a given hash depends on the
 * seeded SecureRandom implementation producing the same sequence across
 * devices/releases — confirm if cross-device consistency matters here.
 */
private static int colorFor(Long hash) {
    float[] baseHues = Feed.getBaseHues();
    // Serialize the hash into 8 bytes to use as the PRNG seed.
    ByteBuffer bos = ByteBuffer.allocate(8);
    bos.putLong(hash);
    byte[] hashBytes = new byte[8];
    bos.position(0);
    bos.get(hashBytes);
    SecureRandom r = new SecureRandom(hashBytes);
    // Pick a base hue and random S/V; then jitter hue by +/-10 degrees and
    // compress S and V into [0.8, 1.0) so colors stay vivid.
    float hsv[] = new float[] { baseHues[r.nextInt(baseHues.length)], r.nextFloat(), r.nextFloat() };
    hsv[0] = hsv[0] + 20 * r.nextFloat() - 10;
    hsv[1] = hsv[1] * 0.2f + 0.8f;
    hsv[2] = hsv[2] * 0.2f + 0.8f;
    return Color.HSVToColor(hsv);
}