Example usage for java.nio ByteBuffer duplicate

Introduction

This page collects example usages of java.nio.ByteBuffer.duplicate() from open-source projects.

Prototype

public abstract ByteBuffer duplicate();

Document

Returns a duplicated buffer that shares its content with this buffer. Changes made through either buffer are visible in the other; the two buffers' position, limit, and mark values, however, are independent.
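
A minimal sketch of these semantics (the buffer contents and class name here are illustrative, not taken from the examples below):

import java.nio.ByteBuffer;

public class DuplicateDemo {
    public static void main(String[] args) {
        ByteBuffer original = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
        ByteBuffer dup = original.duplicate();

        dup.get();                                 // advances only the duplicate's position
        System.out.println(original.position());  // prints 0 -- positions are independent

        dup.put(0, (byte) 9);                      // absolute write through the duplicate
        System.out.println(original.get(0));       // prints 9 -- content is shared
    }
}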

Usage

From source file:org.apache.cassandra.utils.ByteBufferUtil.java

/**
 * You should almost never use this.  Instead, use the write* methods to avoid copies.
 */
public static byte[] getArray(ByteBuffer buffer) {
    int length = buffer.remaining();

    if (buffer.hasArray()) {
        int start = buffer.position();
        if (buffer.arrayOffset() == 0 && start == 0 && length == buffer.array().length)
            return buffer.array();
        else
            return Arrays.copyOfRange(buffer.array(), start + buffer.arrayOffset(),
                    start + length + buffer.arrayOffset());
    }
    // else, DirectByteBuffer.get() is the fastest route
    byte[] bytes = new byte[length];
    buffer.duplicate().get(bytes);

    return bytes;
}

From source file:org.apache.cassandra.utils.FBUtilities.java

public static byte[] hash(ByteBuffer... data) {
    MessageDigest messageDigest = localMD5Digest.get();
    for (ByteBuffer block : data) {
        messageDigest.update(block.duplicate());
    }

    return messageDigest.digest();
}
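
The duplicate() call above is what keeps the caller's buffers intact: MessageDigest.update(ByteBuffer) consumes the bytes between position and limit and advances the position, so digesting a duplicate leaves each original block untouched. A small sketch of that effect (the "payload" data and class name are illustrative, not part of the FBUtilities source):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class DigestDemo {
    public static void main(String[] args) throws Exception {
        ByteBuffer data = ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8));
        MessageDigest md = MessageDigest.getInstance("MD5");

        md.update(data.duplicate());            // drains only the duplicate
        System.out.println(data.remaining());   // prints 7 -- the original view is intact

        md.update(data);                        // digesting the original instead...
        System.out.println(data.remaining());   // prints 0 -- ...consumes it
    }
}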

From source file:org.apache.commons.crypto.examples.CipherByteBufferExample.java

public static void main(String[] args) throws Exception {
    final SecretKeySpec key = new SecretKeySpec(getUTF8Bytes("1234567890123456"), "AES");
    final IvParameterSpec iv = new IvParameterSpec(getUTF8Bytes("1234567890123456"));
    Properties properties = new Properties();
    // Creates a CryptoCipher instance with the transformation and properties.
    final String transform = "AES/CBC/PKCS5Padding";
    final ByteBuffer outBuffer;
    final int bufferSize = 1024;
    final int updateBytes;
    final int finalBytes;
    try (CryptoCipher encipher = Utils.getCipherInstance(transform, properties)) {

        ByteBuffer inBuffer = ByteBuffer.allocateDirect(bufferSize);
        outBuffer = ByteBuffer.allocateDirect(bufferSize);
        inBuffer.put(getUTF8Bytes("hello world!"));

        inBuffer.flip(); // ready for the cipher to read it
        // Show the data is there
        System.out.println("inBuffer=" + asString(inBuffer));

        // Initializes the cipher with ENCRYPT_MODE, key and iv.
        encipher.init(Cipher.ENCRYPT_MODE, key, iv);
        // Continues a multiple-part encryption/decryption operation for byte buffer.
        updateBytes = encipher.update(inBuffer, outBuffer);
        System.out.println(updateBytes);

        // We should call doFinal at the end of encryption/decryption.
        finalBytes = encipher.doFinal(inBuffer, outBuffer);
        System.out.println(finalBytes);
    }

    outBuffer.flip(); // ready for use as decrypt
    byte[] encoded = new byte[updateBytes + finalBytes];
    outBuffer.duplicate().get(encoded);
    System.out.println(Arrays.toString(encoded));

    // Now reverse the process
    try (CryptoCipher decipher = Utils.getCipherInstance(transform, properties)) {
        decipher.init(Cipher.DECRYPT_MODE, key, iv);
        ByteBuffer decoded = ByteBuffer.allocateDirect(bufferSize);
        decipher.update(outBuffer, decoded);
        decipher.doFinal(outBuffer, decoded);
        decoded.flip(); // ready for use
        System.out.println("decoded=" + asString(decoded));
    }
}

From source file:org.apache.commons.crypto.examples.CipherByteBufferExample.java

/**
 * Converts ByteBuffer to String
 * 
 * @param buffer input byte buffer
 * @return the converted string
 */
private static String asString(ByteBuffer buffer) {
    final ByteBuffer copy = buffer.duplicate();
    final byte[] bytes = new byte[Math.min(copy.remaining(), 50)];
    copy.get(bytes);
    return new String(bytes, StandardCharsets.UTF_8);
}

From source file:org.apache.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testBufferSwap() {
    ByteBuffer biggerOffset = makeCollectorBuffer(1, (byte) 0x00, 0x11);
    ByteBuffer smallerOffset = makeCollectorBuffer(0, (byte) 0x20, 0x00);

    ByteBuffer buffer = ByteBuffer.allocate(HyperLogLogCollector.getLatestNumBytesForDenseStorage());
    HyperLogLogCollector collector = HyperLogLogCollector.makeCollector(buffer.duplicate());

    // make sure the original buffer gets modified
    collector.fold(biggerOffset);
    Assert.assertEquals(collector, HyperLogLogCollector.makeCollector(buffer.duplicate()));

    // make sure the original buffer gets modified
    collector.fold(smallerOffset);
    Assert.assertEquals(collector, HyperLogLogCollector.makeCollector(buffer.duplicate()));
}

From source file:org.apache.hadoop.hbase.util.Bytes.java

/**
 * @param a left operand
 * @param buf right operand
 * @return True if equal
 */
public static boolean equals(byte[] a, ByteBuffer buf) {
    if (a == null)
        return buf == null;
    if (buf == null)
        return false;
    if (a.length != buf.remaining())
        return false;

    // Thou shalt not modify the original byte buffer in what should be read only operations.
    ByteBuffer b = buf.duplicate();
    for (byte anA : a) {
        if (anA != b.get()) {
            return false;
        }
    }
    return true;
}

From source file:org.apache.hadoop.hdfs.BlockReaderLocalLegacy.java

/**
 * Tries to read as many bytes as possible into supplied buffer, checksumming
 * each chunk if needed.
 *
 * <b>Preconditions:</b>
 * <ul>
 * <li>
 * If checksumming is enabled, buf.remaining must be a multiple of
 * bytesPerChecksum. Note that this is not a requirement for clients of
 * read(ByteBuffer) - in the case of non-checksum-sized read requests,
 * read(ByteBuffer) will substitute a suitably sized buffer to pass to this
 * method.
 * </li>
 * </ul>
 * <b>Postconditions:</b>
 * <ul>
 * <li>buf.limit and buf.mark are unchanged.</li>
 * <li>buf.position += min(offsetFromChunkBoundary, totalBytesRead) - so the
 * requested bytes can be read straight from the buffer</li>
 * </ul>
 *
 * @param buf
 *          byte buffer to write bytes to. If checksums are not required, buf
 *          can have any number of bytes remaining, otherwise there must be a
 *          multiple of the checksum chunk size remaining.
 * @return <tt>max(min(totalBytesRead, len) - offsetFromChunkBoundary, 0)</tt>
 *         that is, the number of useful bytes (up to the amount
 *         requested) readable from the buffer by the client.
 */
private synchronized int doByteBufferRead(ByteBuffer buf) throws IOException {
    if (verifyChecksum) {
        assert buf.remaining() % bytesPerChecksum == 0;
    }
    int dataRead = -1;

    int oldpos = buf.position();
    // Read as much as we can into the buffer.
    dataRead = fillBuffer(dataIn, buf);

    if (dataRead == -1) {
        return -1;
    }

    if (verifyChecksum) {
        ByteBuffer toChecksum = buf.duplicate();
        toChecksum.position(oldpos);
        toChecksum.limit(oldpos + dataRead);

        checksumBuff.clear();
        // Equivalent to (int)Math.ceil(toChecksum.remaining() * 1.0 / bytesPerChecksum );
        int numChunks = (toChecksum.remaining() + bytesPerChecksum - 1) / bytesPerChecksum;
        checksumBuff.limit(checksumSize * numChunks);

        fillBuffer(checksumIn, checksumBuff);
        checksumBuff.flip();

        checksum.verifyChunkedSums(toChecksum, checksumBuff, filename, this.startOffset);
    }

    if (dataRead >= 0) {
        buf.position(oldpos + Math.min(offsetFromChunkBoundary, dataRead));
    }

    if (dataRead < offsetFromChunkBoundary) {
        // yikes, didn't even get enough bytes to honour offset. This can happen
        // even if we are verifying checksums if we are at EOF.
        offsetFromChunkBoundary -= dataRead;
        dataRead = 0;
    } else {
        dataRead -= offsetFromChunkBoundary;
        offsetFromChunkBoundary = 0;
    }

    return dataRead;
}

From source file:org.apache.hadoop.hdfs.server.datanode.DWRRBlockReceiver.java

/**
 * Receives and processes a packet. It can contain many chunks.
 * returns the number of data bytes that the packet has.
 */
private int receivePacket() throws IOException {
    // read the next packet
    packetReceiver.receiveNextPacket(in);

    PacketHeader header = packetReceiver.getHeader();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Receiving one packet for block " + block + ": " + header);
    }

    // Sanity check the header
    if (header.getOffsetInBlock() > replicaInfo.getNumBytes()) {
        throw new IOException("Received an out-of-sequence packet for " + block + "from " + inAddr
                + " at offset " + header.getOffsetInBlock() + ". Expecting packet starting at "
                + replicaInfo.getNumBytes());
    }
    if (header.getDataLen() < 0) {
        throw new IOException("Got wrong length during writeBlock(" + block + ") from " + inAddr + " at offset "
                + header.getOffsetInBlock() + ": " + header.getDataLen());
    }

    long offsetInBlock = header.getOffsetInBlock();
    long seqno = header.getSeqno();
    boolean lastPacketInBlock = header.isLastPacketInBlock();
    int len = header.getDataLen();
    boolean syncBlock = header.getSyncBlock();

    // avoid double sync'ing on close
    if (syncBlock && lastPacketInBlock) {
        this.syncOnClose = false;
    }

    // update received bytes
    long firstByteInBlock = offsetInBlock;
    offsetInBlock += len;
    if (replicaInfo.getNumBytes() < offsetInBlock) {
        replicaInfo.setNumBytes(offsetInBlock);
    }

    // put in queue for pending acks, unless sync was requested
    if (responder != null && !syncBlock && !shouldVerifyChecksum()) {
        ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock, offsetInBlock,
                Status.SUCCESS);
    }

    //First write the packet to the mirror:
    if (mirrorOut != null && !mirrorError) {
        try {
            long begin = Time.monotonicNow();
            packetReceiver.mirrorPacketTo(mirrorOut);
            mirrorOut.flush();
            long duration = Time.monotonicNow() - begin;
            if (duration > datanodeSlowLogThresholdMs) {
                LOG.warn("Slow DWRRBlockReceiver write packet to mirror took " + duration + "ms (threshold="
                        + datanodeSlowLogThresholdMs + "ms)");
            }
        } catch (IOException e) {
            handleMirrorOutError(e);
        }
    }

    ByteBuffer dataBuf = packetReceiver.getDataSlice();
    ByteBuffer checksumBuf = packetReceiver.getChecksumSlice();

    if (lastPacketInBlock || len == 0) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Receiving an empty packet or the end of the block " + block);
        }
        // sync block if requested
        //      if (syncBlock) {
        //        flushOrSync(true);
        //      }
    } else {
        int checksumLen = ((len + bytesPerChecksum - 1) / bytesPerChecksum) * checksumSize;

        if (checksumBuf.capacity() != checksumLen) {
            throw new IOException("Length of checksums in packet " + checksumBuf.capacity()
                    + " does not match calculated checksum " + "length " + checksumLen);
        }

        if (shouldVerifyChecksum()) {
            try {
                verifyChunks(dataBuf, checksumBuf);
            } catch (IOException ioe) {
                // checksum error detected locally. there is no reason to continue.
                if (responder != null) {
                    try {
                        ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock,
                                offsetInBlock, Status.ERROR_CHECKSUM);
                        // Wait until the responder sends back the response
                        // and interrupt this thread.
                        Thread.sleep(3000);
                    } catch (InterruptedException e) {
                    }
                }
                throw new IOException("Terminating due to a checksum error." + ioe);
            }

            if (needsChecksumTranslation) {
                // overwrite the checksums in the packet buffer with the
                // appropriate polynomial for the disk storage.
                translateChunks(dataBuf, checksumBuf);
            }
        }

        // by this point, the data in the buffer uses the disk checksum

        try {
            long onDiskLen = replicaInfo.getBytesOnDisk();
            if (onDiskLen < offsetInBlock) {
                //finally write to the disk :

                //          if (onDiskLen % bytesPerChecksum != 0) {
                //            // prepare to overwrite last checksum
                //            adjustCrcFilePosition();
                //          }

                // If this is a partial chunk, then read in pre-existing checksum
                if (firstByteInBlock % bytesPerChecksum != 0) {
                    LOG.info("Packet starts at " + firstByteInBlock + " for " + block
                            + " which is not a multiple of bytesPerChecksum " + bytesPerChecksum);
                    long offsetInChecksum = BlockMetadataHeader.getHeaderSize()
                            + onDiskLen / bytesPerChecksum * checksumSize;
                    computePartialChunkCrc(onDiskLen, offsetInChecksum, bytesPerChecksum);
                }

                int startByteToDisk = (int) (onDiskLen - firstByteInBlock) + dataBuf.arrayOffset()
                        + dataBuf.position();

                int numBytesToDisk = (int) (offsetInBlock - onDiskLen);

                toBeWritten.add(new DWRRWriteRequest(dataBuf.array().clone(), startByteToDisk, numBytesToDisk,
                        checksumBuf.duplicate(), checksumBuf.array().clone(), checksumLen, len, offsetInBlock,
                        syncBlock, lastPacketInBlock));

                ChunkChecksum last = replicaInfo.getLastChecksumAndDataLen();

                if (offsetInBlock % bytesPerChecksum != 0) {
                    LOG.error("CAMAMILLA " + this + "  mod onDiskLen petara offsetInBlock " + offsetInBlock
                            + " bytesPerChecksum " + bytesPerChecksum); // TODO TODO log
                }
                replicaInfo.setLastChecksumAndDataLen(offsetInBlock, last.getChecksum());

            }
        } catch (IOException iex) {
            datanode.checkDiskError();
            throw iex;
        }
    }

    // if sync was requested, put in queue for pending acks here
    // (after the fsync finished)
    if (responder != null && (syncBlock || shouldVerifyChecksum())) {
        LOG.info("CAMAMILLA " + this + "  PacketResponder enqueue ack al llegir de xarxa 1"); // TODO TODO log
        ((PacketResponder) responder.getRunnable()).enqueue(seqno, lastPacketInBlock, offsetInBlock,
                Status.SUCCESS);
    }

    if (throttler != null) { // throttle I/O
        throttler.throttle(len);
    }

    return lastPacketInBlock ? -1 : len;
}

From source file:org.apache.hadoop.hive.llap.tezplugins.LlapTaskCommunicator.java

private SubmitWorkRequestProto constructSubmitWorkRequest(ContainerId containerId, TaskSpec taskSpec,
        FragmentRuntimeInfo fragmentRuntimeInfo, String hiveQueryId) throws IOException {
    SubmitWorkRequestProto.Builder builder = SubmitWorkRequestProto.newBuilder();
    builder.setFragmentNumber(taskSpec.getTaskAttemptID().getTaskID().getId());
    builder.setAttemptNumber(taskSpec.getTaskAttemptID().getId());
    builder.setContainerIdString(containerId.toString());
    builder.setAmHost(getAmHostString());
    builder.setAmPort(getAddress().getPort());

    Preconditions.checkState(currentQueryIdentifierProto.getDagIndex() == taskSpec.getTaskAttemptID()
            .getTaskID().getVertexID().getDAGId().getId());
    ByteBuffer credentialsBinary = credentialMap.get(currentQueryIdentifierProto);
    if (credentialsBinary == null) {
        credentialsBinary = serializeCredentials(getContext().getCurrentDagInfo().getCredentials());
        credentialMap.putIfAbsent(currentQueryIdentifierProto, credentialsBinary.duplicate());
    } else {
        credentialsBinary = credentialsBinary.duplicate();
    }
    builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
    builder.setWorkSpec(VertexOrBinary.newBuilder().setVertex(Converters.constructSignableVertexSpec(taskSpec,
            currentQueryIdentifierProto, getTokenIdentifier(), user, hiveQueryId)).build());
    // Don't call builder.setWorkSpecSignature() - Tez doesn't sign fragments
    builder.setFragmentRuntimeInfo(fragmentRuntimeInfo);
    return builder.build();
}

From source file:org.apache.hadoop.hive.ql.io.orc.ExternalCache.java

private static OrcTail createOrcTailFromMs(HdfsFileStatusWithId file, ByteBuffer bb) throws IOException {
    if (bb == null)
        return null;
    FileStatus fs = file.getFileStatus();
    ByteBuffer copy = bb.duplicate();
    try {
        OrcTail orcTail = ReaderImpl.extractFileTail(copy, fs.getLen(), fs.getModificationTime());
        // trigger lazy read of metadata to make sure serialized data is not corrupted and readable
        orcTail.getStripeStatistics();
        return orcTail;
    } catch (Exception ex) {
        byte[] data = new byte[bb.remaining()];
        System.arraycopy(bb.array(), bb.arrayOffset() + bb.position(), data, 0, data.length);
        String msg = "Failed to parse the footer stored in cache for file ID " + file.getFileId() + " " + bb
                + " [ " + Hex.encodeHexString(data) + " ]";
        LOG.error(msg, ex);
        return null;
    }
}