Example usage for java.nio ByteBuffer remaining

Introduction

This page lists example usages of java.nio.ByteBuffer.remaining(), collected from open-source projects.

Prototype

public final int remaining() 

Document

Returns the number of remaining elements in this buffer, that is, limit - position.
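
A quick standalone illustration of that invariant (a sketch, not taken from the projects below): relative reads advance the position, and remaining() tracks limit - position throughout.

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5 });
        buf.getShort(); // relative read: position advances from 0 to 2
        System.out.println(buf.remaining());              // 3
        System.out.println(buf.limit() - buf.position()); // 3, by definition
    }
}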

Usage

From source file:org.apache.hadoop.hbase.util.Bytes.java

/**
 * @param a left operand
 * @param buf right operand
 * @return True if equal
 */
public static boolean equals(byte[] a, ByteBuffer buf) {
    if (a == null)
        return buf == null;
    if (buf == null)
        return false;
    if (a.length != buf.remaining())
        return false;

    // Thou shalt not modify the original byte buffer in what should be read only operations.
    ByteBuffer b = buf.duplicate();
    for (byte anA : a) {
        if (anA != b.get()) {
            return false;
        }
    }
    return true;
}
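
The duplicate() call is what makes this safe as a read-only operation: the duplicate shares buf's content but carries its own position and limit, so consuming it leaves the caller's buffer untouched. A minimal sketch of that property:

public static void duplicateLeavesOriginalUntouched() {
    ByteBuffer buf = ByteBuffer.wrap(new byte[] { 1, 2, 3 });
    ByteBuffer view = buf.duplicate();
    while (view.hasRemaining()) {
        view.get();                      // advances only the duplicate's position
    }
    System.out.println(buf.remaining()); // prints 3: the original is untouched
}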

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileWriterV3.java

private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount,
        boolean findMidKey, boolean useTags) throws IOException {
    HFileContext context = new HFileContextBuilder().withBlockSize(4096).withIncludesTags(useTags)
            .withCompression(compressAlgo).build();
    HFileWriterV3 writer = (HFileWriterV3) new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf))
            .withPath(fs, hfilePath).withFileContext(context).withComparator(KeyValue.COMPARATOR).create();

    Random rand = new Random(9713312); // Just a fixed seed.
    List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);

    for (int i = 0; i < entryCount; ++i) {
        byte[] keyBytes = TestHFileWriterV2.randomOrderedKey(rand, i);

        // A random-length random value.
        byte[] valueBytes = TestHFileWriterV2.randomValue(rand);
        KeyValue keyValue = null;
        if (useTags) {
            ArrayList<Tag> tags = new ArrayList<Tag>();
            for (int j = 0; j < 1 + rand.nextInt(4); j++) {
                byte[] tagBytes = new byte[16];
                rand.nextBytes(tagBytes);
                tags.add(new Tag((byte) 1, tagBytes));
            }
            keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes, tags);
        } else {
            keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes);
        }
        writer.append(keyValue);
        keyValues.add(keyValue);
    }

    // Add in an arbitrary order. They will be sorted lexicographically by
    // the key.
    writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
    writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
    writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));

    writer.close();

    FSDataInputStream fsdis = fs.open(hfilePath);

    long fileSize = fs.getFileStatus(hfilePath).getLen();
    FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize);

    assertEquals(3, trailer.getMajorVersion());
    assertEquals(entryCount, trailer.getEntryCount());
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo).withIncludesMvcc(false)
            .withIncludesTags(useTags).withHBaseCheckSum(true).build();
    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
    // Comparator class name is stored in the trailer in version 2.
    KVComparator comparator = trailer.createComparator();
    HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
            trailer.getNumDataIndexLevels());
    HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
            KeyValue.RAW_COMPARATOR, 1);

    HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
            fileSize - trailer.getTrailerSize());
    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
            trailer.getDataIndexCount());

    if (findMidKey) {
        byte[] midkey = dataBlockIndexReader.midkey();
        assertNotNull("Midkey should not be null", midkey);
    }

    // Meta index.
    metaBlockIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(),
            trailer.getMetaIndexCount());
    // File info
    FileInfo fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] keyValueFormatVersion = fileInfo.get(HFileWriterV3.KEY_VALUE_VERSION);
    boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0;

    // Counters for the number of key/value pairs and the number of blocks
    int entriesRead = 0;
    int blocksRead = 0;
    long memstoreTS = 0;

    // Scan blocks the way the reader would scan them
    fsdis.seek(0);
    long curBlockPos = 0;
    while (curBlockPos <= trailer.getLastDataBlockOffset()) {
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.DATA, block.getBlockType());
        ByteBuffer buf = block.getBufferWithoutHeader();
        int keyLen = -1;
        while (buf.hasRemaining()) {

            keyLen = buf.getInt();

            int valueLen = buf.getInt();

            byte[] key = new byte[keyLen];
            buf.get(key);

            byte[] value = new byte[valueLen];
            buf.get(value);
            byte[] tagValue = null;
            if (useTags) {
                int tagLen = buf.getShort();
                tagValue = new byte[tagLen];
                buf.get(tagValue);
            }

            if (includeMemstoreTS) {
                ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(),
                        buf.arrayOffset() + buf.position(), buf.remaining());
                DataInputStream data_input = new DataInputStream(byte_input);

                memstoreTS = WritableUtils.readVLong(data_input);
                buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
            }

            // A brute-force check to see that all keys and values are correct.
            assertTrue(Bytes.compareTo(key, keyValues.get(entriesRead).getKey()) == 0);
            assertTrue(Bytes.compareTo(value, keyValues.get(entriesRead).getValue()) == 0);
            if (useTags) {
                assertNotNull(tagValue);
                KeyValue tkv = keyValues.get(entriesRead);
                assertEquals(tagValue.length, tkv.getTagsLength());
                assertTrue(Bytes.compareTo(tagValue, 0, tagValue.length, tkv.getTagsArray(),
                        tkv.getTagsOffset(), tkv.getTagsLength()) == 0);
            }
            ++entriesRead;
        }
        ++blocksRead;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }
    LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" + blocksRead);
    assertEquals(entryCount, entriesRead);

    // Meta blocks. We can scan until the load-on-open data offset (which is
    // the root block index offset in version 2) because we are not testing
    // intermediate-level index blocks here.

    int metaCounter = 0;
    while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
        LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + trailer.getLoadOnOpenDataOffset());
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.META, block.getBlockType());
        Text t = new Text();
        ByteBuffer buf = block.getBufferWithoutHeader();
        if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
            throw new IOException(
                    "Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName());
        }
        Text expectedText = (metaCounter == 0 ? new Text("Paris")
                : metaCounter == 1 ? new Text("Moscow") : new Text("Washington, D.C."));
        assertEquals(expectedText, t);
        LOG.info("Read meta block data: " + t);
        ++metaCounter;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }

    fsdis.close();
}
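
The ByteArrayInputStream construction in the memstore-timestamp branch above is a general idiom for exposing just the unread portion of a heap buffer as a stream: the offset is arrayOffset() + position() and the length is remaining(). A standalone sketch, valid only when the buffer is array-backed (buf.hasArray() returns true):

public static ByteArrayInputStream remainingAsStream(ByteBuffer buf) {
    // Direct buffers have no backing array; array() would throw.
    return new ByteArrayInputStream(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
}

Reading the resulting stream does not move buf's position, which is why the test advances the position explicitly after decoding the vlong.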

From source file:net.jradius.freeradius.FreeRadiusListener.java

public JRadiusEvent parseRequest(ListenerRequest listenerRequest, ByteBuffer notUsed, InputStream in)
        throws Exception {
    FreeRadiusRequest request = (FreeRadiusRequest) requestObjectPool.borrowObject();
    request.setBorrowedFromPool(requestObjectPool);

    int totalLength = (int) (RadiusFormat.readUnsignedInt(in) - 4);
    int readOffset = 0;

    ByteBuffer buffer = request.buffer_in;

    if (totalLength < 0 || totalLength > buffer.capacity()) {
        return null;
    }

    buffer.clear();
    byte[] payload = buffer.array();

    while (readOffset < totalLength) {
        int result = in.read(payload, readOffset, totalLength - readOffset);
        if (result < 0)
            return null;
        readOffset += result;
    }

    buffer.limit(totalLength);

    long nameLength = RadiusFormat.getUnsignedInt(buffer);

    if (nameLength < 0 || nameLength > 1024) {
        throw new RadiusException("KeepAlive rlm_jradius connection has been closed");
    }

    byte[] nameBytes = new byte[(int) nameLength];
    buffer.get(nameBytes);

    int messageType = RadiusFormat.getUnsignedByte(buffer);
    int packetCount = RadiusFormat.getUnsignedByte(buffer);

    RadiusPacket[] rp = PacketFactory.parse(buffer, packetCount);

    long length = RadiusFormat.getUnsignedInt(buffer);

    if (length > buffer.remaining()) {
        throw new RadiusException("bad length");
    }

    AttributeList configItems = new AttributeList();
    format.unpackAttributes(configItems, buffer, (int) length, true);

    request.setConfigItems(configItems);
    request.setSender(new String(nameBytes));
    request.setType(messageType);
    request.setPackets(rp);

    return request;
}
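
The length > buffer.remaining() test above is the standard guard for a length field that arrives from the wire: it must be validated against the bytes actually present before anything is read. A minimal sketch of the pattern, for a hypothetical frame made of a 4-byte length prefix followed by the payload:

public static byte[] readFrame(ByteBuffer buffer) {
    int length = buffer.getInt();
    if (length < 0 || length > buffer.remaining()) {
        throw new IllegalArgumentException("bad length: " + length);
    }
    byte[] payload = new byte[length];
    buffer.get(payload); // advances position by length
    return payload;
}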

From source file:com.kactech.otj.Utils.java

public static String open(byte[] encryptedEnvelope, PrivateKey privateKey)
        throws InvalidKeyException, NoSuchAlgorithmException, InvalidAlgorithmParameterException,
        IllegalBlockSizeException, BadPaddingException {
    String str;
    byte[] by;
    ByteBuffer buff = ByteBuffer.wrap(encryptedEnvelope);
    buff.order(ByteOrder.BIG_ENDIAN);
    int envType = buff.getShort();// expected 1(asymmetric)
    if (envType != 1)
        throw new UnsupportedOperationException("unexpected envelope type " + envType);
    int arraySize = buff.getInt();// can result in negative integer but not expecting it here
    if (arraySize != 1)//TODO
        throw new UnsupportedOperationException("current code doesn't support multi-nym response");
    byte[] encKeyBytes = null;
    byte[] vectorBytes = null;
    for (int i = 0; i < arraySize; i++) {
        int nymIDLen = buff.getInt();
        by = new byte[nymIDLen];
        buff.get(by);
        String nymID;
        try {
            nymID = new String(by, 0, by.length - 1, Utils.US_ASCII);
        } catch (UnsupportedEncodingException e) {
            throw new RuntimeException(e);
        } // take nymID W/O trailing \0
          //TODO nymID matching!
        int keyLength = buff.getInt();
        encKeyBytes = new byte[keyLength];
        buff.get(encKeyBytes);
        int vectorLength = buff.getInt();
        vectorBytes = new byte[vectorLength];
        buff.get(vectorBytes);

    }
    byte[] encryptedMsg = new byte[buff.remaining()];
    buff.get(encryptedMsg);

    Cipher cipher;
    try {
        cipher = Cipher.getInstance(WRAP_ALGO);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    cipher.init(Cipher.UNWRAP_MODE, privateKey);
    SecretKeySpec aesKey = (SecretKeySpec) cipher.unwrap(encKeyBytes, "AES", Cipher.SECRET_KEY);
    try {
        cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    cipher.init(Cipher.DECRYPT_MODE, aesKey, new IvParameterSpec(vectorBytes));
    by = cipher.doFinal(encryptedMsg);
    try {
        str = new String(by, 0, by.length - 1, Utils.UTF8);
    } catch (UnsupportedEncodingException e) {
        throw new RuntimeException(e);
    } // w/o trailing \0
    return str;
}
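
The new byte[buff.remaining()] allocation above is the usual way to drain everything between the position and the limit; after the matching get(), remaining() is zero. As a standalone helper (a sketch, not part of the project):

public static byte[] drain(ByteBuffer buf) {
    byte[] rest = new byte[buf.remaining()];
    buf.get(rest); // position now equals limit; remaining() == 0
    return rest;
}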

From source file:org.apache.hadoop.fs.TestEnhancedByteBufferAccess.java

@Test
public void testZeroCopyReadsNoFallback() throws Exception {
    HdfsConfiguration conf = initZeroCopyTest();
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    FSDataInputStream fsIn = null;
    final int TEST_FILE_LENGTH = 3 * BLOCK_SIZE;

    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short) 1, 7567L);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        byte[] original = new byte[TEST_FILE_LENGTH];
        IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
        fsIn.close();
        fsIn = fs.open(TEST_PATH);
        HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
        ByteBuffer result;
        try {
            result = dfsIn.read(null, BLOCK_SIZE + 1, EnumSet.noneOf(ReadOption.class));
            Assert.fail("expected UnsupportedOperationException");
        } catch (UnsupportedOperationException e) {
            // expected
        }
        result = dfsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(BLOCK_SIZE, result.remaining());
        Assert.assertEquals(BLOCK_SIZE, dfsIn.getReadStatistics().getTotalBytesRead());
        Assert.assertEquals(BLOCK_SIZE, dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
        Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE), byteBufferToArray(result));
    } finally {
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}

From source file:com.healthmarketscience.jackcess.Column.java

/**
 * Decodes "Currency" values.
 * 
 * @param buffer Column value that points to currency data
 * @return BigDecimal representing the monetary value
 * @throws IOException if the value cannot be parsed 
 */
private static BigDecimal readCurrencyValue(ByteBuffer buffer) throws IOException {
    if (buffer.remaining() != 8) {
        throw new IOException("Invalid money value.");
    }

    return new BigDecimal(BigInteger.valueOf(buffer.getLong(0)), 4);
}
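
Note the mix of relative and absolute access here: remaining() is relative (limit - position), while getLong(0) reads at absolute index 0 without moving the position. The two line up only when the position is 0, e.g. for a freshly created slice. A sketch of that assumption:

public static long readFixed8(ByteBuffer whole, int offset) {
    whole.position(offset);
    ByteBuffer slice = whole.slice(); // position 0, limit = bytes left past offset
    if (slice.remaining() < 8) {
        throw new IllegalArgumentException("need 8 bytes, have " + slice.remaining());
    }
    return slice.getLong(0); // absolute read; slice.position() stays 0
}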

From source file:de.fhg.fokus.diameter.DiameterPeer.transport.Communicator.java

public void run() {
    MessageInfo messageInfo = null;
    ByteBuffer receiveByteBuffer = ByteBuffer.allocateDirect(MAX_MESSAGE_LENGTH);
    DiameterMessage msg = null;
    byte[] buffer = null;
    int len = 0;
    //handler to keep track of association setup and termination
    AssociationHandler assocHandler = new AssociationHandler();
    try {
        while (this.running) {
            messageInfo = sctpChannel.receive(receiveByteBuffer, System.out, assocHandler);
            log.debug("Received msg from communicator:" + this + " and sctpChannel:" + sctpChannel);
            log.debug("Received msg's length:" + messageInfo.bytes());
            log.error("Received msg's length:" + messageInfo.bytes());
            receiveByteBuffer.flip();

            if (receiveByteBuffer.remaining() > 0) {
                buffer = new byte[messageInfo.bytes()];
                receiveByteBuffer.get(buffer);
                receiveByteBuffer.clear();
                // log.debug("The origin message stream  is:\n" + CommonMethod.byteToHex(buffer));
                //first we check the version
                if (buffer[0] != 1) {
                    log.error("Expecting diameter version 1, received version " + buffer[0]);
                    continue;
                }
                //then we check the length of the message
                len = ((int) buffer[1] & 0xFF) << 16 | ((int) buffer[2] & 0xFF) << 8 | ((int) buffer[3] & 0xFF);
                if (len > MAX_MESSAGE_LENGTH) {
                    log.error("Message too long (msg length:" + len + " > max buffer length:"
                            + MAX_MESSAGE_LENGTH + ").");
                    continue;
                }
                //now we can decode the message
                try {
                    msg = Codec.decodeDiameterMessage(buffer, 0);
                } catch (DiameterMessageDecodeException e) {
                    log.error("Error decoding diameter message !");
                    log.error(e, e);
                    msg = null;
                    continue;
                }
                msg.networkTime = System.currentTimeMillis();
                log.debug("Received message is:\n" + msg);
                if (this.peer != null) {
                    this.peer.refreshTimer();
                }
                processMessage(msg);
            }
            msg = null;
        }
    } catch (Exception e1) {
        log.error("Exception:" + e1.getCause() + " catched in communicator:" + this + " and running flag="
                + running);
        if (this.running) {
            if (this.peer != null) {
                if (this.peer.I_comm == this) {
                    StateMachine.process(this.peer, StateMachine.I_Peer_Disc);
                }
                if (this.peer.R_comm == this) {
                    log.error("Now closing the peer:" + this.peer);
                    StateMachine.process(this.peer, StateMachine.R_Peer_Disc);
                }
            }
            log.error("Error reading from sctpChannel:" + sctpChannel + ", the channel might be colsed.");

        } /* else it was a shutdown request, it's normal */
    }
    log.debug("Now closing communicator:" + this + ", and it's sctpChannel:" + sctpChannel);
    this.running = false;
    try {
        sctpChannel.close();
    } catch (IOException e) {
        log.error("Error closing sctpChannel !");
        log.error(e, e);
    }
}
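
The receive path above follows the classic fill/flip/drain cycle: after flip(), remaining() reports how many bytes the channel actually delivered. A condensed sketch of that cycle:

public static byte[] receiveOnce(ReadableByteChannel channel, ByteBuffer buf) throws IOException {
    buf.clear();       // position 0, limit = capacity
    channel.read(buf); // writes between position and limit (a real loop would check for -1/EOF)
    buf.flip();        // limit = bytes received, position = 0
    byte[] data = new byte[buf.remaining()];
    buf.get(data);
    return data;
}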

From source file:com.android.camera.one.v2.OneCameraZslImpl.java

/**
 * Given an image reader, extracts the JPEG image bytes and then closes the
 * reader.
 *
 * @param img the image from which to extract jpeg bytes or compress to
 *            jpeg.
 * @param degrees the angle to rotate the image clockwise, in degrees. Rotation is
 *            only applied to YUV images.
 * @return The bytes of the JPEG image. Newly allocated.
 */
private byte[] acquireJpegBytes(Image img, int degrees) {
    ByteBuffer buffer;

    if (img.getFormat() == ImageFormat.JPEG) {
        Image.Plane plane0 = img.getPlanes()[0];
        buffer = plane0.getBuffer();

        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);
        buffer.rewind();
        return imageBytes;
    } else if (img.getFormat() == ImageFormat.YUV_420_888) {
        buffer = mJpegByteBufferPool.acquire();
        if (buffer == null) {
            buffer = ByteBuffer.allocateDirect(img.getWidth() * img.getHeight() * 3);
        }

        int numBytes = JpegUtilNative.compressJpegFromYUV420Image(new AndroidImageProxy(img), buffer,
                JPEG_QUALITY, degrees);

        if (numBytes < 0) {
            throw new RuntimeException("Error compressing jpeg.");
        }

        buffer.limit(numBytes);

        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);

        buffer.clear();
        mJpegByteBufferPool.release(buffer);

        return imageBytes;
    } else {
        throw new RuntimeException("Unsupported image format.");
    }
}
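
In the YUV branch, the limit(numBytes) call is what makes new byte[buffer.remaining()] copy exactly the bytes the encoder produced: the position is still 0, so remaining() equals numBytes. A reduced sketch of that limit/copy/recycle sequence, with numBytes standing in for the encoder's return value:

public static byte[] copyEncoded(ByteBuffer scratch, int numBytes) {
    scratch.limit(numBytes); // assumes numBytes <= capacity; position is 0, so remaining() == numBytes
    byte[] out = new byte[scratch.remaining()];
    scratch.get(out);        // copy only the valid bytes
    scratch.clear();         // restore full capacity so the buffer can be pooled again
    return out;
}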

From source file:org.commoncrawl.hadoop.io.deprecated.ArcFileReader.java

@Test
public void testReader(File file) throws Exception {

    checkCRLFStateMachine();

    setIOTimeoutValue(30000);

    resetState();

    Thread thread = new Thread(new Runnable() {

        public void run() {
            try {

                while (hasMoreItems()) {
                    ArcFileItem item = new ArcFileItem();

                    getNextItem(item);

                    LOG.info("GOT Item URL:" + item.getUri() + " StreamPos:" + item.getArcFilePos()
                            + " Content Length:" + item.getContent().getCount());
                    for (ArcFileHeaderItem headerItem : item.getHeaderItems()) {
                        if (headerItem.isFieldDirty(ArcFileHeaderItem.Field_ITEMKEY)) {
                            // LOG.info("Header Item:" + headerItem.getItemKey() + " :" +
                            // headerItem.getItemValue());
                        } else {
                            // LOG.info("Header Item:" + headerItem.getItemValue());
                        }
                    }
                    // LOG.info("Content Length:" + item.getContent().getCount());
                    // LOG.info("Content:");
                    /*
                     * ByteArrayInputStream inputStream = new
                     * ByteArrayInputStream(item.getContent
                     * ().getReadOnlyBytes(),0,item.getContent().getCount());
                     * BufferedReader reader = new BufferedReader(new
                     * InputStreamReader(inputStream,Charset.forName("ASCII"))); String
                     * line = null; while ((line = reader.readLine()) != null) {
                     * LOG.info(line); }
                     */
                }
                LOG.info("NO MORE ITEMS... BYE");
            } catch (IOException e) {
                LOG.error(StringUtils.stringifyException(e));
            }
        }

    });

    // run the thread ...
    thread.start();

    ReadableByteChannel channel = Channels.newChannel(new FileInputStream(file));

    try {

        int totalBytesRead = 0;
        for (;;) {

            ByteBuffer buffer = ByteBuffer.allocate(32768);

            int bytesRead = channel.read(buffer);
            // LOG.info("Read "+bytesRead + " From File");

            if (bytesRead == -1) {
                finished();
                break;
            } else {
                buffer.flip();
                totalBytesRead += buffer.remaining();
                available(buffer);
            }
        }
    } finally {
        channel.close();
    }

    // now wait for thread to die ...
    LOG.info("Done Reading File.... Waiting for ArcFileThread to DIE");
    thread.join();
    LOG.info("Done Reading File.... ArcFileThread to DIED");
}

From source file:nextflow.fs.dx.DxUploadOutputStream.java

@SuppressWarnings("unchecked")
private void consumeBuffer(final ByteBuffer buffer, final int chunkIndex) throws IOException {
    log.debug("File: {} > uploading chunk: {}", fileId, chunkIndex);

    // request to upload a new chunk
    // note: dnanexus upload chunk index is 1-based
    Map<String, Object> upload = remote.fileUpload(fileId, chunkIndex);
    log.trace("File: {} > chunk [{}] > FileUpload: {}", fileId, chunkIndex, upload);

    // the response provide the url when 'post' the chunk and the
    // 'authorization' code
    String url = (String) upload.get("url");
    Map<String, Object> headers = (Map<String, Object>) upload.get("headers");
    String auth = (String) headers.get("Authorization");

    // create a 'post' request to upload the stuff
    HttpPost post = new HttpPost(url);
    post.setHeader("Authorization", auth);

    log.trace("File: {} > chunk [{}] > buffer limit: {}; remaining: {}", fileId, chunkIndex, buffer.limit(),
            buffer.remaining());

    HttpEntity payload = new InputStreamEntity(new ByteBufferBackedInputStream(buffer), buffer.limit());
    post.setEntity(payload);

    //        HttpClient client = new DefaultHttpClient();
    //        client.getParams().setParameter(CoreProtocolPNames.PROTOCOL_VERSION, HttpVersion.HTTP_1_1);
    //        log.trace("File: {} > chunk [{}] > Post starting: {}", fileId, chunkIndex, post);

    HttpEntity entity = DxHttpClient.getInstance().http().execute(post).getEntity();
    String response = EntityUtils.toString(entity, "UTF-8");
    log.trace("File: {} > chunk [{}] > post response: {}", fileId, chunkIndex, response);

    //        // close the client (maybe not really necessary)
    //        client.getConnectionManager().shutdown();
    // put the 'buffer' in the pool, so that it can be recycled
    bufferPool.offer(buffer);

    log.trace("File: {} > completed upload chunk: ", fileId, chunkIndex);
}
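
One detail worth flagging: the InputStreamEntity length is taken from buffer.limit(), which equals the number of readable bytes only while the position is 0; remaining() expresses that count regardless of position, which is presumably why both values are logged just before the post. A position-independent variant (a sketch, not the project's code):

HttpEntity payload = new InputStreamEntity(
        new ByteBufferBackedInputStream(buffer), buffer.remaining());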