Example usage for java.nio ByteBuffer clear

Introduction

This page collects example usages of java.nio ByteBuffer clear, drawn from open-source projects.

Prototype

public final Buffer clear() 

Document

Clears this buffer. The position is set to zero, the limit is set to the capacity, and the mark is discarded. This method does not actually erase the data in the buffer.
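
A minimal sketch of those semantics:

import java.nio.ByteBuffer;

ByteBuffer buf = ByteBuffer.allocate(8);
buf.put((byte) 1).put((byte) 2); // position = 2, limit = 8
buf.clear();                     // position = 0, limit = 8, mark discarded
// The two bytes are still in the backing array; clear() only resets
// the indices so the buffer can be filled again from the start.
assert buf.position() == 0 && buf.limit() == buf.capacity();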

Usage

From source file:org.apache.hadoop.crypto.CryptoStreamsTestBase.java

/** Test byte buffer read with different buffer size. */
@Test(timeout = 120000)
public void testByteBufferRead() throws Exception {
    OutputStream out = getOutputStream(defaultBufferSize);
    writeData(out);

    // Default buffer size, initial buffer position is 0
    InputStream in = getInputStream(defaultBufferSize);
    ByteBuffer buf = ByteBuffer.allocate(dataLen + 100);
    byteBufferReadCheck(in, buf, 0);
    in.close();

    // Default buffer size, initial buffer position is not 0
    in = getInputStream(defaultBufferSize);
    buf.clear();
    byteBufferReadCheck(in, buf, 11);
    in.close();

    // Small buffer size, initial buffer position is 0
    in = getInputStream(smallBufferSize);
    buf.clear();
    byteBufferReadCheck(in, buf, 0);
    in.close();

    // Small buffer size, initial buffer position is not 0
    in = getInputStream(smallBufferSize);
    buf.clear();
    byteBufferReadCheck(in, buf, 11);
    in.close();

    // Direct buffer, default buffer size, initial buffer position is 0
    in = getInputStream(defaultBufferSize);
    buf = ByteBuffer.allocateDirect(dataLen + 100);
    byteBufferReadCheck(in, buf, 0);
    in.close();

    // Direct buffer, default buffer size, initial buffer position is not 0
    in = getInputStream(defaultBufferSize);
    buf.clear();
    byteBufferReadCheck(in, buf, 11);
    in.close();

    // Direct buffer, small buffer size, initial buffer position is 0
    in = getInputStream(smallBufferSize);
    buf.clear();
    byteBufferReadCheck(in, buf, 0);
    in.close();

    // Direct buffer, small buffer size, initial buffer position is not 0
    in = getInputStream(smallBufferSize);
    buf.clear();
    byteBufferReadCheck(in, buf, 11);
    in.close();
}
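
Note the role of buf.clear() between rounds: it resets the position to 0 and the limit to the capacity, so every byteBufferReadCheck starts with a full, writable buffer. Without it, the previous round would leave the position at the end of the data it read and the next round would find no room to read into.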

From source file:cn.ac.ncic.mastiff.io.coding.RunLengthEncodingIntReader.java

public byte[] CompressensureDecompressed() throws IOException {
    FlexibleEncoding.ORC.DynamicByteArray dynamicBuffer = new FlexibleEncoding.ORC.DynamicByteArray();
    dynamicBuffer.add(inBuf.getData(), 0, inBuf.getLength());
    ByteBuffer byteBuf = ByteBuffer.allocate(dynamicBuffer.size());
    dynamicBuffer.setByteBuffer(byteBuf, 0, dynamicBuffer.size());
    byteBuf.flip();
    FlexibleEncoding.ORC.InStream instream = FlexibleEncoding.ORC.InStream.create("test", byteBuf, null,
            dynamicBuffer.size());
    RunLengthIntegerReader rlein = new RunLengthIntegerReader(instream, true);
    DataOutputBuffer decoding = new DataOutputBuffer();
    //  decompressedSize = bb.getInt();
    decoding.writeInt(decompressedSize);
    decoding.writeInt(numPairs);
    decoding.writeInt(startPos);
    for (int i = 0; i < numPairs; i++) {
        int tmp = (int) rlein.next();
        decoding.writeInt(tmp);
    }
    byteBuf.clear();
    inBuf.close();
    return decoding.getData();
}

From source file:org.apache.kylin.storage.hbase.cube.v2.coprocessor.endpoint.CubeVisitService.java

@SuppressWarnings("checkstyle:methodlength")
@Override
public void visitCube(final RpcController controller, final CubeVisitProtos.CubeVisitRequest request,
        RpcCallback<CubeVisitProtos.CubeVisitResponse> done) {
    List<RegionScanner> regionScanners = Lists.newArrayList();
    HRegion region = null;

    StringBuilder sb = new StringBuilder();
    byte[] allRows;
    String debugGitTag = "";

    CubeVisitProtos.CubeVisitResponse.ErrorInfo errorInfo = null;

    String queryId = request.hasQueryId() ? request.getQueryId() : "UnknownId";
    try (SetThreadName ignored = new SetThreadName("Query %s", queryId)) {
        final long serviceStartTime = System.currentTimeMillis();

        region = (HRegion) env.getRegion();
        region.startRegionOperation();

        // if the user changes kylin.properties on the Kylin server, the coprocessor jar must be redeployed manually to update the KylinConfig of Env.
        KylinConfig kylinConfig = KylinConfig.createKylinConfig(request.getKylinProperties());
        KylinConfig.setKylinConfigThreadLocal(kylinConfig);

        debugGitTag = region.getTableDesc().getValue(IRealizationConstants.HTableGitTag);

        final GTScanRequest scanReq = GTScanRequest.serializer.deserialize(
                ByteBuffer.wrap(HBaseZeroCopyByteString.zeroCopyGetBytes(request.getGtScanRequest())));
        List<List<Integer>> hbaseColumnsToGT = Lists.newArrayList();
        for (IntList intList : request.getHbaseColumnsToGTList()) {
            hbaseColumnsToGT.add(intList.getIntsList());
        }
        StorageSideBehavior behavior = StorageSideBehavior.valueOf(scanReq.getStorageBehavior());
        final List<RawScan> hbaseRawScans = deserializeRawScans(
                ByteBuffer.wrap(HBaseZeroCopyByteString.zeroCopyGetBytes(request.getHbaseRawScan())));

        appendProfileInfo(sb, "start latency: " + (serviceStartTime - scanReq.getStartTime()),
                serviceStartTime);

        final List<InnerScannerAsIterator> cellListsForeachRawScan = Lists.newArrayList();

        for (RawScan hbaseRawScan : hbaseRawScans) {
            if (request.getRowkeyPreambleSize() - RowConstants.ROWKEY_CUBOIDID_LEN > 0) {
                //if the rowkey has a shard prefix, fill this region's shard into the raw scan start/end
                updateRawScanByCurrentRegion(hbaseRawScan, region,
                        request.getRowkeyPreambleSize() - RowConstants.ROWKEY_CUBOIDID_LEN);
            }

            Scan scan = CubeHBaseRPC.buildScan(hbaseRawScan);
            RegionScanner innerScanner = region.getScanner(scan);
            regionScanners.add(innerScanner);

            InnerScannerAsIterator cellListIterator = new InnerScannerAsIterator(innerScanner);
            cellListsForeachRawScan.add(cellListIterator);
        }

        final Iterator<List<Cell>> allCellLists = Iterators.concat(cellListsForeachRawScan.iterator());

        if (behavior.ordinal() < StorageSideBehavior.SCAN.ordinal()) {
            //this is only for CoprocessorBehavior.RAW_SCAN case to profile hbase scan speed
            List<Cell> temp = Lists.newArrayList();
            int counter = 0;
            for (RegionScanner innerScanner : regionScanners) {
                while (innerScanner.nextRaw(temp)) {
                    counter++;
                }
            }
            appendProfileInfo(sb, "scanned " + counter, serviceStartTime);
        }

        if (behavior.ordinal() < StorageSideBehavior.SCAN_FILTER_AGGR_CHECKMEM.ordinal()) {
            scanReq.disableAggCacheMemCheck(); // disable mem check if so told
        }

        final long storagePushDownLimit = scanReq.getStoragePushDownLimit();

        ResourceTrackingCellListIterator cellListIterator = new ResourceTrackingCellListIterator(allCellLists,
                scanReq.getStorageScanRowNumThreshold(), // for old client (scan threshold)
                !request.hasMaxScanBytes() ? Long.MAX_VALUE : request.getMaxScanBytes(), // for new client
                scanReq.getTimeout());

        IGTStore store = new HBaseReadonlyStore(cellListIterator, scanReq, hbaseRawScans.get(0).hbaseColumns,
                hbaseColumnsToGT, request.getRowkeyPreambleSize(), behavior.delayToggledOn(),
                request.getIsExactAggregate());

        IGTScanner rawScanner = store.scan(scanReq);
        IGTScanner finalScanner = scanReq.decorateScanner(rawScanner, behavior.filterToggledOn(),
                behavior.aggrToggledOn(), false, request.getSpillEnabled());

        ByteBuffer buffer = ByteBuffer.allocate(BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);

        ByteArrayOutputStream outputStream = new ByteArrayOutputStream(
                BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);//ByteArrayOutputStream will auto grow
        int finalRowCount = 0;

        try {
            for (GTRecord oneRecord : finalScanner) {
                buffer.clear();
                try {
                    oneRecord.exportColumns(scanReq.getColumns(), buffer);
                } catch (BufferOverflowException boe) {
                    buffer = ByteBuffer.allocate(oneRecord.sizeOf(scanReq.getColumns()) * 2);
                    oneRecord.exportColumns(scanReq.getColumns(), buffer);
                }

                outputStream.write(buffer.array(), 0, buffer.position());

                finalRowCount++;

                //if it's doing storage aggr, then should rely on GTAggregateScanner's limit check
                if (!scanReq.isDoingStorageAggregation() && finalRowCount >= storagePushDownLimit) {
                    //read one more record than limit
                    logger.info("The finalScanner aborted because storagePushDownLimit is satisfied");
                    break;
                }
            }
        } catch (KylinTimeoutException e) {
            logger.info("Abort scan: {}", e.getMessage());
            errorInfo = CubeVisitProtos.CubeVisitResponse.ErrorInfo.newBuilder()
                    .setType(CubeVisitProtos.CubeVisitResponse.ErrorType.TIMEOUT).setMessage(e.getMessage())
                    .build();
        } catch (ResourceLimitExceededException e) {
            logger.info("Abort scan: {}", e.getMessage());
            errorInfo = CubeVisitProtos.CubeVisitResponse.ErrorInfo.newBuilder()
                    .setType(CubeVisitProtos.CubeVisitResponse.ErrorType.RESOURCE_LIMIT_EXCEEDED)
                    .setMessage(e.getMessage()).build();
        } finally {
            finalScanner.close();
        }

        appendProfileInfo(sb, "agg done", serviceStartTime);
        logger.info("Total scanned {} rows and {} bytes", cellListIterator.getTotalScannedRowCount(),
                cellListIterator.getTotalScannedRowBytes());

        //outputStream.close() is not necessary
        byte[] compressedAllRows;
        if (errorInfo == null) {
            allRows = outputStream.toByteArray();
        } else {
            allRows = new byte[0];
        }
        if (!kylinConfig.getCompressionResult()) {
            compressedAllRows = allRows;
        } else {
            compressedAllRows = CompressionUtils.compress(allRows);
        }

        appendProfileInfo(sb, "compress done", serviceStartTime);
        logger.info("Size of final result = {} ({} before compressing)", compressedAllRows.length,
                allRows.length);

        OperatingSystemMXBean operatingSystemMXBean = (OperatingSystemMXBean) ManagementFactory
                .getOperatingSystemMXBean();
        double systemCpuLoad = operatingSystemMXBean.getSystemCpuLoad();
        double freePhysicalMemorySize = operatingSystemMXBean.getFreePhysicalMemorySize();
        double freeSwapSpaceSize = operatingSystemMXBean.getFreeSwapSpaceSize();

        appendProfileInfo(sb, "server stats done", serviceStartTime);
        sb.append(" debugGitTag:" + debugGitTag);

        CubeVisitProtos.CubeVisitResponse.Builder responseBuilder = CubeVisitProtos.CubeVisitResponse
                .newBuilder();
        if (errorInfo != null) {
            responseBuilder.setErrorInfo(errorInfo);
        }
        done.run(responseBuilder.//
                setCompressedRows(HBaseZeroCopyByteString.wrap(compressedAllRows)).//too many array copies 
                setStats(CubeVisitProtos.CubeVisitResponse.Stats.newBuilder()
                        .setAggregatedRowCount(cellListIterator.getTotalScannedRowCount() - finalRowCount)
                        .setScannedRowCount(cellListIterator.getTotalScannedRowCount())
                        .setScannedBytes(cellListIterator.getTotalScannedRowBytes())
                        .setServiceStartTime(serviceStartTime).setServiceEndTime(System.currentTimeMillis())
                        .setSystemCpuLoad(systemCpuLoad).setFreePhysicalMemorySize(freePhysicalMemorySize)
                        .setFreeSwapSpaceSize(freeSwapSpaceSize)
                        .setHostname(InetAddress.getLocalHost().getHostName()).setEtcMsg(sb.toString())
                        .setNormalComplete(errorInfo == null ? 1 : 0).build())
                .build());

    } catch (IOException ioe) {
        logger.error(ioe.toString(), ioe);
        IOException wrapped = new IOException("Error in coprocessor " + debugGitTag, ioe);
        ResponseConverter.setControllerException(controller, wrapped);
    } finally {
        for (RegionScanner innerScanner : regionScanners) {
            IOUtils.closeQuietly(innerScanner);
        }
        if (region != null) {
            try {
                region.closeRegionOperation();
            } catch (IOException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            }
        }
    }
}
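
Worth noting in the export loop above: buffer.clear() runs once per record and is cheap because it only resets the indices, while the BufferOverflowException handler reallocates the buffer at twice the size of the offending record so that the retry (and subsequent records) fit.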

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java

/**
 * Read external.
 *
 * @param blockName the block name
 * @return the cacheable
 * @throws IOException Signals that an I/O exception has occurred.
 */
@SuppressWarnings("unused")
private Cacheable readExternal(String blockName) throws IOException {
    if (!overflowExtEnabled)
        return null;
    // Check if we already have this block in the external storage cache
    try {
        StorageHandle handle = (StorageHandle) extStorageCache.get(blockName);
        if (handle == null)
            return null;
        ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();

        buffer.clear();

        StorageHandle newHandle = storage.getData(handle, buffer);
        int size = buffer.getInt(0);
        if (size == 0)
            return null;
        boolean inMemory = buffer.get(4) == (byte) 1;
        buffer.position(5);
        buffer.limit(size + 4);
        if (deserializer.get() == null)
            return null;
        CacheableDeserializer<Cacheable> deserializer = this.deserializer.get();
        Cacheable obj = deserializer.deserialize(buffer);
        if (inMemory) {
            permGenCache.put(blockName, obj);
        } else {
            tenGenCache.put(blockName, obj);
        }

        if (!newHandle.equals(handle)) {
            extStorageCache.put(blockName, newHandle);
        }

        return obj;

    } catch (NativeMemoryException e) {
        throw new IOException(e);
    }

}

From source file:interfazGrafica.frmMoverRFC.java

public void mostrarPDF() {
    String curp = "";
    curp = txtCapturaCurp.getText();
    ArrayList<DocumentoRFC> Docs = new ArrayList<>();
    DocumentoRFC sigExp;
    DocumentoRFC temporal;
    RFCescaneado tempo = new RFCescaneado();

    //tempo.borrartemporal();
    sigExp = expe.obtenerArchivosExp();
    Nombre_Archivo = sigExp.getNombre();
    nombreArchivo.setText(Nombre_Archivo);

    if (!Nombre_Archivo.isEmpty()) {
        doc = sigExp;
        System.out.println("Got the file name.");
        System.out.println(doc.ruta + doc.nombre);
        String file = "C:\\escaneos\\Local\\Temporal\\" + doc.nombre;
        File arch = new File(file);
        System.out.println("Found the following file:");
        System.out.println(file);
        System.out.println("");
        if (arch.exists()) {
            System.out.println("The file exists");
        }
        try {
            System.out.println("Entered the try block");
            RandomAccessFile raf = new RandomAccessFile(file, "r");
            System.out.println("Recognized the file " + file);
            FileChannel channel = raf.getChannel();
            System.out.println("The channel was opened");
            ByteBuffer buf = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
            System.out.println("Channel map");
            PDFFile pdffile = new PDFFile(buf);
            System.out.println("Creating a PDFFile");
            PDFPage page = pdffile.getPage(0);
            System.out.println("Getting page " + 0);

            panelpdf2.showPage(page);
            System.out.println("Showing the page in panelpdf2");
            repaint();
            System.gc();

            buf.clear();
            raf.close();

            System.gc();

        } catch (Exception ioe) {
            JOptionPane.showMessageDialog(null, "Error opening the file");
            ioe.printStackTrace();
        }

    }
    // tempo.borrartemporal();
}

From source file:com.offbynull.portmapper.common.UdpCommunicator.java

@Override
protected void run() throws Exception {
    ByteBuffer recvBuffer = ByteBuffer.allocate(1100);

    while (true) {
        selector.select();
        if (stopFlag) {
            return;
        }

        for (DatagramChannel channel : sendQueue.keySet()) {
            if (!sendQueue.get(channel).isEmpty()) {
                channel.register(selector, SelectionKey.OP_READ | SelectionKey.OP_WRITE);
            } else {
                channel.register(selector, SelectionKey.OP_READ);
            }
        }

        for (SelectionKey key : selector.selectedKeys()) {
            if (!key.isValid()) {
                continue;
            }

            DatagramChannel channel = (DatagramChannel) key.channel();

            if (key.isReadable()) {
                recvBuffer.clear();
                InetSocketAddress incomingAddress = (InetSocketAddress) channel.receive(recvBuffer);
                recvBuffer.flip();
                for (UdpCommunicatorListener listener : listeners) {
                    try {
                        listener.incomingPacket(incomingAddress, channel, recvBuffer.asReadOnlyBuffer());
                    } catch (RuntimeException re) { // NOPMD
                        // do nothing
                    }
                }
            } else if (key.isWritable()) {
                LinkedBlockingQueue<ImmutablePair<InetSocketAddress, ByteBuffer>> queue = sendQueue
                        .get(channel);
                ImmutablePair<InetSocketAddress, ByteBuffer> next = queue.poll();

                if (next != null) {
                    try {
                        channel.send(next.getValue(), next.getKey());
                    } catch (RuntimeException re) { // NOPMD
                        // do nothing
                    }
                }
            }
        }
    }
}
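
The clear()/receive()/flip() sequence in the readable branch is the canonical pattern for reusing a single receive buffer across datagrams. A standalone sketch of just that pattern, assuming a hypothetical port and simple logging:

import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;

public class UdpReceiveSketch {
    public static void main(String[] args) throws Exception {
        try (DatagramChannel channel = DatagramChannel.open()) {
            channel.bind(new InetSocketAddress(9999)); // hypothetical port
            ByteBuffer recvBuffer = ByteBuffer.allocate(1100);
            while (true) {
                recvBuffer.clear();                                 // whole buffer writable again
                SocketAddress sender = channel.receive(recvBuffer); // blocking receive
                recvBuffer.flip();                                  // limit = bytes received, position = 0
                System.out.println("Received " + recvBuffer.remaining() + " bytes from " + sender);
            }
        }
    }
}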

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java

/**
 * Store external.
 *
 * @param blockName the block name
 * @param buf the buf
 * @param inMemory the in memory
 * @throws IOException Signals that an I/O exception has occurred.
 */
@SuppressWarnings("unused")
private void storeExternal(String blockName, Cacheable buf, boolean inMemory) throws IOException {
    // If external storage is disabled, bail out
    if (!overflowExtEnabled)
        return;
    // Check if we already have this block in the external storage cache
    if (extStorageCache.contains(blockName))
        return;

    ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
    deserializer.set(buf.getDeserializer());
    buffer.clear();

    buffer.position(4);
    buffer.put(inMemory ? (byte) 1 : (byte) 0);
    buf.serialize(buffer);
    buffer.putInt(0, buffer.position() - 4);

    StorageHandle handle = storage.storeData(buffer);

    try {
        extStorageCache.put(blockName, handle);
    } catch (Exception e) {
        throw new IOException(e);
    }

}
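
The clear()/position(4)/putInt(0, ...) sequence above reserves a four-byte slot at the front of the buffer and back-fills it with the payload length once serialization is done. A minimal sketch of the same framing, assuming an arbitrary byte[] payload:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

ByteBuffer frame = ByteBuffer.allocate(64);
byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
frame.clear();                          // reset indices before reusing the buffer
frame.position(4);                      // skip over the length slot
frame.put(payload);                     // write the body
frame.putInt(0, frame.position() - 4);  // back-fill the length with an absolute put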

From source file:org.apache.hadoop.hdfs.server.datanode.BlockSender.java

/**
 * Sends up to maxChunks chunks of data.
 * 
 * When blockInPosition is >= 0, assumes 'out' is a 
 * {@link SocketOutputStream} and tries 
 * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
 * send data (and updates blockInPosition).
 */
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out) throws IOException {
    // Sends multiple chunks in one packet with a single write().

    int len = (int) Math.min(endOffset - offset, (((long) bytesPerChecksum) * ((long) maxChunks)));
    int numChunks = (len + bytesPerChecksum - 1) / bytesPerChecksum;
    int packetLen = len + numChunks * checksumSize + 4;
    boolean lastDataPacket = offset + len == endOffset && len > 0;
    pkt.clear();

    PacketHeader header = new PacketHeader(packetLen, offset, seqno, (len == 0), len);
    header.putInBuffer(pkt);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();

    if (checksumSize > 0 && checksumIn != null) {
        try {
            checksumIn.readFully(buf, checksumOff, checksumLen);
        } catch (IOException e) {
            LOG.warn(" Could not read or failed to veirfy checksum for data" + " at offset " + offset
                    + " for block " + block, e);
            IOUtils.closeStream(checksumIn);
            checksumIn = null;
            if (corruptChecksumOk) {
                // Just fill the checksum region with zeros. Arrays.fill takes an
                // exclusive end index, so the end is checksumOff + checksumLen.
                Arrays.fill(buf, checksumOff, checksumOff + checksumLen, (byte) 0);
            } else {
                throw e;
            }
        }

        // write in progress that we need to use to get last checksum
        if (lastDataPacket && lastChunkChecksum != null) {
            int start = checksumOff + checksumLen - checksumSize;
            byte[] updatedChecksum = lastChunkChecksum.getChecksum();

            if (updatedChecksum != null) {
                System.arraycopy(updatedChecksum, 0, buf, start, checksumSize);
            }
        }
    }

    int dataOff = checksumOff + checksumLen;

    if (blockInPosition < 0) {
        //normal transfer
        IOUtils.readFully(blockIn, buf, dataOff, len);

        if (verifyChecksum) {
            int dOff = dataOff;
            int cOff = checksumOff;
            int dLeft = len;

            for (int i = 0; i < numChunks; i++) {
                checksum.reset();
                int dLen = Math.min(dLeft, bytesPerChecksum);
                checksum.update(buf, dOff, dLen);
                if (!checksum.compare(buf, cOff)) {
                    long failedPos = offset + len - dLeft;
                    throw new ChecksumException("Checksum failed at " + failedPos, failedPos);
                }
                dLeft -= dLen;
                dOff += dLen;
                cOff += checksumSize;
            }
        }
        //writing is done below (mainly to handle IOException)
    }

    try {
        if (blockInPosition >= 0) {
            //use transferTo(). Checks on out and blockIn are already done. 

            SocketOutputStream sockOut = (SocketOutputStream) out;
            //first write the packet
            sockOut.write(buf, 0, dataOff);
            // no need to flush, since we know out is not a buffered stream.

            sockOut.transferToFully(((FileInputStream) blockIn).getChannel(), blockInPosition, len);

            blockInPosition += len;
        } else {
            // normal transfer
            out.write(buf, 0, dataOff + len);
        }

    } catch (IOException e) {
        /* Exception while writing to the client. Connection closure from
         * the other end is mostly the case and we do not care much about
         * it. But other things can go wrong, especially in transferTo(),
         * which we do not want to ignore.
         *
         * The message parsing below should not be considered as a good
         * coding example. NEVER do it to drive a program logic. NEVER.
         * It was done here because the NIO throws an IOException for EPIPE.
         */
        String ioem = e.getMessage();
        if (ioem != null && !ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
            LOG.error("BlockSender.sendChunks() exception: ", e);
        }
        throw ioeToSocketException(e);
    }

    if (throttler != null) { // rebalancing so throttle
        throttler.throttle(packetLen);
    }

    return len;
}

From source file:com.arpnetworking.tsdcore.sinks.KairosDbSink.java

private void addChunk(final ByteArrayOutputStream chunkStream, final ByteBuffer currentChunk,
        final Collection<byte[]> completedChunks) {
    final byte[] nextChunk = chunkStream.toByteArray();
    final int nextChunkSize = nextChunk.length;
    if (currentChunk.position() + nextChunkSize > _maxRequestSize) {
        if (currentChunk.position() > HEADER_BYTE_LENGTH) {
            // TODO(vkoskela): Add chunk size metric. [MAI-?]

            // Copy the relevant part of the buffer
            currentChunk.put(currentChunk.position() - 1, FOOTER);
            completedChunks.add(Arrays.copyOf(currentChunk.array(), currentChunk.position()));

            // Truncate all but the beginning '[' to prepare the next entries
            currentChunk.clear();
            currentChunk.put(HEADER);
        } else {
            CHUNK_TOO_BIG_LOGGER.warn().setMessage("First chunk too big").addData("sink", getName())
                    .addData("bufferLength", currentChunk.position()).addData("nextChunkSize", nextChunkSize)
                    .addData("maxRequestSIze", _maxRequestSize).log();
        }
    }

    currentChunk.put(nextChunk);
    currentChunk.put(SEPARATOR);
    chunkStream.reset();
}
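
Here clear() doubles as a truncate: resetting the position to zero and immediately re-writing HEADER discards the completed chunk's contents while keeping the same backing array ready for the next batch of entries.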

From source file:org.bytesoft.openjtcc.supports.logger.DbTransactionLoggerImpl.java

private byte[] streamToByteArray(InputStream input) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ReadableByteChannel in = null;
    WritableByteChannel out = null;
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    try {
        in = Channels.newChannel(input);
        out = Channels.newChannel(baos);
        while (in.read(buffer) != -1) {
            buffer.flip();
            out.write(buffer);
            buffer.clear();
        }
    } catch (IOException ex) {
        // ignore
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException e) {
                // ignore
            }
        }
        if (baos != null) {
            try {
                baos.close();
            } catch (IOException e) {
                // ignore
            }
        }
    }
    return baos.toByteArray();
}
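
One caveat about the copy loop above: calling buffer.clear() after out.write(buffer) is safe only because a channel wrapping a ByteArrayOutputStream always drains the buffer completely. Against a sink that may write partially (a socket channel, for instance), buffer.compact() is the safer call because it preserves the unwritten remainder. A sketch reusing the in, out, and buffer variables from the method above:

while (in.read(buffer) != -1) {
    buffer.flip();
    out.write(buffer);   // a partial write may leave bytes behind
    buffer.compact();    // keep the remainder and reopen the buffer for reading
}
buffer.flip();           // drain whatever the last compact() carried over
while (buffer.hasRemaining()) {
    out.write(buffer);
}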