Example usage for java.io ByteArrayOutputStream reset

List of usage examples for java.io ByteArrayOutputStream reset

Introduction

On this page you can find example usages of java.io.ByteArrayOutputStream.reset().

Prototype

public synchronized void reset() 

Document

Resets the count field of this ByteArrayOutputStream to zero, so that all currently accumulated output in the output stream is discarded.
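
A minimal sketch of this behavior (class name and payload strings are illustrative, not taken from any of the projects below): after reset() the stream reports size zero and can be refilled, while the backing array keeps its capacity.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ResetDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write("first payload".getBytes(StandardCharsets.UTF_8));
        byte[] first = out.toByteArray(); // 13 bytes

        out.reset(); // count drops to zero; the internal buffer is retained
        out.write("second".getBytes(StandardCharsets.UTF_8));
        byte[] second = out.toByteArray(); // contains only "second"

        System.out.println(first.length + " / " + second.length); // prints: 13 / 6
    }
}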

Usage

From source file:net.ontopia.topicmaps.viz.VizTopicMapConfigurationManager.java

private void setIcon(TopicIF topictype, String string, TopicIF filenameTopic, TopicIF iconTopic) {
    setOccurenceValue(topictype, filenameTopic, string);

    ByteArrayOutputStream output = new ByteArrayOutputStream();

    try {
        FileInputStream file = new FileInputStream(string);
        IOUtils.copy(file, output);
        file.close();
        byte[] bytes = output.toByteArray();
        ImageIcon icon = new ImageIcon(bytes);
        iconCache.put(string, icon);
        output.reset();
        output.write(Base64.encodeBase64(bytes));
    } catch (IOException e) {
        // should never occur
        throw new OntopiaRuntimeException("INTERNAL ERROR", e);
    }

    try {
        setOccurenceValue(topictype, iconTopic, output.toString("ISO-8859-1"));
    } catch (UnsupportedEncodingException e1) {
        throw new OntopiaRuntimeException(e1);
    }
}
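
In this example the same buffer is used twice: it first collects the raw icon bytes (cached as an ImageIcon), then reset() empties it so the Base64-encoded form can be written into it and read back as the occurrence value.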

From source file:org.apache.hadoop.ipc.Server.java

/**
 * Set up the response for the IPC call.
 * 
 * @param response buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
 * @param status {@link Status} of the IPC call
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
private void setupResponse(ByteArrayOutputStream response, Call call, Status status, Writable rv,
        String errorClass, String error) throws IOException {
    response.reset();
    DataOutputStream out = new DataOutputStream(response);
    out.writeInt(call.id); // write call id
    out.writeInt(status.state); // write status

    if (status == Status.SUCCESS) {
        rv.write(out);
    } else {
        WritableUtils.writeString(out, errorClass);
        WritableUtils.writeString(out, error);
    }
    if (call.connection.useWrap) {
        wrapWithSasl(response, call);
    }
    call.setResponse(ByteBuffer.wrap(response.toByteArray()));
}
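
Here reset() clears whatever the previous call left in the reusable response buffer before the call id, status, and result (or error details) are serialized into it.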

From source file:org.kuali.kfs.module.purap.document.web.struts.BulkReceivingAction.java

public ActionForward printReceivingTicket(ActionMapping mapping, ActionForm form, HttpServletRequest request,
        HttpServletResponse response) throws Exception {
    String blkDocId = request.getParameter("docId");
    ByteArrayOutputStream baosPDF = new ByteArrayOutputStream();

    try {
        // will throw validation exception if errors occur
        SpringContext.getBean(BulkReceivingService.class).performPrintReceivingTicketPDF(blkDocId, baosPDF);

        response.setHeader("Cache-Control", "max-age=30");
        response.setContentType("application/pdf");
        StringBuffer sbContentDispValue = new StringBuffer();
        String useJavascript = request.getParameter("useJavascript");
        if (useJavascript == null || useJavascript.equalsIgnoreCase("false")) {
            sbContentDispValue.append("attachment");
        } else {
            sbContentDispValue.append("inline");
        }
        StringBuffer sbFilename = new StringBuffer();
        sbFilename.append("PURAP_RECEIVING_TICKET_");
        sbFilename.append(blkDocId);
        sbFilename.append("_");
        sbFilename.append(System.currentTimeMillis());
        sbFilename.append(".pdf");
        sbContentDispValue.append("; filename=");
        sbContentDispValue.append(sbFilename);

        response.setHeader("Content-disposition", sbContentDispValue.toString());

        response.setContentLength(baosPDF.size());

        ServletOutputStream sos = response.getOutputStream();
        baosPDF.writeTo(sos);
        sos.flush();

    } finally {
        if (baosPDF != null) {
            baosPDF.reset();
        }
    }

    return null;
}
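
The reset() in the finally block discards the PDF bytes once they have been streamed to the client. Note that reset() only zeroes the internal count; the backing byte array keeps its capacity until the stream itself becomes unreachable.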

From source file:com.w20e.socrates.formatting.TestVelocityHTMLFormatter.java

public void testFormat() {

    InstanceImpl inst = new InstanceImpl();
    ModelImpl model = new ModelImpl();
    StateManager sm = new TestStateManager();

    ArrayList<Renderable> testItems = new ArrayList<Renderable>();
    ByteArrayOutputStream out = new ByteArrayOutputStream();

    try {
        inst.addNode(new NodeImpl("A01", "SOME VALUE"));
        inst.addNode(new NodeImpl("A02", "SOME VALUE"));
        inst.addNode(new NodeImpl("A03"));
        inst.addNode(new NodeImpl("locale"));

        ControlImpl item = new Input("c0");
        item.setBind("A01");
        item.setLabel("Yo dude");
        item.setHint(new TranslatableImpl("Modda"));

        ControlImpl item2 = new Input("c1");
        item2.setBind("A02");
        item2.setLabel("Yo dude2");
        item2.setHint(new TranslatableImpl("Modda2"));

        ControlImpl item3 = new Checkbox("c2");
        item3.setBind("A03");
        item3.setLabel("Check me!");

        TextBlock text = new TextBlock("c2");
        text.setText("Foo! <a href='http://la.la/la/${locale}/'>lala</a>");

        testItems.add(item);
        testItems.add(item2);
        testItems.add(item3);
        testItems.add(text);

        RunnerContextImpl ctx = new RunnerContextImpl(out, this.formatter, sm, model, inst, null);
        ctx.setLocale(new Locale("en", "GB"));

        // No local options
        this.formatter.format(testItems, out, ctx);

        assertTrue(out.toString().indexOf("enable_js: true") > -1);
        assertTrue(out.toString().indexOf("disable_ajax_validation: true") > -1);

        System.out.println(out.toString());
        assertTrue(out.toString().indexOf("Yo dude") != -1);

        assertTrue(out.toString().indexOf("Foo!") != -1);

        out.reset();

        Map<String, String> opts = new HashMap<String, String>();
        opts.put("disable_ajax_validation", "false");

        ctx.setProperty("renderOptions", opts);
        ctx.setLocale(new Locale("de", "DE"));

        this.formatter.format(testItems, out, ctx);

        assertTrue(out.toString().indexOf("enable_js: true") > -1);
        assertTrue(out.toString().indexOf("disable_ajax_validation: false") > -1);

        assertTrue(out.toString().indexOf("He du!") != -1);

        //assertTrue(out.toString().indexOf("Fuu!") != -1);

    } catch (Exception e) {

        fail(e.getMessage());
    }

    try {
        this.formatter.format(testItems, null, null);
        fail("Should fail here!");
    } catch (Exception e) {
        // Whatever...
    }
}
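
The test renders the same items twice with different locales and options; the reset() between the two format() calls ensures the later assertions only see the output of the second rendering.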

From source file:is.artefact.flume.source.kafka.TestKafkaSource.java

@Test
public void testAvroEvent() throws InterruptedException, EventDeliveryException, IOException {
    SpecificDatumWriter<AvroFlumeEvent> writer;
    ByteArrayOutputStream tempOutStream;
    BinaryEncoder encoder;
    byte[] bytes;

    context.put(TOPICS, topic0);
    context.put(BATCH_SIZE, "1");
    context.put(AVRO_EVENT, "true");
    kafkaSource.configure(context);
    kafkaSource.start();

    Thread.sleep(500L);

    tempOutStream = new ByteArrayOutputStream();
    writer = new SpecificDatumWriter<AvroFlumeEvent>(AvroFlumeEvent.class);

    Map<CharSequence, CharSequence> headers = new HashMap<CharSequence, CharSequence>();
    headers.put("header1", "value1");
    headers.put("header2", "value2");

    AvroFlumeEvent e = new AvroFlumeEvent(headers, ByteBuffer.wrap("hello, world".getBytes()));
    encoder = EncoderFactory.get().directBinaryEncoder(tempOutStream, null);
    writer.write(e, encoder);
    encoder.flush();
    bytes = tempOutStream.toByteArray();

    kafkaServer.produce(topic0, "", bytes);

    String currentTimestamp = Long.toString(System.currentTimeMillis());

    headers.put(TIMESTAMP_HEADER, currentTimestamp);
    headers.put(PARTITION_HEADER, "1");
    headers.put(TOPIC_HEADER, "topic0");

    e = new AvroFlumeEvent(headers, ByteBuffer.wrap("hello, world2".getBytes()));
    tempOutStream.reset();
    encoder = EncoderFactory.get().directBinaryEncoder(tempOutStream, null);
    writer.write(e, encoder);
    encoder.flush();
    bytes = tempOutStream.toByteArray();

    kafkaServer.produce(topic0, "", bytes);

    Thread.sleep(500L);
    Assert.assertEquals(Status.READY, kafkaSource.process());
    Assert.assertEquals(Status.READY, kafkaSource.process());
    Assert.assertEquals(Status.BACKOFF, kafkaSource.process());

    Assert.assertEquals(2, events.size());

    Event event = events.get(0);

    Assert.assertEquals("hello, world", new String(event.getBody(), Charsets.UTF_8));

    Assert.assertEquals("value1", e.getHeaders().get("header1"));
    Assert.assertEquals("value2", e.getHeaders().get("header2"));

    event = events.get(1);

    Assert.assertEquals("hello, world2", new String(event.getBody(), Charsets.UTF_8));

    Assert.assertEquals("value1", e.getHeaders().get("header1"));
    Assert.assertEquals("value2", e.getHeaders().get("header2"));
    Assert.assertEquals(currentTimestamp, e.getHeaders().get(TIMESTAMP_HEADER));
    Assert.assertEquals(e.getHeaders().get(PARTITION_HEADER), "1");
    Assert.assertEquals(e.getHeaders().get(TOPIC_HEADER), "topic0");

}
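
Calling reset() between the two writer.write() invocations guarantees that each message produced to Kafka contains exactly one serialized AvroFlumeEvent rather than the concatenation of both.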

From source file:org.hyperic.hq.agent.db.DiskList.java

/**
 * Add the string to the list of data being stored in the DiskList.
 *
 * @param data Data to add to the end of the list
 */
public void addToList(String data) throws IOException {
    if (this.closed) {
        throw new IOException("Datafile already closed");
    }
    ByteArrayOutputStream bOs = new ByteArrayOutputStream(this.recordSize);
    DataOutputStream dOs = new DataOutputStream(bOs);
    dOs.writeUTF(data);
    if (bOs.size() > this.recordSize) {
        throw new IOException(
                "Data length(" + bOs.size() + ") exceeds " + "maximum record length(" + this.recordSize + ")");
    }
    final long start = now();
    bOs.write(this.padBytes, 0, this.recordSize - bOs.size());
    byte[] bytes = bOs.toByteArray();

    synchronized (this.dataFile) {
        Long firstFreeL;
        long firstFree;

        this.modNum = this.rand.nextInt();

        try {
            firstFreeL = (Long) this.freeList.first();
            firstFree = firstFreeL.longValue();
            this.freeList.remove(firstFreeL);
        } catch (NoSuchElementException exc) {
            // Else we're adding to the end
            firstFree = this.indexFile.length() / IDX_REC_LEN;
        }

        // Write the record to the data file
        this.dataFile.seek(firstFree * this.recordSize);
        this.dataFile.write(bytes);

        bOs.reset();
        dOs.writeBoolean(true); // Is Used
        dOs.writeLong(this.lastRec); // Previous record idx
        dOs.writeLong(-1); // Next record idx

        // Write the index for the record we just made
        this.indexFile.seek(firstFree * IDX_REC_LEN);
        bytes = bOs.toByteArray();
        this.indexFile.write(bytes, 0, bytes.length);

        // Update the previous 'last' record to point to us
        if (this.lastRec != -1) {
            this.indexFile.seek((this.lastRec * IDX_REC_LEN) + 1 + 8);
            this.indexFile.writeLong(firstFree);
        }

        this.lastRec = firstFree;
        if (this.firstRec == -1) {
            this.firstRec = firstFree;
        }
    }

    if (this.dataFile.length() > this.maxLength) {
        this.log.error("Maximum file size for data file: " + this.fileName + " reached (" + this.maxLength
                + " bytes), truncating.");
        deleteAllRecords();
    }
    long duration = now() - start;
    statsCollector.addStat(duration, DISK_LIST_ADD_TO_LIST_TIME);
}
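
One buffer serves two record formats here: after the padded data record has been written to the data file, reset() empties the stream so the same DataOutputStream can build the much smaller index record.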

From source file:io.realm.scanner.MainActivity.java

@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    super.onActivityResult(requestCode, resultCode, data);

    switch (requestCode) {
    case REQUEST_SELECT_PHOTO:
        if (resultCode == RESULT_OK) {
            setTitle("Saving...");
            final Uri imageUri = data.getData();
            try {
                final InputStream imageStream = getContentResolver().openInputStream(imageUri);
                final byte[] readBytes = new byte[PRIME_NUMBER_1000th];
                final ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream();
                int readLength;
                while ((readLength = imageStream.read(readBytes)) != -1) {
                    byteBuffer.write(readBytes, 0, readLength);
                }
                cleanUpCurrentLabelScanIfNeeded();
                byte[] imageData = byteBuffer.toByteArray();
                if (imageData.length > IMAGE_LIMIT) {
                    BitmapFactory.Options options = new BitmapFactory.Options();
                    options.inJustDecodeBounds = true;
                    BitmapFactory.decodeByteArray(imageData, 0, imageData.length, options);
                    int outWidth = options.outWidth;
                    int outHeight = options.outHeight;
                    int inSampleSize = 1;
                    while (outWidth > 1600 || outHeight > 1600) {
                        inSampleSize *= 2;
                        outWidth /= 2;
                        outHeight /= 2;
                    }
                    options = new BitmapFactory.Options();
                    options.inSampleSize = inSampleSize;
                    final Bitmap bitmap = BitmapFactory.decodeByteArray(imageData, 0, imageData.length,
                            options);
                    byteBuffer.reset();
                    bitmap.compress(Bitmap.CompressFormat.JPEG, 80, byteBuffer);
                    imageData = byteBuffer.toByteArray();
                }
                uploadImage(imageData);
            } catch (FileNotFoundException e) {
                e.printStackTrace();
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                showPanel(Panel.PROGRESS);
                setTitle("Uploading...");
            }
        }
        break;
    case REQUEST_IMAGE_CAPTURE:
        if (resultCode == RESULT_OK && currentPhotoPath != null) {
            setTitle("Saving...");
            BitmapFactory.Options options = new BitmapFactory.Options();
            options.inJustDecodeBounds = true;
            BitmapFactory.decodeFile(currentPhotoPath, options);
            int outWidth = options.outWidth;
            int outHeight = options.outHeight;
            int inSampleSize = 1;
            while (outWidth > 1600 || outHeight > 1600) {
                inSampleSize *= 2;
                outWidth /= 2;
                outHeight /= 2;
            }
            options = new BitmapFactory.Options();
            options.inSampleSize = inSampleSize;
            Bitmap bitmap = BitmapFactory.decodeFile(currentPhotoPath, options);
            final ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream();
            bitmap.compress(Bitmap.CompressFormat.JPEG, 80, byteBuffer);
            byte[] imageData = byteBuffer.toByteArray();
            uploadImage(imageData);
            showPanel(Panel.PROGRESS);
            setTitle("Uploading...");
        }
        break;
    }
}
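
When the selected photo exceeds IMAGE_LIMIT, the buffer that accumulated the original bytes is reset and reused to receive the down-sampled JPEG produced by Bitmap.compress().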

From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java

/**
 * Compresses the snapshot and uploads it to a bucket in objectstorage gateway as a single or multipart upload based on the configuration in
 * {@link StorageInfo}. Bucket name should be configured before invoking this method. It can be looked up and initialized by {@link #prepareForUpload()} or
 * explicitly set using {@link #setBucketName(String)}
 *
 * @param sourceFileName
 *            absolute path to the snapshot on the file system
 */
@Override
public void upload(String sourceFileName) throws SnapshotTransferException {
    validateInput(); // Validate input
    loadTransferConfig(); // Load the transfer configuration parameters from database
    SnapshotProgressCallback progressCallback = new SnapshotProgressCallback(snapshotId); // Setup the progress callback

    Boolean error = Boolean.FALSE;
    ArrayBlockingQueue<SnapshotPart> partQueue = null;
    SnapshotPart part = null;
    SnapshotUploadInfo snapUploadInfo = null;
    Future<List<PartETag>> uploadPartsFuture = null;
    Future<String> completeUploadFuture = null;

    byte[] buffer = new byte[READ_BUFFER_SIZE];
    Long readOffset = 0L;
    Long bytesRead = 0L;
    Long bytesWritten = 0L;
    int len;
    int partNumber = 1;

    try {
        // Get the uncompressed file size for uploading as metadata
        Long uncompressedSize = getFileSize(sourceFileName);

        // Setup the snapshot and part entities.
        snapUploadInfo = SnapshotUploadInfo.create(snapshotId, bucketName, keyName);
        Path zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf(partNumber));
        part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber, readOffset);

        FileInputStream inputStream = new FileInputStream(sourceFileName);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        GZIPOutputStream gzipStream = new GZIPOutputStream(baos);
        FileOutputStream outputStream = new FileOutputStream(zipFilePath.toString());

        try {
            LOG.debug("Reading snapshot " + snapshotId + " and compressing it to disk in chunks of size "
                    + partSize + " bytes or greater");
            while ((len = inputStream.read(buffer)) > 0) {
                bytesRead += len;
                gzipStream.write(buffer, 0, len);

                if ((bytesWritten + baos.size()) < partSize) {
                    baos.writeTo(outputStream);
                    bytesWritten += baos.size();
                    baos.reset();
                } else {
                    gzipStream.close();
                    baos.writeTo(outputStream); // Order is important. Closing the gzip stream flushes stuff
                    bytesWritten += baos.size();
                    baos.reset();
                    outputStream.close();

                    if (partNumber > 1) {// Update the part status
                        part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.FALSE);
                    } else {// Initialize multipart upload only once after the first part is created
                        LOG.info("Uploading snapshot " + snapshotId
                                + " to objectstorage using multipart upload");
                        progressCallback.setUploadSize(uncompressedSize);
                        uploadId = initiateMulitpartUpload(uncompressedSize);
                        snapUploadInfo = snapUploadInfo.updateUploadId(uploadId);
                        part = part.updateStateCreated(uploadId, bytesWritten, bytesRead, Boolean.FALSE);
                        partQueue = new ArrayBlockingQueue<SnapshotPart>(queueSize);
                        uploadPartsFuture = Threads.enqueue(serviceConfig, UploadPartTask.class, poolSize,
                                new UploadPartTask(partQueue, progressCallback));
                    }

                    // Check for the future task before adding part to the queue.
                    if (uploadPartsFuture != null && uploadPartsFuture.isDone()) {
                        // This task shouldn't be done until the last part is added. If it is done at this point, then something might have gone wrong
                        throw new SnapshotUploadPartException(
                                "Error uploading parts, aborting part creation process. Check previous log messages for the exact error");
                    }

                    // Add part to the queue
                    partQueue.put(part);

                    // Prep the metadata for the next part
                    readOffset += bytesRead;
                    bytesRead = 0L;
                    bytesWritten = 0L;

                    // Setup the part entity for next part
                    zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf((++partNumber)));
                    part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber,
                            readOffset);

                    gzipStream = new GZIPOutputStream(baos);
                    outputStream = new FileOutputStream(zipFilePath.toString());
                }
            }

            gzipStream.close();
            baos.writeTo(outputStream);
            bytesWritten += baos.size();
            baos.reset();
            outputStream.close();
            inputStream.close();

            // Update the part status
            part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.TRUE);

            // Update the snapshot upload info status
            snapUploadInfo = snapUploadInfo.updateStateCreatedParts(partNumber);
        } catch (Exception e) {
            LOG.error("Failed to upload " + snapshotId + " due to: ", e);
            error = Boolean.TRUE;
            throw new SnapshotTransferException("Failed to upload " + snapshotId + " due to: ", e);
        } finally {
            if (inputStream != null) {
                inputStream.close();
            }
            if (gzipStream != null) {
                gzipStream.close();
            }
            if (outputStream != null) {
                outputStream.close();
            }
            baos.reset();
        }

        if (partNumber > 1) {
            // Check for the future task before adding the last part to the queue.
            if (uploadPartsFuture != null && uploadPartsFuture.isDone()) {
                // This task shouldn't be done until the last part is added. If it is done at this point, then something might have gone wrong
                throw new SnapshotUploadPartException(
                        "Error uploading parts, aborting part upload process. Check previous log messages for the exact error");
            }
            // Add the last part to the queue
            partQueue.put(part);
            // Kick off the completion task
            completeUploadFuture = Threads.enqueue(serviceConfig, CompleteMpuTask.class, poolSize,
                    new CompleteMpuTask(uploadPartsFuture, snapUploadInfo, partNumber));
        } else {
            try {
                LOG.info("Uploading snapshot " + snapshotId
                        + " to objectstorage as a single object. Compressed size of snapshot (" + bytesWritten
                        + " bytes) is less than minimum part size (" + partSize
                        + " bytes) for multipart upload");
                PutObjectResult putResult = uploadSnapshotAsSingleObject(zipFilePath.toString(), bytesWritten,
                        uncompressedSize, progressCallback);
                markSnapshotAvailable();
                try {
                    part = part.updateStateUploaded(putResult.getETag());
                    snapUploadInfo = snapUploadInfo.updateStateUploaded(putResult.getETag());
                } catch (Exception e) {
                    LOG.debug("Failed to update status in DB for " + snapUploadInfo);
                }
                LOG.info("Uploaded snapshot " + snapshotId + " to objectstorage");
            } catch (Exception e) {
                error = Boolean.TRUE;
                LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e);
                throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e);
            } finally {
                deleteFile(zipFilePath);
            }
        }
    } catch (SnapshotTransferException e) {
        error = Boolean.TRUE;
        throw e;
    } catch (Exception e) {
        error = Boolean.TRUE;
        LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e);
        throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e);
    } finally {
        if (error) {
            abortUpload(snapUploadInfo);
            if (uploadPartsFuture != null && !uploadPartsFuture.isDone()) {
                uploadPartsFuture.cancel(true);
            }
            if (completeUploadFuture != null && !completeUploadFuture.isDone()) {
                completeUploadFuture.cancel(true);
            }
        }
    }
}
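
The compression loop flushes gzip output to disk in part-sized chunks; each baos.writeTo(outputStream) is followed by reset() so the in-memory buffer only ever holds compressed bytes that have not yet reached the current part file.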

From source file:org.apache.nifi.controller.StandardFlowService.java

@Override
public StandardDataFlow createDataFlow() throws IOException {
    final byte[] snippetBytes = controller.getSnippetManager().export();
    final byte[] authorizerFingerprint = getAuthorizerFingerprint();

    // Load the flow from disk if the file exists.
    if (dao.isFlowPresent()) {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        dao.load(baos);
        final byte[] bytes = baos.toByteArray();
        final StandardDataFlow fromDisk = new StandardDataFlow(bytes, snippetBytes, authorizerFingerprint);
        return fromDisk;
    }

    // Flow from disk does not exist, so serialize the Flow Controller and use that.
    // This is done because on startup, if there is no flow, the Flow Controller
    // will automatically create a Root Process Group, and we need to ensure that
    // we replicate that Process Group to all nodes in the cluster, so that they all
    // end up with the same ID for the root Process Group.
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    dao.save(controller, baos);
    final byte[] flowBytes = baos.toByteArray();
    baos.reset();

    return new StandardDataFlow(flowBytes, snippetBytes, authorizerFingerprint);
}
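
The final reset() simply marks the buffer as empty after toByteArray() has already copied the serialized flow out of it.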

From source file:org.apache.jmeter.protocol.amf.proxy.AmfRequestHdr.java

/**
 * Parses an HTTP header from a stream.
 *
 * @param in
 *            the stream to parse.
 * @return array of bytes from client.
 */
public byte[] parse(InputStream in) throws IOException {
    boolean inHeaders = true;
    int readLength = 0;
    int dataLength = 0;
    boolean firstLine = true;
    ByteArrayOutputStream clientRequest = new ByteArrayOutputStream();
    ByteArrayOutputStream line = new ByteArrayOutputStream();
    int x;
    while ((inHeaders || readLength < dataLength) && ((x = in.read()) != -1)) {
        line.write(x);
        clientRequest.write(x);
        if (firstLine && !CharUtils.isAscii((char) x)) {// includes \n
            throw new IllegalArgumentException("Only ASCII supported in headers (perhaps SSL was used?)");
        }
        if (inHeaders && (byte) x == (byte) '\n') { // $NON-NLS-1$
            if (line.size() < 3) {
                inHeaders = false;
                firstLine = false; // cannot be first line either
            }
            if (firstLine) {
                parseFirstLine(line.toString());
                firstLine = false;
            } else {
                // parse other header lines, looking for Content-Length
                final int contentLen = parseLine(line.toString());
                if (contentLen > 0) {
                    dataLength = contentLen; // Save the last valid content length one
                }
            }
            if (log.isDebugEnabled()) {
                log.debug("Client Request Line: " + line.toString());
            }
            line.reset();
        } else if (!inHeaders) {
            readLength++;
        }
    }
    // Keep the raw post data
    rawPostData = line.toByteArray();

    if (log.isDebugEnabled()) {
        log.debug("rawPostData in default JRE encoding: " + new String(rawPostData)); // TODO - charset?
        log.debug("Request: " + clientRequest.toString());
    }
    return clientRequest.toByteArray();
}
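
Two buffers cooperate here: clientRequest keeps the entire raw request, while line accumulates one header line at a time and is reset after each newline. Once the headers end, line is no longer reset, so whatever remains in it after the loop is the raw POST body.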