Example usage for java.io InputStream mark

List of usage examples for java.io InputStream mark

Introduction

On this page you can find example usages of java.io InputStream.mark.

Prototype

public synchronized void mark(int readlimit) 

Source Link

Document

Marks the current position in this input stream.

Usage

From source file:org.springframework.remoting.caucho.HessianExporter.java

/**
 * Actually invoke the skeleton with the given streams.
 * <p>Sniffs the first byte of the request to select the Hessian protocol
 * version: {@code 'H'} or {@code 'C'} indicate Hessian 2.0, {@code 'c'}
 * indicates a Hessian 1.0 call; anything else is rejected.
 * @param skeleton the skeleton to invoke
 * @param inputStream the request stream
 * @param outputStream the response stream
 * @throws Throwable if invocation failed
 */
protected void doInvoke(HessianSkeleton skeleton, InputStream inputStream, OutputStream outputStream)
        throws Throwable {

    // Run the invocation with this exporter's ClassLoader as the thread
    // context ClassLoader; restored in the outer finally below.
    ClassLoader originalClassLoader = overrideThreadContextClassLoader();
    try {
        InputStream isToUse = inputStream;
        OutputStream osToUse = outputStream;

        if (this.debugLogger != null && this.debugLogger.isDebugEnabled()) {
            // Wrap both streams in Hessian debug decorators that echo the
            // wire traffic to the debug logger.
            // NOTE(review): try-with-resources closes debugWriter at the end of
            // this block, while dis/dos are still used afterwards — confirm the
            // Hessian debug streams tolerate a closed writer.
            try (PrintWriter debugWriter = new PrintWriter(new CommonsLogWriter(this.debugLogger))) {
                @SuppressWarnings("resource")
                HessianDebugInputStream dis = new HessianDebugInputStream(inputStream, debugWriter);
                @SuppressWarnings("resource")
                HessianDebugOutputStream dos = new HessianDebugOutputStream(outputStream, debugWriter);
                dis.startTop2();
                dos.startTop2();
                isToUse = dis;
                osToUse = dos;
            }
        }

        // mark/reset support is needed so the 'C' branch below can push the
        // sniffed tag byte back into the stream.
        if (!isToUse.markSupported()) {
            isToUse = new BufferedInputStream(isToUse);
            isToUse.mark(1);
        }
        // NOTE(review): when the incoming stream already supports mark, no mark
        // is set here, so the reset() in the 'C' branch relies on the stream's
        // pre-existing mark state — verify for mark-capable request streams.

        int code = isToUse.read();
        int major;
        int minor;

        AbstractHessianInput in;
        AbstractHessianOutput out;

        if (code == 'H') {
            // Hessian 2.0 stream
            major = isToUse.read();
            minor = isToUse.read();
            if (major != 0x02) {
                throw new IOException("Version " + major + '.' + minor + " is not understood");
            }
            in = new Hessian2Input(isToUse);
            out = new Hessian2Output(osToUse);
            in.readCall();
        } else if (code == 'C') {
            // Hessian 2.0 call... for some reason not handled in HessianServlet!
            // Rewind so Hessian2Input sees the 'C' tag itself.
            isToUse.reset();
            in = new Hessian2Input(isToUse);
            out = new Hessian2Output(osToUse);
            in.readCall();
        } else if (code == 'c') {
            // Hessian 1.0 call: version bytes follow the tag; the major version
            // still selects the output protocol.
            major = isToUse.read();
            minor = isToUse.read();
            in = new HessianInput(isToUse);
            if (major >= 2) {
                out = new Hessian2Output(osToUse);
            } else {
                out = new HessianOutput(osToUse);
            }
        } else {
            throw new IOException(
                    "Expected 'H'/'C' (Hessian 2.0) or 'c' (Hessian 1.0) in hessian input at " + code);
        }

        in.setSerializerFactory(this.serializerFactory);
        out.setSerializerFactory(this.serializerFactory);
        if (this.remoteResolver != null) {
            in.setRemoteResolver(this.remoteResolver);
        }

        try {
            skeleton.invoke(in, out);
        } finally {
            // Best-effort close of both directions; IOExceptions here are
            // deliberately ignored so they cannot mask an invocation failure.
            try {
                in.close();
                isToUse.close();
            } catch (IOException ex) {
                // ignore
            }
            try {
                out.close();
                osToUse.close();
            } catch (IOException ex) {
                // ignore
            }
        }
    } finally {
        resetThreadContextClassLoader(originalClassLoader);
    }
}

From source file:org.apache.falcon.resource.AbstractEntityManager.java

/**
 * Parses the request body as an entity of the given type.
 * <p>If parsing fails and debug logging is enabled, rewinds the stream
 * (when mark/reset is supported, up to {@code XML_DEBUG_LEN} bytes) and
 * logs the raw XML before rethrowing the parse failure.
 *
 * @param request    the incoming HTTP request whose body holds the entity XML
 * @param entityType the expected entity type, used to pick the parser
 * @return the parsed entity
 * @throws IOException     if the request body cannot be read
 * @throws FalconException if the body cannot be parsed as the given type
 */
protected Entity deserializeEntity(HttpServletRequest request, EntityType entityType)
        throws IOException, FalconException {

    final EntityParser<?> parser = EntityParserFactory.getParser(entityType);
    final InputStream payload = request.getInputStream();
    final boolean canRewind = payload.markSupported();
    if (canRewind) {
        // Remember the start of the body so we can dump it on failure.
        payload.mark(XML_DEBUG_LEN);
    }
    try {
        return parser.parse(payload);
    } catch (FalconException parseFailure) {
        if (canRewind && LOG.isDebugEnabled()) {
            try {
                payload.reset();
                LOG.debug("XML DUMP for ({}): {}", entityType, getAsString(payload), parseFailure);
            } catch (IOException ignored) {
                // best-effort debug dump only; the original failure is rethrown
            }
        }
        throw parseFailure;
    }
}

From source file:uk.ac.kcl.tika.parsers.PDFPreprocessorParser.java

@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context)
        throws IOException, SAXException, TikaException {
    ImageMagickConfig config = context.get(ImageMagickConfig.class, DEFAULT_IMAGEMAGICK_CONFIG);

    // If ImageMagick is not on the path with the current config, do not try to run OCR.
    // getSupportedTypes shouldn't have listed us as handling it, so this should only
    // occur if someone directly calls this parser, not via DefaultParser or similar.
    PDFParser pdfParser = new PDFParser();

    // Temporary handler/metadata so the probing pass does not pollute the caller's.
    BodyContentHandler body = new BodyContentHandler();
    Metadata pdfMetadata = new Metadata();

    // BUGFIX: the original marked the stream only when markSupported() was true,
    // but called reset() unconditionally after the first parse — throwing
    // IOException for non-markable streams. Always ensure mark/reset is
    // available before the two-pass parse.
    if (!stream.markSupported()) {
        stream = new java.io.BufferedInputStream(stream);
    }
    stream.mark(Integer.MAX_VALUE);

    // First pass: check whether the PDF already carries substantial text content.
    pdfParser.parse(stream, body, pdfMetadata, context);
    stream.reset();

    // If there is enough embedded text, or ImageMagick is unavailable, just
    // re-parse with the caller's handler/metadata and skip OCR entirely.
    if (body.toString().length() > 100 || !hasImageMagick(config)) {
        pdfParser.parse(stream, handler, metadata, context);
        return;
    }

    // Little/no embedded text: keep the PDF metadata gathered in the first pass
    // and OCR a rasterized copy of the document instead.
    Arrays.asList(pdfMetadata.names()).stream().forEach(name -> metadata.add(name, pdfMetadata.get(name)));

    // Temp files for the out-of-process ImageMagick conversion.
    File tiffFileOfPDF = null;
    File pdfFileFromStream = File.createTempFile("tempPDF", ".pdf");
    try {
        FileUtils.copyInputStreamToFile(stream, pdfFileFromStream);
        tiffFileOfPDF = File.createTempFile("tempTIFF", ".tiff");
        makeTiffFromPDF(pdfFileFromStream, tiffFileOfPDF, config);
        if (tiffFileOfPDF.exists()) {
            TesseractOCRParser tesseract = new TesseractOCRParser();
            tesseract.parse(FileUtils.openInputStream(tiffFileOfPDF), handler, metadata, context);
        }
    } finally {
        // BUGFIX: guard against NPE — tiffFileOfPDF stays null if
        // copyInputStreamToFile or createTempFile throws above.
        if (tiffFileOfPDF != null && tiffFileOfPDF.exists()) {
            tiffFileOfPDF.delete();
        }
        if (pdfFileFromStream.exists()) {
            pdfFileFromStream.delete();
        }
    }
}

From source file:org.mifos.customers.client.struts.action.ClientCustAction.java

/**
 * Streams the uploaded client picture back to the browser as JPEG for preview,
 * then rewinds the picture stream so it can be read again later in the flow.
 */
@TransactionDemarcate(joinToken = true)
public ActionForward retrievePictureOnPreview(ActionMapping mapping, ActionForm form,
        @SuppressWarnings("unused") HttpServletRequest request, HttpServletResponse response) throws Exception {

    ClientCustActionForm actionForm = (ClientCustActionForm) form;
    InputStream in = actionForm.getPicture().getInputStream();
    // BUGFIX: mark(0) sets a zero read limit, which per the InputStream
    // contract invalidates the mark as soon as any bytes are read, so the
    // reset() below could fail. Use a limit large enough to cover the image.
    in.mark(Integer.MAX_VALUE);
    response.setContentType("image/jpeg");
    BufferedOutputStream out = new BufferedOutputStream(response.getOutputStream());
    byte[] buffer = new byte[1024 * 4]; // 4K copy buffer
    int read = in.read(buffer, 0, buffer.length);
    while (read != -1) {
        out.write(buffer, 0, read);
        read = in.read(buffer, 0, buffer.length);
    }
    out.flush();
    out.close();
    // Rewind so the picture can be streamed again on a subsequent request.
    in.reset();
    String forward = ClientConstants.CUSTOMER_PICTURE_PAGE;
    return mapping.findForward(forward);
}

From source file:edu.harvard.hmdc.dvnplugin.DVNOAIUrlCacher.java

/**
 * Runs the AU's login-page checker (if one is configured) against the response.
 * The body is read through a markable stream so it can be re-fetched/reset via
 * {@code resetInputStream} when the checker decides it is not a login page.
 *
 * @return the (possibly re-wrapped or re-fetched) response stream
 * @throws IOException if reading or resetting the stream fails
 * @throws CacheException.PermissionException if the body is a login page
 */
private InputStream checkLoginPage(InputStream input, Properties headers, String lastModified)
        throws IOException {
    LoginPageChecker checker = au.getCrawlSpec().getLoginPageChecker();
    if (checker == null) {
        logger.debug3("Didn't find a login page checker");
        return input;
    }
    logger.debug3("Found a login page checker");
    // Ensure mark/reset is available before the checker consumes the body.
    InputStream markable = input.markSupported() ? input : new BufferedInputStream(input);
    markable.mark(LOGIN_BUFFER_MAX);
    Reader bodyReader = new InputStreamReader(markable, Constants.DEFAULT_ENCODING);
    try {
        if (checker.isLoginPage(headers, bodyReader)) {
            throw new CacheException.PermissionException("Found a login page");
        }
        return resetInputStream(markable, fetchUrl, lastModified);
    } catch (PluginException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.exist.xquery.functions.request.GetData.java

@Override
public Sequence eval(Sequence[] args, Sequence contextSequence) throws XPathException {

    final RequestModule myModule = (RequestModule) context.getModule(RequestModule.NAMESPACE_URI);

    // request object is read from global variable $request
    final Variable var = myModule.resolveVariable(RequestModule.REQUEST_VAR);

    if (var == null || var.getValue() == null) {
        throw new XPathException(this, "No request object found in the current XQuery context.");
    }

    if (var.getValue().getItemType() != Type.JAVA_OBJECT) {
        throw new XPathException(this, "Variable $request is not bound to an Java object.");
    }

    final JavaObjectValue value = (JavaObjectValue) var.getValue().itemAt(0);

    if (!(value.getObject() instanceof RequestWrapper)) {
        throw new XPathException(this, "Variable $request is not bound to a Request object.");
    }
    final RequestWrapper request = (RequestWrapper) value.getObject();

    //if the content length is unknown or 0, return
    if (request.getContentLength() == -1 || request.getContentLength() == 0) {
        return Sequence.EMPTY_SEQUENCE;
    }

    InputStream isRequest = null;
    Sequence result = Sequence.EMPTY_SEQUENCE;
    try {

        isRequest = request.getInputStream();

        //was there any POST content?
        /**
         * There is a bug in HttpInput.available() in Jetty 7.2.2.v20101205
         * This has been filed as Bug 333415 - https://bugs.eclipse.org/bugs/show_bug.cgi?id=333415
         * It is expected to be fixed in the Jetty 7.3.0 release
         */

        //TODO reinstate call to .available() when Jetty 7.3.0 is released, use of .getContentLength() is not reliable because of http mechanics
        //if(is != null && is.available() > 0) {
        if (isRequest != null && request.getContentLength() > 0) {

            // 1) determine if exists mime database considers this binary data
            String contentType = request.getContentType();
            if (contentType != null) {
                //strip off any charset encoding info (e.g. "text/xml; charset=UTF-8")
                if (contentType.indexOf(";") > -1) {
                    contentType = contentType.substring(0, contentType.indexOf(";"));
                }

                final MimeType mimeType = MimeTable.getInstance().getContentType(contentType);
                if (mimeType != null && !mimeType.isXMLType()) {

                    //binary data: return as Base64, reading directly from the request stream
                    result = BinaryValueFromInputStream.getInstance(context, new Base64BinaryValueType(),
                            isRequest);
                }
            }

            if (result == Sequence.EMPTY_SEQUENCE) {
                //2) not binary, try and parse as an XML documemnt, otherwise 3) return a string representation

                //parsing will consume the stream so we must cache!
                InputStream is = null;
                FilterInputStreamCache cache = null;
                try {
                    //we have to cache the input stream, so we can reread it, as we may use it twice (once for xml attempt and once for string attempt)
                    cache = FilterInputStreamCacheFactory
                            .getCacheInstance(new FilterInputStreamCacheConfiguration() {

                                @Override
                                public String getCacheClass() {
                                    // cache implementation class is configurable per broker
                                    return (String) context.getBroker().getConfiguration()
                                            .getProperty(Configuration.BINARY_CACHE_CLASS_PROPERTY);
                                }
                            });
                    is = new CachingFilterInputStream(cache, isRequest);

                    //mark the start of the stream so we can rewind for the string attempt
                    is.mark(Integer.MAX_VALUE);

                    //2) try and  parse as XML
                    result = parseAsXml(is);

                    if (result == Sequence.EMPTY_SEQUENCE) {
                        // 3) not a valid XML document, return a string representation of the document
                        String encoding = request.getCharacterEncoding();
                        if (encoding == null) {
                            encoding = "UTF-8";
                        }

                        try {
                            //reset the stream, as we need to reuse for string parsing after the XML parsing happened
                            is.reset();

                            result = parseAsString(is, encoding);
                        } catch (final IOException ioe) {
                            throw new XPathException(this, "An IO exception occurred: " + ioe.getMessage(),
                                    ioe);
                        }
                    }

                } finally {
                    // release the cache and close the caching wrapper regardless
                    // of which representation (binary/XML/string) was produced
                    if (cache != null) {
                        try {
                            cache.invalidate();
                        } catch (final IOException ioe) {
                            LOG.error(ioe.getMessage(), ioe);
                        }
                    }

                    if (is != null) {
                        try {
                            is.close();
                        } catch (final IOException ioe) {
                            LOG.error(ioe.getMessage(), ioe);
                        }
                    }
                }
            }

            //NOTE we do not close isRequest, because it may be needed further by the caching input stream wrapper
        }
    } catch (final IOException ioe) {
        throw new XPathException(this, "An IO exception occurred: " + ioe.getMessage(), ioe);
    }

    return result;
}

From source file:edu.harvard.iq.dvn.lockss.plugin.DVNOAIUrlCacher.java

/**
 * Applies the configured login-page checker (if any) to the fetched content.
 * Buffers the stream when needed so it can be rewound, and re-fetches/resets
 * it via {@code resetInputStream} when the content is not a login page.
 *
 * @return the (possibly wrapped or re-fetched) content stream
 * @throws IOException if stream handling fails
 * @throws CacheException.PermissionException if the content is a login page
 */
private InputStream checkLoginPage(InputStream input, Properties headers, String lastModified)
        throws IOException {
    final LoginPageChecker loginChecker = au.getCrawlSpec().getLoginPageChecker();
    if (loginChecker != null) {
        logger.debug3("Found a login page checker");
        InputStream buffered = input;
        if (!buffered.markSupported()) {
            buffered = new BufferedInputStream(buffered);
        }
        // Mark limit is configurable; fall back to the compiled-in default.
        int markLimit = CurrentConfig.getIntParam(PARAM_LOGIN_CHECKER_MARK_LIMIT,
                DEFAULT_LOGIN_CHECKER_MARK_LIMIT);
        buffered.mark(markLimit);
        Reader pageReader = new InputStreamReader(buffered, Constants.DEFAULT_ENCODING);
        try {
            if (loginChecker.isLoginPage(headers, pageReader)) {
                throw new CacheException.PermissionException("Found a login page");
            }
            buffered = resetInputStream(buffered, fetchUrl, lastModified);
        } catch (PluginException e) {
            throw new RuntimeException(e);
        }
        return buffered;
    }
    logger.debug3("Didn't find a login page checker");
    return input;
}

From source file:com.amazonaws.http.AmazonHttpClient.java

/**
 * Internal method to execute the HTTP method given.
 * <p>Runs the sign/send/handle cycle in a retry loop: on each retry the
 * original request parameters and headers are restored, the retry pause is
 * applied, and the entity content stream is rewound when possible.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T> Response<T> executeHelper(final Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    awsRequestMetrics.addProperty(Field.ServiceName, request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint, request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());
    final AWSCredentials credentials = executionContext.getCredentials();
    AmazonWebServiceRequest awsreq = request.getOriginalRequest();
    ProgressListener listener = awsreq.getGeneralProgressListener();
    Signer signer = null;

    while (true) {
        ++requestCount;
        awsRequestMetrics.setCounter(Field.RequestCount, requestCount);
        if (requestCount > 1) { // retry
            // Restore the pristine params/headers before re-signing.
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }
        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse apacheResponse = null;

        try {
            // Sign the request if a signer was provided
            // (resolved lazily once and cached across retries)
            if (signer == null)
                signer = executionContext.getSignerByURI(request.getEndpoint());
            if (signer != null && credentials != null) {
                awsRequestMetrics.startEvent(Field.RequestSigningTime);
                try {
                    signer.sign(request, credentials);
                } finally {
                    awsRequestMetrics.endEvent(Field.RequestSigningTime);
                }
            }

            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }

            httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                // Honor a 307 redirect recorded on a previous iteration.
                httpRequest.setURI(redirectedURI);
            }

            if (requestCount > 1) { // retry
                // Notify the progress listener of the retry
                publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);

                awsRequestMetrics.startEvent(Field.RetryPauseTime);
                try {
                    pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount,
                            config.getRetryPolicy());
                } finally {
                    awsRequestMetrics.endEvent(Field.RetryPauseTime);
                }
            }

            if (entity != null) {
                // Rewind/re-mark the entity content so it can be resent.
                // NOTE(review): mark(-1) passes a negative readlimit, which is
                // nonstandard per the java.io.InputStream contract — presumably
                // the Apache HTTP entity streams accept it; verify.
                InputStream content = entity.getContent();
                if (requestCount > 1) { // retry
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
            HttpContext httpContext = new BasicHttpContext();
            httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
            retriedException = null;

            publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT);
            awsRequestMetrics.startEvent(Field.HttpRequestTime);
            try {
                apacheResponse = httpClient.execute(httpRequest, httpContext);
            } finally {
                awsRequestMetrics.endEvent(Field.HttpRequestTime);
            }
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT);

            if (isRequestSuccessful(apacheResponse)) {
                awsRequestMetrics.addProperty(Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
                T response = handleResponse(request, responseHandler, httpRequest, httpResponse, apacheResponse,
                        executionContext);
                return new Response<T>(response, httpResponse);
            } else if (isTemporaryRedirect(apacheResponse)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = apacheResponse.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addProperty(Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
                awsRequestMetrics.addProperty(Field.RedirectLocation, redirectedLocation);
                awsRequestMetrics.addProperty(Field.AWSRequestID, null);

            } else {
                // Error response: translate to an AmazonServiceException and
                // decide whether the retry policy allows another attempt.
                leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
                AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest,
                        apacheResponse);
                awsRequestMetrics.addProperty(Field.AWSRequestID, ase.getRequestId());
                awsRequestMetrics.addProperty(Field.AWSErrorCode, ase.getErrorCode());
                awsRequestMetrics.addProperty(Field.StatusCode, ase.getStatusCode());

                if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount,
                        config.getRetryPolicy())) {
                    throw ase;
                }

                // Cache the retryable exception
                retriedException = ase;
                /*
                 * Checking for clock skew error again because we don't want to set the
                 * global time offset for every service exception.
                 */
                if (RetryUtils.isClockSkewError(ase)) {
                    int timeOffset = parseClockSkewOffset(apacheResponse, ase);
                    SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
                }
                resetRequestAfterError(request, ase);
            }
        } catch (IOException ioe) {
            if (log.isInfoEnabled()) {
                log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            awsRequestMetrics.incrementCounter(Field.Exception);
            awsRequestMetrics.addProperty(Field.Exception, ioe);
            awsRequestMetrics.addProperty(Field.AWSRequestID, null);

            AmazonClientException ace = new AmazonClientException(
                    "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount,
                    config.getRetryPolicy())) {
                throw ace;
            }

            // Cache the retryable exception
            retriedException = ace;
            resetRequestAfterError(request, ioe);
        } catch (RuntimeException e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } catch (Error e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } finally {
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release the it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    if (apacheResponse != null && apacheResponse.getEntity() != null
                            && apacheResponse.getEntity().getContent() != null) {
                        apacheResponse.getEntity().getContent().close();
                    }
                } catch (IOException e) {
                    log.warn("Cannot close the response content.", e);
                }
            }
        }
    } /* end while (true) */
}

From source file:org.apache.tika.parser.microsoft.POIFSContainerDetector.java

/**
 * Detects OLE2-based document types. Prefers names from an already-open
 * container on a TikaInputStream; otherwise sniffs the 8-byte OLE2 magic
 * number (rewinding the stream afterwards) before inspecting entry names.
 */
public MediaType detect(InputStream input, Metadata metadata) throws IOException {
    // Check if we have access to the document
    if (input == null) {
        return MediaType.OCTET_STREAM;
    }

    // If this is a TikaInputStream wrapping an already parsed
    // NPOIFileSystem/DirectoryNode, just get the names from the root.
    TikaInputStream tis = TikaInputStream.cast(input);
    Set<String> names = null;
    if (tis != null) {
        Object container = tis.getOpenContainer();
        if (container instanceof NPOIFSFileSystem) {
            names = getTopLevelNames(((NPOIFSFileSystem) container).getRoot());
        } else if (container instanceof DirectoryNode) {
            names = getTopLevelNames((DirectoryNode) container);
        }
    }

    if (names == null) {
        // Sniff the OLE2 header magic (0xD0CF11E0A1B11AE1), rewinding afterwards.
        final int[] ole2Magic = { 0xd0, 0xcf, 0x11, 0xe0, 0xa1, 0xb1, 0x1a, 0xe1 };
        input.mark(ole2Magic.length);
        try {
            for (int expected : ole2Magic) {
                if (input.read() != expected) {
                    return MediaType.OCTET_STREAM;
                }
            }
        } finally {
            input.reset();
        }
    }

    // We can only detect the exact type when given a TikaInputStream
    if (names == null && tis != null) {
        // Look for known top level entry names to detect the document type
        names = getTopLevelNames(tis);
    }

    // Detect based on the names (as available)
    Object openContainer = (tis == null) ? null : tis.getOpenContainer();
    if (openContainer instanceof NPOIFSFileSystem) {
        return detect(names, ((NPOIFSFileSystem) openContainer).getRoot());
    }
    return detect(names, null);
}

From source file:org.mule.transport.http.multipart.MultiPartInputStream.java

/**
 * Reads one line of bytes from the stream, stopping at CR or LF.
 * A single leading LF is skipped (the tail of a previous CRLF), and when the
 * line ends in CR the following LF is consumed too, provided the stream can
 * be peeked via mark/reset. Returns {@code null} on EOF with no data read.
 */
private byte[] readLine(InputStream in) throws IOException {
    java.io.ByteArrayOutputStream line = new java.io.ByteArrayOutputStream(256);
    boolean firstByte = true;
    int ch = 0;

    while ((ch = in.read()) >= 0) {
        // skip a leading LF left over from the previous line's CRLF
        if (firstByte && ch == LF) {
            firstByte = false;
            continue;
        }
        firstByte = false;

        if (ch == CR || ch == LF) {
            break;
        }
        line.write(ch);
    }

    // EOF with nothing accumulated means "no more lines"
    if (ch == -1 && line.size() == 0) {
        return null;
    }

    // consume a trailing LF after CR when we can peek ahead safely
    if (ch == CR && in.available() >= 1 && in.markSupported()) {
        in.mark(1);
        if (in.read() != LF) {
            in.reset();
        }
    }

    return line.toByteArray();
}