Example usage for java.io PushbackInputStream PushbackInputStream

Introduction

On this page you can find example usages of the java.io PushbackInputStream constructor PushbackInputStream(InputStream, int).

Prototype

public PushbackInputStream(InputStream in, int size) 

Document

Creates a PushbackInputStream with a pushback buffer of the specified size, and saves its argument, the input stream in, for later use.
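
Before the longer real-world examples below, here is a minimal, self-contained sketch of the constructor in action (the stream contents and buffer size are illustrative): read one byte to peek at it, push it back, and the stream still yields its complete data.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;

public class PushbackPeek {
    public static void main(String[] args) throws IOException {
        InputStream raw = new ByteArrayInputStream("hello".getBytes());
        // a pushback buffer of one byte is enough to peek at a single byte
        PushbackInputStream in = new PushbackInputStream(raw, 1);
        int first = in.read(); // consume one byte to inspect it
        in.unread(first);      // push it back into the pushback buffer
        byte[] all = new byte[5];
        int n = in.read(all);  // reads the complete "hello" again
        System.out.println(new String(all, 0, n));
    }
}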

Usage

From source file:org.apache.nutch.protocol.htmlunit.HttpResponse.java

/**
 * Default public constructor.
 * @param http
 * @param url
 * @param datum
 * @throws ProtocolException
 * @throws IOException
 */
public HttpResponse(HttpBase http, URL url, CrawlDatum datum) throws ProtocolException, IOException {

    this.http = http;
    this.url = url;
    this.orig = url.toString();
    this.base = url.toString();

    if (!"http".equals(url.getProtocol()) || !!"https".equals(url.getProtocol()))
        throw new HttpException("Not an HTTP url:" + url);

    if (Http.LOG.isTraceEnabled()) {
        Http.LOG.trace("fetching " + url);
    }

    String path = "".equals(url.getFile()) ? "/" : url.getFile();

    // some servers will redirect a request with a host line like
    // "Host: <hostname>:80" to "http://<hpstname>/<orig_path>"- they
    // don't want the :80...

    String host = url.getHost();
    int port;
    String portString;
    if (url.getPort() == -1) {
        port = 80;
        portString = "";
    } else {
        port = url.getPort();
        portString = ":" + port;
    }
    Socket socket = null;

    try {
        socket = new Socket(); // create the socket
        socket.setSoTimeout(http.getTimeout());

        // connect
        String sockHost = http.useProxy() ? http.getProxyHost() : host;
        int sockPort = http.useProxy() ? http.getProxyPort() : port;
        InetSocketAddress sockAddr = new InetSocketAddress(sockHost, sockPort);
        socket.connect(sockAddr, http.getTimeout());

        this.conf = http.getConf();

        this.htmlParseFilters = (HtmlParseFilter[]) PluginRepository.get(conf).getOrderedPlugins(
                HtmlParseFilter.class, HtmlParseFilter.X_POINT_ID, HtmlParseFilters.HTMLPARSEFILTER_ORDER);

        if (sockAddr != null && conf.getBoolean("store.ip.address", false)) {
            headers.add("_ip_", sockAddr.getAddress().getHostAddress());
        }

        // make request
        OutputStream req = socket.getOutputStream();

        StringBuffer reqStr = new StringBuffer("GET ");
        if (http.useProxy()) {
            reqStr.append(url.getProtocol() + "://" + host + portString + path);
        } else {
            reqStr.append(path);
        }

        reqStr.append(" HTTP/1.0\r\n");

        reqStr.append("Host: ");
        reqStr.append(host);
        reqStr.append(portString);
        reqStr.append("\r\n");

        reqStr.append("Accept-Encoding: x-gzip, gzip, deflate\r\n");

        String userAgent = http.getUserAgent();
        if ((userAgent == null) || (userAgent.length() == 0)) {
            if (Http.LOG.isErrorEnabled()) {
                Http.LOG.error("User-agent is not set!");
            }
        } else {
            reqStr.append("User-Agent: ");
            reqStr.append(userAgent);
            reqStr.append("\r\n");
        }

        reqStr.append("Accept-Language: ");
        reqStr.append(this.http.getAcceptLanguage());
        reqStr.append("\r\n");

        reqStr.append("Accept: ");
        reqStr.append(this.http.getAccept());
        reqStr.append("\r\n");

        if (datum.getModifiedTime() > 0) {
            reqStr.append("If-Modified-Since: " + HttpDateFormat.toString(datum.getModifiedTime()));
            reqStr.append("\r\n");
        }
        reqStr.append("\r\n");

        byte[] reqBytes = reqStr.toString().getBytes();

        req.write(reqBytes);
        req.flush();

        // process the response
        PushbackInputStream in = new PushbackInputStream(
                new BufferedInputStream(socket.getInputStream(), Http.BUFFER_SIZE), Http.BUFFER_SIZE);

        StringBuffer line = new StringBuffer();

        boolean haveSeenNonContinueStatus = false;
        while (!haveSeenNonContinueStatus) {
            // parse status code line
            this.code = parseStatusLine(in, line);
            // parse headers
            parseHeaders(in, line);
            haveSeenNonContinueStatus = code != 100; // 100 is "Continue"
        }

        if (this.code == 200 && !url.toString().endsWith("robots.txt")) {
            readPlainContent(url);
        } else {
            readPlainContent(in);
        }

        try {
            byte[] decodeContent = null;
            String contentEncoding = getHeader(Response.CONTENT_ENCODING);
            if ("gzip".equals(contentEncoding) || "x-gzip".equals(contentEncoding)) {
                decodeContent = http.processGzipEncoded(content, url);
            } else if ("deflate".equals(contentEncoding)) {
                decodeContent = http.processDeflateEncoded(content, url);
            } else {
                if (Http.LOG.isTraceEnabled()) {
                    Http.LOG.trace("fetched " + content.length + " bytes from " + url);
                }
            }
            if (decodeContent != null) {
                content = decodeContent;
            }
        } catch (Exception e) {
            headers.remove(Response.CONTENT_ENCODING);
        }
    } finally {
        if (socket != null)
            socket.close();
    }

}
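
A note on the buffer size in this example: the pushback buffer is sized to the full Http.BUFFER_SIZE, presumably so that parseStatusLine and parseHeaders can push back any body bytes they read past the end of the headers; the storm-crawler example at the end of this page does the same.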

From source file:com.handywedge.binarystore.store.azure.BinaryStoreManagerImpl.java

@SuppressWarnings("unused")
@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.info("ABS update method: start.");

    logger.debug("" + storage.toString());
    logger.debug("?" + binary.toString());

    long startSingle = System.currentTimeMillis();

    CloudBlobClient bClient = getABSClient(binary.getBucketName(), true);

    BinaryInfo rtnBinary = new BinaryInfo();
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);
        CloudBlockBlob blob = bClient.getContainerReference(binary.getBucketName())
                .getBlockBlobReference(binary.getFileName());

        if (written < BINARY_PART_SIZE_5MB) {
            BlobOutputStream blobOutputStream = blob.openOutputStream();

            int next = awsInputStream.read();
            while (next != -1) {
                blobOutputStream.write(next);
                next = awsInputStream.read();
            }
            blobOutputStream.close();

            blob.downloadAttributes();
            BlobProperties properties = blob.getProperties();
            properties.setContentType(binary.getContentType());
            blob.uploadProperties();

        } else {
            int firstByte = 0;
            int partNumber = 1;
            Boolean isFirstChunck = true;
            Boolean overSizeLimit = false;
            List<BlockEntry> blockList = new ArrayList<BlockEntry>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(
                        UUID.randomUUID().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");
                tempFile.deleteOnExit();
                OutputStream os = null;
                try {
                    os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > BINARY_PART_SIZE_5MB * 1024) { // 5GB
                        overSizeLimit = true;
                        logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                } finally {
                    IOUtils.closeQuietly(os);
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                String blockId = Base64.encodeBase64String(
                        String.format("BlockId%07d", partNumber).getBytes(StandardCharsets.UTF_8));
                BlockEntry block = new BlockEntry(blockId);
                blockList.add(block);
                blob.uploadBlock(blockId, chunk, partSize);

                partNumber++;
                chunk.close();
            }

            blob.commitBlockList(blockList);

            blob.downloadAttributes();
            BlobProperties properties = blob.getProperties();
            properties.setContentType(binary.getContentType());
            blob.uploadProperties();

            logger.debug("commitBlockList.");
        }

        if (blob.exists()) {
            rtnBinary = createReturnBinaryInfo(blob);
        } else {
            rtnBinary = binary;
        }

    } catch (StorageException se) {
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, se,
                binary.getFileName());
    } catch (URISyntaxException ue) {
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ue,
                binary.getFileName());
    } catch (FileNotFoundException fe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, fe,
                binary.getFileName());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.getFileName());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }

    long endSingle = System.currentTimeMillis();
    logger.info("{} Geted : {} ms\n", binary.getFileName(), (endSingle - startSingle));

    logger.info("ABS update method: end.");
    return rtnBinary;
}
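
The upload loop above leans on a common PushbackInputStream idiom: a one-byte pushback buffer is enough to probe for end-of-stream without losing any data. A minimal sketch of the idiom in isolation (hasMore is an illustrative name, not part of the example's API):

// probe for more data: read one byte and, if the stream is not yet
// exhausted, push the byte back so nothing is actually consumed
static boolean hasMore(PushbackInputStream in) throws IOException {
    int b = in.read();
    if (b == -1) {
        return false; // end of stream
    }
    in.unread(b); // restore the byte for the next real read
    return true;
}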

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // initiate the multipart upload
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024;
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}

From source file:it.unifi.rcl.chess.traceanalysis.Trace.java

/**
 * Checks if the input stream is GZIP-compressed by peeking at its first two
 * bytes and comparing them against the gzip magic number.
 * @param stream
 * @return true if the stream starts with the gzip magic number
 * @throws IOException
 */
private static boolean checkGZIP(InputStream stream) throws IOException {
    PushbackInputStream pb = new PushbackInputStream(stream, 2); //we need a pushbackstream to look ahead
    byte[] signature = new byte[2];
    pb.read(signature); //read the signature
    pb.unread(signature); //push back the signature to the stream
    // check whether the signature matches the standard gzip magic number
    return signature[0] == (byte) 0x1f && signature[1] == (byte) 0x8b;
}
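
Note that checkGZIP consumes the two peeked bytes from the caller's point of view: they are pushed back into the local PushbackInputStream, which is then discarded. A variant that keeps them by returning the wrapper instead of a boolean (a sketch under that assumption; wrapIfGzip is a hypothetical name):

// peek at the magic number, then hand back a stream that still
// contains every original byte
private static InputStream wrapIfGzip(InputStream stream) throws IOException {
    PushbackInputStream pb = new PushbackInputStream(stream, 2);
    byte[] signature = new byte[2];
    int len = pb.read(signature);
    if (len > 0) {
        pb.unread(signature, 0, len); // push the peeked bytes back
    }
    if (len == 2 && signature[0] == (byte) 0x1f && signature[1] == (byte) 0x8b) {
        return new java.util.zip.GZIPInputStream(pb); // transparent decompression
    }
    return pb; // not gzip: the caller reads the untouched data
}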

From source file:org.disrupted.rumble.network.protocols.firechat.workers.FirechatOverBluetooth.java

@Override
public void processingPacketFromNetwork() {
    try {
        final int CR = 13;
        final int LF = 10;
        pbin = new PushbackInputStream(((BluetoothConnection) con).getInputStream(), BUFFER_SIZE);

        while (true) {
            byte[] buffer = new byte[BUFFER_SIZE];
            int count = pbin.read(buffer, 0, BUFFER_SIZE);
            if (count < 0)
                return; // end of stream reached

            int i = 0;
            char currentCharVal = (char) buffer[i++];
            while ((currentCharVal != CR) && (currentCharVal != LF) && (i < count))
                currentCharVal = (char) buffer[i++];

            if ((currentCharVal != CR) && (currentCharVal != LF)) {
                //whatever it was, it was not a Firechat message
                buffer = null;
            } else {
                try {
                    pbin.unread(buffer, i, count - i);
                    String jsonString = new String(buffer, 0, i - 1);
                    JSONObject message = new JSONObject(jsonString);

                    ChatMessage status = parser.networkToChatMessage(message);
                    String filename = downloadFile(status.getFileSize());
                    if (filename != null) {
                        status.setAttachedFile(filename);
                    }

                    /*
                     * It is very important to post an event as it will be catch by the
                     * CacheManager and will update the database accordingly
                     */
                    EventBus.getDefault().post(new ChatMessageReceived(status, this));
                } catch (JSONException ignore) {
                    Log.d(TAG, "malformed JSON");
                } catch (IOException e) {
                    Log.e(TAG, "[!] Error while unreading: " + e.toString());
                }
            }
        }
    } catch (IOException silentlyCloseConnection) {
        Log.d(TAG, silentlyCloseConnection.getMessage());
    } catch (InputOutputStreamException silentlyCloseConnection) {
        Log.d(TAG, silentlyCloseConnection.getMessage());
    }
}
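
The key call in this example is pbin.unread(buffer, i, count - i), which pushes the unconsumed tail of the buffer back into the stream; this is why the pushback buffer is sized to BUFFER_SIZE rather than to a single byte. A minimal sketch of a partial unread (contents illustrative):

PushbackInputStream in = new PushbackInputStream(
        new ByteArrayInputStream("header\nbody".getBytes()), 64);
byte[] buf = new byte[64];
int count = in.read(buf); // reads all 11 bytes
int i = 0;
while (buf[i] != '\n') {
    i++; // scan to the delimiter
}
in.unread(buf, i + 1, count - i - 1); // push back everything after '\n'
// subsequent reads on the stream start at "body"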

From source file:org.apache.cxf.systest.jaxrs.JAXRSMultipartTest.java

@Test
public void testBookAsMassiveAttachment() throws Exception {
    // CXF-5842
    int orig = countTempFiles();
    String address = "http://localhost:" + PORT + "/bookstore/books/attachments";
    InputStream is = getClass().getResourceAsStream("/org/apache/cxf/systest/jaxrs/resources/attachmentData");
    //create a stream that injects a mass of extra data into the attachment to cause the
    //server to buffer the attachment to disk.
    PushbackInputStream buf = new PushbackInputStream(is, 1024 * 20) {
        int bcount = -1;

        @Override
        public int read(byte b[], int offset, int len) throws IOException {
            if (bcount >= 0 && bcount < 1024 * 50) {
                for (int x = 0; x < len; x++) {
                    b[offset + x] = (byte) x;
                }
                bcount += len;
                return len;
            }
            int i = super.read(b, offset, len);
            for (int x = 0; x < i - 5; x++) {
                if (b[x + offset] == '*' && b[x + offset + 1] == '*' && b[x + offset + 2] == 'D'
                        && b[x + offset + 3] == '*' && b[x + offset + 4] == '*') {
                    super.unread(b, x + offset + 5, i - x - 5);
                    i = x;
                    bcount = 0;
                }
            }
            return i;
        }
    };
    doAddBook("multipart/related", address, buf, 413);
    assertEquals(orig, countTempFiles());
}

From source file:org.apache.usergrid.services.assets.data.AWSBinaryStore.java

@Override
public void write(final UUID appId, final Entity entity, InputStream inputStream) throws Exception {

    String uploadFileName = AssetUtils.buildAssetKey(appId, entity);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long written = IOUtils.copyLarge(inputStream, baos, 0, FIVE_MB);

    byte[] data = baos.toByteArray();

    InputStream awsInputStream = new ByteArrayInputStream(data);

    final Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());

    String mimeType = AssetMimeHandler.get().getMimeType(entity, data);

    Boolean overSizeLimit = false;

    EntityManager em = emf.getEntityManager(appId);

    if (written < FIVE_MB) { // total smaller than 5mb

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(written);
        om.setContentType(mimeType);
        PutObjectResult result = null;
        result = getS3Client().putObject(bucketName, uploadFileName, awsInputStream, om);

        String md5sum = Hex.encodeHexString(Base64.decodeBase64(result.getContentMd5()));
        String eTag = result.getETag();

        fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);

        if (md5sum != null)
            fileMetadata.put(AssetUtils.CHECKSUM, md5sum);
        fileMetadata.put(AssetUtils.E_TAG, eTag);

        em.update(entity);

    } else { // bigger than 5mb... dump 5 mb tmp files and upload from them
        written = 0; //reset written to 0; we haven't actually written anything yet
        int partNumber = 1;
        int firstByte = 0;
        Boolean isFirstChunck = true;
        List<PartETag> partETags = new ArrayList<PartETag>();

        //get the s3 client in order to initialize the multipart request
        getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName,
                uploadFileName);
        InitiateMultipartUploadResult initResponse = getS3Client().initiateMultipartUpload(initRequest);

        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inputStream, 1);

        // determine the maximum file size allowed, default to 50mb
        long maxSizeBytes = 50 * FileUtils.ONE_MB;
        String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
        if (StringUtils.isNumeric(maxSizeMbString)) {
            maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
        }

        // always allow files up to 5mb
        if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
            maxSizeBytes = 5 * FileUtils.ONE_MB;
        }

        while (-1 != (firstByte = chunckableInputStream.read())) {
            long partSize = 0;
            chunckableInputStream.unread(firstByte);
            File tempFile = File.createTempFile(
                    entity.getUuid().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");

            tempFile.deleteOnExit();
            OutputStream os = null;
            try {
                os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));

                if (isFirstChunck) {
                    partSize = IOUtils.copyLarge(firstChunck, os, 0, (FIVE_MB));
                    isFirstChunck = false;
                } else {
                    partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (FIVE_MB));
                }
                written += partSize;

                if (written > maxSizeBytes) {
                    overSizeLimit = true;
                    logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                    break;
                    //set flag here and break out of loop to run abort
                }
            } finally {
                IOUtils.closeQuietly(os);
            }

            FileInputStream chunk = new FileInputStream(tempFile);

            Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
            if (!isLastPart)
                chunckableInputStream.unread(firstByte);

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(initResponse.getUploadId())
                    .withBucketName(bucketName).withKey(uploadFileName).withInputStream(chunk)
                    .withPartNumber(partNumber).withPartSize(partSize).withLastPart(isLastPart);
            partETags.add(getS3Client().uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }

        //check for flag here then abort.
        if (overSizeLimit) {

            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId());

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucketName);

            MultipartUploadListing listResult = getS3Client().listMultipartUploads(listRequest);

            //update the entity with the error.
            try {
                logger.error("starting update of entity due to oversized asset");
                fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                em.update(entity);
            } catch (Exception e) {
                logger.error("Error updating entity with error message", e);
            }

            int timesIterated = 20;
            //loop and abort all the multipart uploads
            while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {

                getS3Client().abortMultipartUpload(abortRequest);
                Thread.sleep(1000);
                timesIterated--;
                listResult = getS3Client().listMultipartUploads(listRequest);
                if (logger.isDebugEnabled()) {
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }

            }
            if (timesIterated == 0) {
                logger.error("Files parts that couldn't be aborted in 20 seconds are:");
                Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads().iterator();
                while (multipartUploadIterator.hasNext()) {
                    logger.error(multipartUploadIterator.next().getKey());
                }
            }
        } else {
            CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId(), partETags);
            CompleteMultipartUploadResult amazonResult = getS3Client().completeMultipartUpload(request);
            fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
            fileMetadata.put(AssetUtils.E_TAG, amazonResult.getETag());
            em.update(entity);
        }
    }
}

From source file:org.apache.james.mailbox.maildir.mail.model.MaildirMessage.java

/**
 * Return the position in the given {@link InputStream} at which the Body of
 * the MailboxMessage starts.
 */
private int bodyStartOctet(InputStream msgIn) throws IOException {
    // we need to push back at most 3 bytes
    PushbackInputStream in = new PushbackInputStream(msgIn, 3);
    int localBodyStartOctet = in.available();
    int i;
    int count = 0;
    while ((i = in.read()) != -1 && in.available() > 4) {
        if (i == 0x0D) {
            int a = in.read();
            if (a == 0x0A) {
                int b = in.read();

                if (b == 0x0D) {
                    int c = in.read();

                    if (c == 0x0A) {
                        localBodyStartOctet = count + 4;
                        break;
                    }
                    in.unread(c);
                }
                in.unread(b);
            }
            in.unread(a);
        }
        count++;
    }
    return localBodyStartOctet;
}
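
A quick sanity check of the boundary detection (the message string is illustrative; a few trailing body bytes are needed because of the in.available() > 4 guard in the loop):

InputStream msgIn = new ByteArrayInputStream(
        "Subject: hi\r\n\r\nBody text follows.".getBytes(StandardCharsets.US_ASCII));
// "Subject: hi\r\n" is 13 bytes and the blank "\r\n" line ends at offset 15,
// so bodyStartOctet(msgIn) returns 15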

From source file:com.handywedge.binarystore.store.gcs.BinaryStoreManagerImpl.java

@SuppressWarnings("unused")
@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.info("GCS update method: start.");

    logger.debug("" + storage.toString());
    logger.debug("?" + binary.toString());

    long startSingle = System.currentTimeMillis();

    Storage gStorage = getGCSClient(binary.getBucketName(), true);

    File tempFile = null;
    logger.info("Uploading a new binary to GCS from a file\n");

    BinaryInfo rtnBinary = new BinaryInfo();
    BlobId blobId = BlobId.of(binary.getBucketName(), binary.getFileName());
    List<Acl> acls = new ArrayList<>();
    acls.add(Acl.of(Acl.User.ofAllUsers(), Acl.Role.READER));
    BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(binary.getContentType())
            .setStorageClass(StorageClass.COLDLINE).setAcl(acls).build();

    ByteArrayOutputStream baos = new ByteArrayOutputStream();

    long written = -1L;
    try {
        written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);
    } catch (IOException e) {
        logger.error("IOUtils.copyLarge ?");
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.OUT_OF_RESOURCE, e);
    }

    byte[] data = baos.toByteArray();
    InputStream gcsInputStream = new ByteArrayInputStream(data);

    if (written < BINARY_PART_SIZE_5MB) {
        Blob blob = gStorage.create(blobInfo, data);
        rtnBinary = createReturnBinaryInfo(blob);
    } else {
        int firstByte = 0;
        int partNumber = 1;
        Boolean isFirstChunck = true;
        Boolean overSizeLimit = false;
        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

        try {
            tempFile = File.createTempFile(UUID.randomUUID().toString().concat(binary.getFileName()), "tmp");
        } catch (IOException e) {
            logger.error("File.createTempFile ???={}",
                    UUID.randomUUID().toString().concat(binary.getFileName()));
            throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.DISK_IO_ERROR, e,
                    UUID.randomUUID().toString().concat(binary.getFileName()));
        }

        try {
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);

                OutputStream os = null;
                try {
                    os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath(), true));

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > BINARY_PART_SIZE_5MB * 1024) { // 5GB
                        overSizeLimit = true;
                        logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                } finally {
                    IOUtils.closeQuietly(os);
                }

                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }
            }
        } catch (IOException e) {
            logger.error("??????={}",
                    UUID.randomUUID().toString().concat(binary.getFileName()));
            throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.DISK_IO_ERROR, e,
                    UUID.randomUUID().toString().concat(binary.getFileName()));
        }

        try {
            WriteChannel writer = gStorage.writer(blobInfo);
            byte[] buffer = new byte[1024];
            InputStream input = new FileInputStream(tempFile);
            int limit;
            while ((limit = input.read(buffer)) >= 0) {
                try {
                    writer.write(ByteBuffer.wrap(buffer, 0, limit));
                } catch (Exception ex) {
                    logger.error("?????");
                    throw ex;
                }
            }
        } catch (IOException e) {
            logger.error("Upload???={}", blobInfo.toString());
            throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, e,
                    blobInfo.toString());
        }

        if (null != tempFile && tempFile.exists()) {
            tempFile.delete();
        }
    }

    Blob blob = gStorage.get(blobInfo.getBlobId());
    rtnBinary = createReturnBinaryInfo(blob);

    long endSingle = System.currentTimeMillis();
    logger.info("{} Geted : {} ms\n", binary.getFileName(), (endSingle - startSingle));

    logger.info("GCS update method: end.");
    return binary;
}

From source file:com.digitalpebble.storm.crawler.protocol.http.HttpResponse.java

/**
 * Default public constructor.
 * 
 * @param http
 * @param url
 * @param knownMetadata
 * @throws IOException
 * @throws HttpException
 */
public HttpResponse(HttpProtocol http, URL url, Metadata knownMetadata) throws IOException, HttpException {

    this.http = http;
    this.url = url;

    Scheme scheme = null;

    if ("http".equals(url.getProtocol())) {
        scheme = Scheme.HTTP;
    } else if ("https".equals(url.getProtocol())) {
        scheme = Scheme.HTTPS;
    } else {
        throw new IOException("Unknown scheme (not http/https) for url:" + url);
    }

    String path = "".equals(url.getFile()) ? "/" : url.getFile();

    // some servers will redirect a request with a host line like
    // "Host: <hostname>:80" to "http://<hpstname>/<orig_path>"- they
    // don't want the :80...

    String host = url.getHost();
    int port;
    String portString;
    if (url.getPort() == -1) {
        if (scheme == Scheme.HTTP) {
            port = 80;
        } else {
            port = 443;
        }
        portString = "";
    } else {
        port = url.getPort();
        portString = ":" + port;
    }
    Socket socket = null;

    try {
        socket = new Socket(); // create the socket
        socket.setSoTimeout(http.getTimeout());

        // connect
        String sockHost = http.useProxy() ? http.getProxyHost() : host;
        int sockPort = http.useProxy() ? http.getProxyPort() : port;
        InetSocketAddress sockAddr = new InetSocketAddress(sockHost, sockPort);
        socket.connect(sockAddr, http.getTimeout());

        if (scheme == Scheme.HTTPS) {
            SSLSocketFactory factory = (SSLSocketFactory) SSLSocketFactory.getDefault();
            SSLSocket sslsocket = (SSLSocket) factory.createSocket(socket, sockHost, sockPort, true);
            sslsocket.setUseClientMode(true);

            // Get the protocols and ciphers supported by this JVM
            Set<String> protocols = new HashSet<String>(Arrays.asList(sslsocket.getSupportedProtocols()));
            Set<String> ciphers = new HashSet<String>(Arrays.asList(sslsocket.getSupportedCipherSuites()));

            // Intersect with preferred protocols and ciphers
            protocols.retainAll(http.getTlsPreferredProtocols());
            ciphers.retainAll(http.getTlsPreferredCipherSuites());

            sslsocket.setEnabledProtocols(protocols.toArray(new String[protocols.size()]));
            sslsocket.setEnabledCipherSuites(ciphers.toArray(new String[ciphers.size()]));

            sslsocket.startHandshake();
            socket = sslsocket;
        }

        this.conf = http.getConf();
        if (ConfUtils.getBoolean(conf, "store.ip.address", false)) {
            headers.setValue("_ip_", sockAddr.getAddress().getHostAddress());
        }

        // make request
        OutputStream req = socket.getOutputStream();

        StringBuffer reqStr = new StringBuffer("GET ");
        if (http.useProxy()) {
            reqStr.append(url.getProtocol() + "://" + host + portString + path);
        } else {
            reqStr.append(path);
        }

        reqStr.append(" HTTP/1.0\r\n");

        reqStr.append("Host: ");
        reqStr.append(host);
        reqStr.append(portString);
        reqStr.append("\r\n");

        reqStr.append("Accept-Encoding: x-gzip, gzip, deflate\r\n");

        String userAgent = http.getUserAgent();
        if ((userAgent == null) || (userAgent.length() == 0)) {
            if (HttpProtocol.LOGGER.isErrorEnabled()) {
                HttpProtocol.LOGGER.error("User-agent is not set!");
            }
        } else {
            reqStr.append("User-Agent: ");
            reqStr.append(userAgent);
            reqStr.append("\r\n");
        }

        reqStr.append("Accept-Language: ");
        reqStr.append(this.http.getAcceptLanguage());
        reqStr.append("\r\n");

        reqStr.append("Accept: ");
        reqStr.append(this.http.getAccept());
        reqStr.append("\r\n");

        if (knownMetadata != null) {
            String ifModifiedSince = knownMetadata.getFirstValue("cachedLastModified");
            if (StringUtils.isNotBlank(ifModifiedSince)) {
                reqStr.append("If-Modified-Since: ");
                reqStr.append(ifModifiedSince);
                reqStr.append("\r\n");
            }

            String ifNoneMatch = knownMetadata.getFirstValue("cachedEtag");
            if (StringUtils.isNotBlank(ifNoneMatch)) {
                reqStr.append("If-None-Match: ");
                reqStr.append(ifNoneMatch);
                reqStr.append("\r\n");
            }
        }

        reqStr.append("\r\n");

        // @see http://www.w3.org/Protocols/rfc2068/rfc2068.txt for default
        // charset
        // TODO use UTF-8 and set a charset value explicitly
        byte[] reqBytes = reqStr.toString().getBytes(StandardCharsets.ISO_8859_1);

        req.write(reqBytes);
        req.flush();

        // process the response
        PushbackInputStream in = new PushbackInputStream(
                new BufferedInputStream(socket.getInputStream(), HttpProtocol.BUFFER_SIZE),
                HttpProtocol.BUFFER_SIZE);

        StringBuffer line = new StringBuffer();

        boolean haveSeenNonContinueStatus = false;
        while (!haveSeenNonContinueStatus) {
            // parse status code line
            this.code = parseStatusLine(in, line);
            // parse headers
            parseHeaders(in, line);
            haveSeenNonContinueStatus = code != 100; // 100 is "Continue"
        }
        String transferEncoding = getHeader(HttpHeaders.TRANSFER_ENCODING);
        if (transferEncoding != null && "chunked".equalsIgnoreCase(transferEncoding.trim())) {
            readChunkedContent(in, line);
        } else {
            readPlainContent(in);
        }

        String contentEncoding = getHeader(HttpHeaders.CONTENT_ENCODING);
        if ("gzip".equals(contentEncoding) || "x-gzip".equals(contentEncoding)) {
            content = http.processGzipEncoded(content, url);
        } else if ("deflate".equals(contentEncoding)) {
            content = http.processDeflateEncoded(content, url);
        } else {
            HttpProtocol.LOGGER.trace("fetched {} bytes from {}", content.length, url);
        }

    } finally {
        if (socket != null)
            socket.close();
    }

}