Example usage for org.apache.http.entity InputStreamEntity InputStreamEntity

Introduction

On this page you can find usage examples for the org.apache.http.entity.InputStreamEntity constructor.

Prototype

public InputStreamEntity(InputStream inputStream, ContentType contentType) 
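
Note that most of the examples below actually use the InputStreamEntity(InputStream, long) overload, which takes an explicit content length in bytes (-1 when the length is unknown), rather than the ContentType overload shown in the prototype. As a minimal sketch (not taken from a source file; the URL and payload are placeholders), the prototype constructor can be used like this:

import java.io.ByteArrayInputStream;
import java.io.InputStream;

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;

public class InputStreamEntityExample {

    public static void main(String[] args) {
        // Any InputStream will do; a ByteArrayInputStream keeps the sketch self-contained.
        InputStream in = new ByteArrayInputStream("{\"hello\":\"world\"}".getBytes());

        // Wrap the stream with an explicit ContentType. No length is given,
        // so HttpClient will stream the body using chunked transfer encoding.
        HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON);

        // Attach the entity to a request (the URL is a placeholder).
        HttpPost post = new HttpPost("http://example.com/resource");
        post.setEntity(entity);
    }
}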

Usage

From source file:jp.mixi.android.sdk.MixiContainerImpl.java

/**
 * Decompresses a gzip-compressed HttpEntity.
 * 
 * @param entity the gzip-compressed HttpEntity
 * @return the decompressed HttpEntity
 * @throws IllegalStateException
 * @throws IOException
 */
private HttpEntity decompressesGZipEntity(HttpEntity entity) throws IllegalStateException, IOException {
    return new InputStreamEntity(new GZIPInputStream(entity.getContent()), 0);
}
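
A hedged sketch of how a helper like the one above might be applied to a gzip-compressed HTTP response; the URL, the Accept-Encoding header and the use of -1 for the unknown decompressed length are illustrative assumptions, not part of the original SDK code.

import java.io.IOException;
import java.util.zip.GZIPInputStream;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;

public class GzipResponseExample {

    public static void main(String[] args) throws IOException {
        HttpGet get = new HttpGet("http://example.com/data");
        get.setHeader("Accept-Encoding", "gzip");

        HttpResponse response = new DefaultHttpClient().execute(get);
        HttpEntity entity = response.getEntity();

        // If the server answered with a gzip body, re-wrap the decompressed
        // stream in a new InputStreamEntity (length unknown, hence -1).
        Header encoding = entity.getContentEncoding();
        if (encoding != null && "gzip".equalsIgnoreCase(encoding.getValue())) {
            entity = new InputStreamEntity(new GZIPInputStream(entity.getContent()), -1);
        }

        System.out.println(EntityUtils.toString(entity));
    }
}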

From source file:ch.iterate.openstack.swift.Client.java

/**
 * Store a file on the server, including metadata, with the contents coming from an input stream. This allows you to
 * upload content without knowing its entire length in advance, and without having to hold it all in memory
 * at the same time.
 *
 * @param container   The name of the container
 * @param data        Any object that implements InputStream
 * @param contentType The MIME type of the file
 * @param name        The name of the file on the server
 * @param metadata    A map with the metadata as key names and values as the metadata values
 * @return The ETag of the stored object if the response code is 201 (Created)
 * @throws GenericException Unexpected response
 */
public String storeObject(Region region, String container, InputStream data, String contentType, String name,
        Map<String, String> metadata) throws IOException {
    HttpPut method = new HttpPut(region.getStorageUrl(container, name));
    InputStreamEntity entity = new InputStreamEntity(data, -1);
    entity.setChunked(true);
    entity.setContentType(contentType);
    method.setEntity(entity);
    for (Map.Entry<String, String> key : this.renameObjectMetadata(metadata).entrySet()) {
        method.setHeader(key.getKey(), key.getValue());
    }
    Response response = this.execute(method, new DefaultResponseHandler());
    if (response.getStatusCode() == HttpStatus.SC_CREATED) {
        return response.getResponseHeader(HttpHeaders.ETAG).getValue();
    } else {
        throw new GenericException(response);
    }
}
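
The core of the method above is a chunked PUT whose content length is unknown up front. A standalone sketch of that pattern using plain Apache HttpClient 4.x (the storage URL and the X-Auth-Token value are placeholders):

import java.io.FileInputStream;
import java.io.InputStream;

import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;

public class ChunkedPutExample {

    public static void main(String[] args) throws Exception {
        InputStream data = new FileInputStream("/tmp/app.log");

        // Length -1 plus setChunked(true) tells HttpClient to stream the body
        // with chunked transfer encoding instead of buffering it in memory.
        InputStreamEntity entity = new InputStreamEntity(data, -1);
        entity.setChunked(true);
        entity.setContentType("text/plain");

        HttpPut put = new HttpPut("http://storage.example.com/v1/account/container/app.log");
        put.setHeader("X-Auth-Token", "AUTH_tk..."); // placeholder token
        put.setEntity(entity);

        HttpClient client = new DefaultHttpClient();
        HttpResponse response = client.execute(put);
        System.out.println("ETag: " + response.getFirstHeader("ETag"));
        EntityUtils.consume(response.getEntity());
    }
}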

From source file:org.springframework.extensions.webscripts.connector.RemoteClient.java

/**
 * Service a remote URL and write the result into an output stream.
 * If an InputStream is provided then a POST will be performed with the content
 * pushed to the url. Otherwise a standard GET will be performed.
 * 
 * @param url    The URL to open and retrieve data from
 * @param in     The optional InputStream - if set a POST or similar will be performed
 * @param out    The OutputStream to write result to
 * @param req    Optional HttpServletRequest - if set, headers will be proxied from this request
 * @param res    Optional HttpServletResponse - to which response headers will be copied - i.e. proxied
 * @param status The status object to apply the response code to
 * 
 * @return encoding specified by the source URL - may be null
 * 
 * @throws IOException
 */
private String service(URL url, InputStream in, OutputStream out, HttpServletRequest req,
        HttpServletResponse res, ResponseStatus status) throws IOException {
    final boolean trace = logger.isTraceEnabled();
    final boolean debug = logger.isDebugEnabled();
    if (debug) {
        logger.debug("Executing " + "(" + requestMethod + ") " + url.toString());
        if (in != null)
            logger.debug(" - InputStream supplied - will push...");
        if (out != null)
            logger.debug(" - OutputStream supplied - will stream response...");
        if (req != null && res != null)
            logger.debug(" - Full Proxy mode between servlet request and response...");
    }

    // acquire and configure the HttpClient
    HttpClient httpClient = createHttpClient(url);

    URL redirectURL = url;
    HttpResponse response;
    HttpRequestBase method = null;
    int retries = 0;
    // Only process redirects if we are not processing a 'push'
    int maxRetries = in == null ? this.maxRedirects : 1;
    try {
        do {
            // Release a previous method that we processed due to a redirect
            if (method != null) {
                method.reset();
                method = null;
            }

            switch (this.requestMethod) {
            default:
            case GET:
                method = new HttpGet(redirectURL.toString());
                break;
            case PUT:
                method = new HttpPut(redirectURL.toString());
                break;
            case POST:
                method = new HttpPost(redirectURL.toString());
                break;
            case DELETE:
                method = new HttpDelete(redirectURL.toString());
                break;
            case HEAD:
                method = new HttpHead(redirectURL.toString());
                break;
            case OPTIONS:
                method = new HttpOptions(redirectURL.toString());
                break;
            }

            // proxy over any headers from the request stream to proxied request
            if (req != null) {
                Enumeration<String> headers = req.getHeaderNames();
                while (headers.hasMoreElements()) {
                    String key = headers.nextElement();
                    if (key != null) {
                        key = key.toLowerCase();
                        if (!this.removeRequestHeaders.contains(key) && !this.requestProperties.containsKey(key)
                                && !this.requestHeaders.containsKey(key)) {
                            method.setHeader(key, req.getHeader(key));
                            if (trace)
                                logger.trace("Proxy request header: " + key + "=" + req.getHeader(key));
                        }
                    }
                }
            }

            // apply request properties, allows for the assignment and override of specific header properties
            // firstly pre-configured headers are applied and overridden/augmented by runtime request properties 
            final Map<String, String> headers = (Map<String, String>) this.requestHeaders.clone();
            headers.putAll(this.requestProperties);
            if (headers.size() != 0) {
                for (Map.Entry<String, String> entry : headers.entrySet()) {
                    String headerName = entry.getKey();
                    String headerValue = headers.get(headerName);
                    if (headerValue != null) {
                        method.setHeader(headerName, headerValue);
                    }
                    if (trace)
                        logger.trace("Set request header: " + headerName + "=" + headerValue);
                }
            }

            // Apply cookies
            if (this.cookies != null && !this.cookies.isEmpty()) {
                StringBuilder builder = new StringBuilder(128);
                for (Map.Entry<String, String> entry : this.cookies.entrySet()) {
                    if (builder.length() != 0) {
                        builder.append(';');
                    }
                    builder.append(entry.getKey());
                    builder.append('=');
                    builder.append(entry.getValue());
                }

                String cookieString = builder.toString();

                if (debug)
                    logger.debug("Setting Cookie header: " + cookieString);
                method.setHeader(HEADER_COOKIE, cookieString);
            }

            // HTTP basic auth support
            if (this.username != null && this.password != null) {
                String auth = this.username + ':' + this.password;
                method.addHeader("Authorization",
                        "Basic " + Base64.encodeBytes(auth.getBytes(), Base64.DONT_BREAK_LINES));
                if (debug)
                    logger.debug("Applied HTTP Basic Authorization for user: " + this.username);
            }

            // prepare the POST/PUT entity data if input supplied
            if (in != null) {
                method.setHeader(HEADER_CONTENT_TYPE, getRequestContentType());
                if (debug)
                    logger.debug("Set Content-Type=" + getRequestContentType());

                boolean urlencoded = getRequestContentType().startsWith(X_WWW_FORM_URLENCODED);
                if (!urlencoded) {
                    // apply content-length here if known (i.e. from proxied req)
                    // if this is not set, then the content will be buffered in memory
                    long contentLength = -1L;
                    if (req != null) {
                        String contentLengthStr = req.getHeader(HEADER_CONTENT_LENGTH);
                        if (contentLengthStr != null) {
                            try {
                                long actualContentLength = Long.parseLong(contentLengthStr);
                                if (actualContentLength > 0) {
                                    contentLength = actualContentLength;
                                }
                            } catch (NumberFormatException e) {
                                logger.warn("Can't parse 'Content-Length' header from '" + contentLengthStr
                                        + "'. The contentLength is set to -1");
                            }
                        }
                    }

                    if (debug)
                        logger.debug(requestMethod + " entity Content-Length=" + contentLength);

                    // remove the Content-Length header as the setEntity() method will perform this explicitly
                    method.removeHeaders(HEADER_CONTENT_LENGTH);

                    try {
                        // Apache doc for AbstractHttpEntity states:
                        // HttpClient must use chunk coding if the entity content length is unknown (== -1).
                        HttpEntity entity = new InputStreamEntity(in, contentLength);
                        ((HttpEntityEnclosingRequest) method)
                                .setEntity(contentLength == -1L || contentLength > 16384L ? entity
                                        : new BufferedHttpEntity(entity));
                        ((HttpEntityEnclosingRequest) method).setHeader(HTTP.EXPECT_DIRECTIVE,
                                HTTP.EXPECT_CONTINUE);
                    } catch (IOException e) {
                        // During the creation of the BufferedHttpEntity the underlying stream can be closed by the client,
                        // this happens if the request is discarded by the browser - we don't log this IOException as INFO
                        // as that would fill the logs with unhelpful noise - enable DEBUG logging to see these messages.
                        throw new RuntimeException(e.getMessage(), e);
                    }
                } else {
                    if (req != null) {
                        // apply any supplied request parameters
                        Map<String, String[]> postParams = req.getParameterMap();
                        if (postParams != null) {
                            List<NameValuePair> params = new ArrayList<NameValuePair>(postParams.size());
                            for (String key : postParams.keySet()) {
                                String[] values = postParams.get(key);
                                for (int i = 0; i < values.length; i++) {
                                    params.add(new BasicNameValuePair(key, values[i]));
                                }
                            }
                        }
                        // ensure that the Content-Length header is not directly proxied - as the underlying
                        // HttpClient will encode the body as appropriate - cannot assume same as the original client sent
                        method.removeHeaders(HEADER_CONTENT_LENGTH);
                    } else {
                        // Apache doc for AbstractHttpEntity states:
                        // HttpClient must use chunk coding if the entity content length is unknown (== -1).
                        HttpEntity entity = new InputStreamEntity(in, -1L);
                        ((HttpEntityEnclosingRequest) method).setEntity(entity);
                        ((HttpEntityEnclosingRequest) method).setHeader(HTTP.EXPECT_DIRECTIVE,
                                HTTP.EXPECT_CONTINUE);
                    }
                }
            }

            //////////////////////////////////////////////////////////////////////////////////////////////////////////////
            // Execute the method to get the response
            response = httpClient.execute(method);

            redirectURL = processResponse(redirectURL, response);
        } while (redirectURL != null && ++retries < maxRetries);

        // record the status code for the internal response object
        int responseCode = response.getStatusLine().getStatusCode();
        if (responseCode >= HttpServletResponse.SC_INTERNAL_SERVER_ERROR && this.exceptionOnError) {
            buildProxiedServerError(response);
        } else if (responseCode == HttpServletResponse.SC_SERVICE_UNAVAILABLE) {
            // Occurs when server is down and likely an ELB response 
            throw new ConnectException(response.toString());
        }
        boolean allowResponseCommit = (responseCode != HttpServletResponse.SC_UNAUTHORIZED
                || commitResponseOnAuthenticationError);
        status.setCode(responseCode);
        if (debug)
            logger.debug("Response status code: " + responseCode);

        // walk over headers that are returned from the connection
        // if we have a servlet response, push the headers back to the existing response object
        // otherwise, store headers on status
        Header contentType = null;
        Header contentLength = null;
        for (Header header : response.getAllHeaders()) {
            // NOTE: Tomcat does not appear to be obeying the servlet spec here.
            //       If you call setHeader() the spec says it will "clear existing values" - i.e. not
            //       add additional values to existing headers - but for Server and Transfer-Encoding
            //       if we set them, then two values are received in the response...
            // In addition handle the fact that the key can be null.
            final String key = header.getName();
            if (key != null) {
                if (!key.equalsIgnoreCase(HEADER_SERVER) && !key.equalsIgnoreCase(HEADER_TRANSFER_ENCODING)) {
                    if (res != null && allowResponseCommit
                            && !this.removeResponseHeaders.contains(key.toLowerCase())) {
                        res.setHeader(key, header.getValue());
                    }

                    // store headers back onto status
                    status.setHeader(key, header.getValue());

                    if (trace)
                        logger.trace("Response header: " + key + "=" + header.getValue());
                }

                // grab a reference to the content-type header here if we find it
                if (contentType == null && key.equalsIgnoreCase(HEADER_CONTENT_TYPE)) {
                    contentType = header;
                    // additional optional processing based on the Content-Type header
                    processContentType(url, res, contentType);
                }
                // grab a reference to the Content-Length header here if we find it
                else if (contentLength == null && key.equalsIgnoreCase(HEADER_CONTENT_LENGTH)) {
                    contentLength = header;
                }
            }
        }

        // locate response encoding from the headers
        String encoding = null;
        String ct = null;
        if (contentType != null) {
            ct = contentType.getValue();
            int csi = ct.indexOf(CHARSETEQUALS);
            if (csi != -1) {
                encoding = ct.substring(csi + CHARSETEQUALS.length());
                if ((csi = encoding.lastIndexOf(';')) != -1) {
                    encoding = encoding.substring(0, csi);
                }
                if (debug)
                    logger.debug("Response charset: " + encoding);
            }
        }
        if (debug)
            logger.debug("Response encoding: " + contentType);

        // generate container driven error message response for specific response codes
        if (res != null && responseCode == HttpServletResponse.SC_UNAUTHORIZED && allowResponseCommit) {
            res.sendError(responseCode, response.getStatusLine().getReasonPhrase());
        } else {
            // push status to existing response object if required
            if (res != null && allowResponseCommit) {
                res.setStatus(responseCode);
            }
            // perform the stream write from the response to the output
            int bufferSize = this.bufferSize;
            if (contentLength != null) {
                long length = Long.parseLong(contentLength.getValue());
                if (length < bufferSize) {
                    bufferSize = (int) length;
                }
            }
            copyResponseStreamOutput(url, res, out, response, ct, bufferSize);
        }

        // if we get here call was successful
        return encoding;
    } catch (ConnectTimeoutException | SocketTimeoutException timeErr) {
        // caught a socket timeout IO exception - apply internal error code
        logger.info("Exception calling (" + requestMethod + ") " + url.toString());
        status.setCode(HttpServletResponse.SC_REQUEST_TIMEOUT);
        status.setException(timeErr);
        status.setMessage(timeErr.getMessage());
        if (res != null) {
            //return a Request Timeout error
            res.setStatus(HttpServletResponse.SC_REQUEST_TIMEOUT, timeErr.getMessage());
        }

        throw timeErr;
    } catch (UnknownHostException | ConnectException hostErr) {
        // caught an unknown host IO exception 
        logger.info("Exception calling (" + requestMethod + ") " + url.toString());
        status.setCode(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
        status.setException(hostErr);
        status.setMessage(hostErr.getMessage());
        if (res != null) {
            // return server error code
            res.setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE, hostErr.getMessage());
        }

        throw hostErr;
    } catch (IOException ioErr) {
        // caught a general IO exception - apply generic error code so one gets returned
        logger.info("Exception calling (" + requestMethod + ") " + url.toString());
        status.setCode(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        status.setException(ioErr);
        status.setMessage(ioErr.getMessage());
        if (res != null) {
            res.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ioErr.getMessage());
        }

        throw ioErr;
    } catch (RuntimeException e) {
        // caught an exception - apply generic error code so one gets returned
        logger.debug("Exception calling (" + requestMethod + ") " + url.toString());
        status.setCode(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        status.setException(e);
        status.setMessage(e.getMessage());
        if (res != null) {
            res.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e.getMessage());
        }

        throw e;
    } finally {
        // reset state values
        if (method != null) {
            method.releaseConnection();
        }
        setRequestContentType(null);
        this.requestMethod = HttpMethod.GET;
    }
}
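
The entity selection inside the push branch above (stream when the content length is unknown or large, buffer when it is small so the entity becomes repeatable) can be sketched in isolation. The 16 KB threshold mirrors the code above; the helper class and method names are hypothetical.

import java.io.IOException;
import java.io.InputStream;

import org.apache.http.HttpEntity;
import org.apache.http.entity.BufferedHttpEntity;
import org.apache.http.entity.InputStreamEntity;

public class EntitySelection {

    /**
     * Wraps a request body the same way the proxy code above does: small bodies
     * with a known length are buffered (making the entity repeatable), while
     * unknown-length or large bodies are streamed directly.
     */
    static HttpEntity wrapBody(InputStream in, long contentLength) throws IOException {
        HttpEntity entity = new InputStreamEntity(in, contentLength);
        if (contentLength != -1L && contentLength <= 16384L) {
            // BufferedHttpEntity reads the stream fully into memory; safe only for small bodies.
            entity = new BufferedHttpEntity(entity);
        }
        return entity;
    }
}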

From source file:com.dropbox.client2.DropboxAPI.java

/**
 * Creates a request that can upload a single chunk of data to the server via the
 * chunked upload protocol. This request reads the InputStream and advances it by
 * an amount equal to the number of bytes uploaded. For most users, the {@link ChunkedUploader}
 * object provides an easier interface to use and should provide most of the
 * functionality needed. If offset is 0 and uploadId is null, a new chunked upload is
 * created on the server.
 *
 * @param is A stream containing the data to be uploaded.
 * @param length The number of bytes to upload.
 * @param listener A ProgressListener (can be {@code null}) that will be notified of upload
 *                 progress.  The progress will be for this individual file chunk (starting
 *                 at zero bytes and ending at {@code length} bytes).
 * @param offset The offset into the file that the contents of the these bytes belongs to.
 * @param uploadId The unique ID identifying this upload to the server.
 * @return A ChunkedUploadRequest which can be used to upload a single chunk of data to Dropbox.
 */

public ChunkedUploadRequest chunkedUploadRequest(InputStream is, long length, ProgressListener listener,
        long offset, String uploadId) {
    String[] params;
    if (offset == 0) {
        params = new String[0];
    } else {
        params = new String[] { "upload_id", uploadId, "offset", "" + offset };
    }
    String url = RESTUtility.buildURL(session.getContentServer(), VERSION, "/chunked_upload/", params);
    HttpPut req = new HttpPut(url);
    session.sign(req);

    InputStreamEntity ise = new InputStreamEntity(is, length);
    ise.setContentEncoding("application/octet-stream");
    ise.setChunked(false);
    HttpEntity entity = ise;

    if (listener != null) {
        entity = new ProgressHttpEntity(entity, listener);
    }
    req.setEntity(entity);

    return new ChunkedUploadRequest(req, session);
}
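
A request like the one above expects a stream positioned at the next chunk plus an exact byte count, so a caller typically slices its data into fixed-size windows. The following Dropbox-independent sketch only illustrates that slicing; the chunk size, file path and omitted PUT wiring are assumptions.

import java.io.ByteArrayInputStream;
import java.io.FileInputStream;
import java.io.InputStream;

import org.apache.http.entity.InputStreamEntity;

public class ChunkSlicingExample {

    public static void main(String[] args) throws Exception {
        final int chunkSize = 4 * 1024 * 1024; // 4 MiB per chunk
        byte[] buffer = new byte[chunkSize];
        long offset = 0;

        InputStream file = new FileInputStream("/tmp/video.mp4");
        try {
            int read;
            while ((read = file.read(buffer)) != -1) {
                // Each chunk becomes its own fixed-length entity; the exact
                // length lets the server validate the chunk it received.
                InputStreamEntity chunk = new InputStreamEntity(
                        new ByteArrayInputStream(buffer, 0, read), read);
                chunk.setContentType("application/octet-stream");
                // The PUT for this chunk is omitted here.
                System.out.println("Prepared chunk of " + chunk.getContentLength()
                        + " bytes at offset " + offset);
                offset += read;
            }
        } finally {
            file.close();
        }
    }
}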

From source file:ch.iterate.openstack.swift.Client.java

/**
 * @param container          The name of the container
 * @param name               The name of the object
 * @param entity             The name of the request entity (make sure to set the Content-Type)
 * @param metadata           The metadata for the object
 * @param md5sum             The 32 character hex encoded MD5 sum of the data
 * @param objectSize         The total size in bytes of the object to be stored
 * @param segmentSize        Optional size in bytes of the object segments to be stored (forces large object support) default 4G
 * @param dynamicLargeObject Optional setting to use dynamic large objects, False/null will use static large objects if required
 * @param segmentContainer   Optional name of container to store file segments, defaults to storing chunks in the same container as the file will appear in
 * @param segmentFolder      Optional name of folder for storing file segments, defaults to ".file-segments"
 * @param leaveSegments      Optional setting to leave segments of large objects in place when the manifest is overwritten/changed
 * @return The ETAG if the save was successful, null otherwise
 * @throws GenericException There was a protocol level error talking to CloudFiles
 */
public String storeObject(Region region, String container, String name, HttpEntity entity,
        Map<String, String> metadata, String md5sum, Long objectSize, Long segmentSize,
        Boolean dynamicLargeObject, String segmentContainer, String segmentFolder, Boolean leaveSegments)
        throws IOException, InterruptedException {
    /*
     * Default values for large object support. We also use the defaults combined with the inputs
     * to determine whether to store as a large object.
     */

    /*
     * The maximum size of a single object (5GiB).
     */
    long singleObjectSizeLimit = (long) (5 * Math.pow(1024, 3));

    /*
     * The default minimum segment size (1MiB).
     */
    long minSegmentSize = 1024L * 1024L;

    /*
     * Set the segment size.
     *
     * Defaults to 4GiB segments, and will not permit smaller than 1MiB segments.
     */
    long actualSegmentSize = (segmentSize == null) ? (long) (4 * Math.pow(1024, 3))
            : Math.max(segmentSize, minSegmentSize);

    /*
     * Determines if we will store using large objects - we may do this for 3 reasons:
     *
     *  - A segmentSize has been specified and the object size is greater than the minimum segment size
     *  - If an objectSize is provided and is larger than the single object size limit of 5GiB
     *  - A segmentSize has been specified, but no objectSize given (we take this as a request for segmentation)
     *
     * The last case may fail if the user does not provide at least as much data as the minimum segment
     * size configured on the server, and will always produce a large object structure (even if only one
     * small segment is required).
     */
    objectSize = (objectSize == null) ? -1 : objectSize;
    boolean useLargeObject = ((segmentSize != null) && (objectSize > actualSegmentSize))
            || (objectSize > singleObjectSizeLimit) || ((segmentSize != null) && (objectSize == -1));

    if (!useLargeObject) {
        return storeObject(region, container, name, entity, metadata, md5sum);
    } else {
        /*
         * We need to upload a large object as defined by the method
         * parameters. For now this is done sequentially, but a parallel
         * version using appropriate random access to the underlying data
         * may be desirable.
         *
         * We make the assumption that the given file size will not be
         * greater than int.MAX_VALUE * segmentSize
         *
         */
        leaveSegments = (leaveSegments == null) ? Boolean.FALSE : leaveSegments;
        dynamicLargeObject = (dynamicLargeObject == null) ? Boolean.FALSE : dynamicLargeObject;
        segmentFolder = (segmentFolder == null) ? ".file-segments" : segmentFolder;
        segmentContainer = (segmentContainer == null) ? container : segmentContainer;

        Map<String, List<StorageObject>> oldSegmentsToRemove = null;

        /*
         * If we have chosen not to leave existing large object segments in place (default)
         * then we need to collect information about any existing file segments so that we can
         * deal with them after we complete the upload of the new manifest.
         *
         * We should only delete existing segments after a successful upload of a new manifest file
         * because this constitutes an object update and the older file should remain available
         * until the new file can be downloaded.
         */
        if (!leaveSegments) {
            ObjectMetadata existingMetadata;
            String manifestDLO = null;
            Boolean manifestSLO = Boolean.FALSE;

            try {
                existingMetadata = getObjectMetaData(region, container, name);

                if (existingMetadata.getMetaData().containsKey(Constants.MANIFEST_HEADER)) {
                    manifestDLO = existingMetadata.getMetaData().get(Constants.MANIFEST_HEADER);
                } else if (existingMetadata.getMetaData().containsKey(Constants.X_STATIC_LARGE_OBJECT)) {
                    JSONParser parser = new JSONParser();
                    String manifestSLOValue = existingMetadata.getMetaData()
                            .get(Constants.X_STATIC_LARGE_OBJECT);
                    manifestSLO = (Boolean) parser.parse(manifestSLOValue);
                }
            } catch (NotFoundException e) {
                /*
                 * Just means no object exists already, so continue
                 */
            } catch (ParseException e) {
                /*
                 * X_STATIC_LARGE_OBJECT header existed but failed to parse.
                 * If a static large object already exists this must be set to "true".
                 * If we got here then the X_STATIC_LARGE_OBJECT header existed, but failed
                 * to parse as a boolean, so fail upload as a precaution.
                 */
                return null;
            }

            if (manifestDLO != null) {
                /*
                 * We have found an existing dynamic large object, so use the prefix to get a list of
                 * existing objects. If we're putting up a new dlo, make sure the segment prefixes are
                 * different, then we can delete anything that's not in the new list if necessary.
                 */
                String oldContainer = manifestDLO.substring(0, manifestDLO.indexOf('/', 1));
                String oldPath = manifestDLO.substring(manifestDLO.indexOf('/', 1), manifestDLO.length());
                oldSegmentsToRemove = new HashMap<String, List<StorageObject>>();
                oldSegmentsToRemove.put(oldContainer, listObjects(region, oldContainer, oldPath));
            } else if (manifestSLO) {
                /*
                 * We have found an existing static large object, so grab the manifest data that
                 * details the existing segments - delete any later that we don't need any more
                 */

            }
        }

        int segmentNumber = 1;
        long timeStamp = System.currentTimeMillis() / 1000L;
        String segmentBase = String.format("%s/%d/%d", segmentFolder, timeStamp, objectSize);

        /*
         * Create subInputStream from the OutputStream we will pass to the
         * HttpEntity for writing content.
         */
        final PipedInputStream contentInStream = new PipedInputStream(64 * 1024);
        final PipedOutputStream contentOutStream = new PipedOutputStream(contentInStream);
        SubInputStream segmentStream = new SubInputStream(contentInStream, actualSegmentSize, false);

        /*
         * Fork the call to entity.writeTo() that allows us to grab any exceptions raised
         */
        final HttpEntity e = entity;

        final Callable<Boolean> writer = new Callable<Boolean>() {
            public Boolean call() throws Exception {
                e.writeTo(contentOutStream);
                return Boolean.TRUE;
            }
        };

        ExecutorService writeExecutor = Executors.newSingleThreadExecutor();
        final Future<Boolean> future = writeExecutor.submit(writer);
        /*
         * Check the future for exceptions after we've finished uploading segments
         */

        Map<String, List<StorageObject>> newSegmentsAdded = new HashMap<String, List<StorageObject>>();
        List<StorageObject> newSegments = new LinkedList<StorageObject>();
        JSONArray manifestSLO = new JSONArray();
        boolean finished = false;

        /*
         * Upload each segment of the file by reading sections of the content input stream
         * until the entire underlying stream is complete
         */
        while (!finished) {
            String segmentName = String.format("%s/%08d", segmentBase, segmentNumber);

            String etag;
            boolean error = false;
            try {
                etag = storeObject(region, segmentContainer, segmentStream, "application/octet-stream",
                        segmentName, new HashMap<String, String>());
            } catch (IOException ex) {
                // Storing this segment failed; log and rethrow
                System.out.println("Caught IO Exception: " + ex.getMessage());
                ex.printStackTrace();
                throw ex;
            }
            String segmentPath = segmentContainer + "/" + segmentName;
            long bytesUploaded = segmentStream.getBytesProduced();

            /*
             * Create the appropriate manifest structure if we're making a static large
             * object.
             *
             *   ETAG returned by the simple upload
             *   total size of segment uploaded
             *   path of segment
             */
            if (!dynamicLargeObject) {
                JSONObject segmentJSON = new JSONObject();

                segmentJSON.put("path", segmentPath);
                segmentJSON.put("etag", etag);
                segmentJSON.put("size_bytes", bytesUploaded);
                manifestSLO.add(segmentJSON);

                newSegments.add(new StorageObject(segmentName));
            }

            segmentNumber++;
            if (!finished) {
                finished = segmentStream.endSourceReached();
            }
            newSegmentsAdded.put(segmentContainer, newSegments);
            System.out.println("JSON: " + manifestSLO.toString());
            if (error)
                return "";

            segmentStream.readMoreBytes(actualSegmentSize);
        }

        /*
         * Attempts to retrieve the return value from the write operation
         * Any exceptions raised can then be handled appropriately
         */
        try {
            future.get();
        } catch (InterruptedException ex) {
            /*
             * The write was interrupted... delete the segments?
             */
        } catch (ExecutionException ex) {
            /*
             * This should always be an IOException or a RuntimeException
             * because the call to entity.writeTo() only throws IOException
             */
            Throwable t = ex.getCause();

            if (t instanceof IOException) {
                throw (IOException) t;
            } else {
                throw (RuntimeException) t;
            }
        }

        /*
         * Create an appropriate manifest depending on our DLO/SLO choice
         */
        String manifestEtag = null;
        if (dynamicLargeObject) {
            /*
             * Empty manifest with header detailing the shared prefix of object segments
             */
            long manifestTimeStamp = System.currentTimeMillis() / 1000L;
            metadata.put("X-Object-Manifest", segmentBase);
            metadata.put("x-object-meta-mtime", String.format("%s", manifestTimeStamp));
            manifestEtag = storeObject(region, container, new ByteArrayInputStream(new byte[0]),
                    entity.getContentType().getValue(), name, metadata);
        } else {
            /*
             * Manifest containing json list specifying details of the object segments.
             */
            URIBuilder urlBuild = new URIBuilder(region.getStorageUrl(container, name));
            urlBuild.setParameter("multipart-manifest", "put");
            URI url;
            try {
                url = urlBuild.build();
                String manifestContent = manifestSLO.toString();
                InputStreamEntity manifestEntity = new InputStreamEntity(
                        new ByteArrayInputStream(manifestContent.getBytes()), -1);
                manifestEntity.setChunked(true);
                manifestEntity.setContentType(entity.getContentType());
                HttpPut method = new HttpPut(url);
                method.setEntity(manifestEntity);
                method.setHeader("x-static-large-object", "true");
                Response response = this.execute(method, new DefaultResponseHandler());
                if (response.getStatusCode() == HttpStatus.SC_CREATED) {
                    manifestEtag = response.getResponseHeader(HttpHeaders.ETAG).getValue();
                } else {
                    throw new GenericException(response);
                }
            } catch (URISyntaxException ex) {
                ex.printStackTrace();
            }
        }

        /*
         * Delete stale segments of overwritten large object if requested.
         */
        if (!leaveSegments) {
            /*
             * Before deleting old segments, remove any objects from the delete list
             * that are also part of a new static large object that were updated during the upload.
             */
            if (!(oldSegmentsToRemove == null)) {
                for (String c : oldSegmentsToRemove.keySet()) {
                    List<StorageObject> rmv = oldSegmentsToRemove.get(c);
                    if (newSegmentsAdded.containsKey(c)) {
                        rmv.removeAll(newSegmentsAdded.get(c));
                    }
                    List<String> rmvNames = new LinkedList<String>();
                    for (StorageObject s : rmv) {
                        rmvNames.add(s.getName());
                    }
                    deleteObjects(region, c, rmvNames);
                }
            }
        }

        return manifestEtag;
    }
}
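
The final static-large-object step above (uploading the JSON manifest with the multipart-manifest=put query parameter and the x-static-large-object header) can be shown on its own. In this sketch the storage URL, auth token and the single manifest entry are placeholders.

import java.io.ByteArrayInputStream;

import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;

public class SloManifestExample {

    public static void main(String[] args) throws Exception {
        // A one-segment manifest; path, etag and size are placeholders.
        String manifest = "[{\"path\":\"container/.file-segments/1/1024/00000001\","
                + "\"etag\":\"d41d8cd98f00b204e9800998ecf8427e\",\"size_bytes\":1024}]";

        URIBuilder uri = new URIBuilder("http://storage.example.com/v1/account/container/bigfile");
        uri.setParameter("multipart-manifest", "put");

        // Unknown length (-1) plus chunked encoding, as in the code above.
        InputStreamEntity entity = new InputStreamEntity(
                new ByteArrayInputStream(manifest.getBytes("UTF-8")), -1);
        entity.setChunked(true);
        entity.setContentType("application/json");

        HttpPut put = new HttpPut(uri.build());
        put.setHeader("X-Auth-Token", "AUTH_tk..."); // placeholder token
        put.setHeader("x-static-large-object", "true");
        put.setEntity(entity);

        HttpClient client = new DefaultHttpClient();
        HttpResponse response = client.execute(put);
        System.out.println("Manifest PUT status: " + response.getStatusLine());
        EntityUtils.consume(response.getEntity());
    }
}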

From source file:com.microsoft.live.LiveConnectClient.java

private UploadRequest createUploadRequest(String path, String filename, InputStream is, long length,
        OverwriteOption overwrite) throws LiveOperationException {
    assert !TextUtils.isEmpty(path);
    assert !TextUtils.isEmpty(filename);
    assert is != null;

    InputStreamEntity entity = new InputStreamEntity(is, length);

    return new UploadRequest(this.session, this.httpClient, path, entity, filename, overwrite);
}

From source file:com.emc.esu.api.rest.EsuRestApiApache.java

private HttpResponse restPost(URL url, Map<String, String> headers, InputStream in, long contentLength)
        throws URISyntaxException, ClientProtocolException, IOException {
    HttpPost post = new HttpPost(url.toURI());

    setHeaders(post, headers);

    if (in != null) {
        post.setEntity(new InputStreamEntity(in, contentLength));
    } else {
        post.setEntity(new ByteArrayEntity(new byte[0]));
    }

    return httpClient.execute(post);
}

From source file:com.emc.esu.api.rest.EsuRestApiApache.java

private HttpResponse restPost(URL url, Map<String, String> headers, BufferSegment data)
        throws URISyntaxException, ClientProtocolException, IOException {
    HttpPost post = new HttpPost(url.toURI());

    setHeaders(post, headers);

    if (data != null) {
        if (data.getOffset() == 0 && (data.getSize() == data.getBuffer().length)) {
            // use the native byte array
            post.setEntity(new ByteArrayEntity(data.getBuffer()));
        } else {
            post.setEntity(new InputStreamEntity(
                    new ByteArrayInputStream(data.getBuffer(), data.getOffset(), data.getSize()),
                    data.getSize()));
        }
    } else {
        post.setEntity(new ByteArrayEntity(new byte[0]));
    }

    return httpClient.execute(post);
}

From source file:com.emc.esu.api.rest.EsuRestApiApache.java

private HttpResponse restPut(URL url, Map<String, String> headers, BufferSegment data)
        throws ClientProtocolException, IOException, URISyntaxException {

    HttpPut put = new HttpPut(url.toURI());

    setHeaders(put, headers);

    if (data != null) {
        if (data.getOffset() == 0 && (data.getSize() == data.getBuffer().length)) {
            // use the native byte array
            put.setEntity(new ByteArrayEntity(data.getBuffer()));
        } else {
            put.setEntity(new InputStreamEntity(
                    new ByteArrayInputStream(data.getBuffer(), data.getOffset(), data.getSize()),
                    data.getSize()));
        }
    } else {
        put.setEntity(new ByteArrayEntity(new byte[0]));
    }

    return httpClient.execute(put);

}
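
Both restPost and restPut above make the same choice between a ByteArrayEntity over the whole buffer and an InputStreamEntity over a ByteArrayInputStream slice. A small standalone helper (the class and method names are hypothetical) captures that pattern:

import java.io.ByteArrayInputStream;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.InputStreamEntity;

public class BufferSliceEntity {

    /**
     * Mirrors the choice made above: wrap the whole array directly, but wrap a
     * sub-range in a ByteArrayInputStream-backed InputStreamEntity so no copy
     * of the slice has to be made.
     */
    static HttpEntity toEntity(byte[] buffer, int offset, int size) {
        if (offset == 0 && size == buffer.length) {
            return new ByteArrayEntity(buffer);
        }
        return new InputStreamEntity(new ByteArrayInputStream(buffer, offset, size), size);
    }
}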