Example usage for org.apache.http.entity InputStreamEntity setChunked

List of usage examples for org.apache.http.entity InputStreamEntity setChunked

Introduction

In this page you can find the example usage for org.apache.http.entity InputStreamEntity setChunked.

Prototype

public void setChunked(boolean z) 

Source Link

Usage

From source file:com.smartsheet.api.internal.http.DefaultHttpClient.java

/**
 * Make an HTTP request and return the response, retrying the call when it is
 * safe to do so.
 *
 * <p>If the request carries a body, the body stream is (when necessary) wrapped in a
 * mark/reset-capable stream so it can be replayed on retry. A response status other
 * than 200 is handed to {@code shouldRetry}, which decides whether another attempt
 * should be made within the retry time budget.</p>
 *
 * @param smartsheetRequest the smartsheet request
 * @return the HTTP response
 * @throws HttpClientException the HTTP client exception
 */
public HttpResponse request(HttpRequest smartsheetRequest) throws HttpClientException {
    Util.throwIfNull(smartsheetRequest);
    if (smartsheetRequest.getUri() == null) {
        throw new IllegalArgumentException("A Request URI is required.");
    }

    // attempt count and overall start time feed the retry policy (shouldRetry)
    int attempt = 0;
    long start = System.currentTimeMillis();

    HttpRequestBase apacheHttpRequest;
    HttpResponse smartsheetResponse;

    InputStream bodyStream = null;
    if (smartsheetRequest.getEntity() != null && smartsheetRequest.getEntity().getContent() != null) {
        bodyStream = smartsheetRequest.getEntity().getContent();
    }
    // the retry logic will consume the body stream so we make sure it supports mark/reset and mark it
    boolean canRetryRequest = bodyStream == null || bodyStream.markSupported();
    if (!canRetryRequest) {
        try {
            // attempt to wrap the body stream in a input-stream that does support mark/reset
            bodyStream = new ByteArrayInputStream(StreamUtil.readBytesFromStream(bodyStream));
            // close the old stream (just to be tidy) and then replace it with a reset-able stream
            smartsheetRequest.getEntity().getContent().close();
            smartsheetRequest.getEntity().setContent(bodyStream);
            canRetryRequest = true;
        } catch (IOException ignore) {
            // best effort - if buffering fails we proceed with the original stream and no retries
        }
    }

    // the retry loop
    while (true) {

        apacheHttpRequest = createApacheRequest(smartsheetRequest);

        // Set HTTP headers
        if (smartsheetRequest.getHeaders() != null) {
            for (Map.Entry<String, String> header : smartsheetRequest.getHeaders().entrySet()) {
                apacheHttpRequest.addHeader(header.getKey(), header.getValue());
            }
        }

        HttpEntitySnapshot requestEntityCopy = null;
        HttpEntitySnapshot responseEntityCopy = null;
        // Set HTTP entity
        final HttpEntity entity = smartsheetRequest.getEntity();
        if (apacheHttpRequest instanceof HttpEntityEnclosingRequestBase && entity != null
                && entity.getContent() != null) {
            try {
                // we need access to the original request stream so we can log it (in the event of errors and/or tracing)
                requestEntityCopy = new HttpEntitySnapshot(entity);
            } catch (IOException iox) {
                logger.error("failed to make copy of original request entity - {}", iox);
            }

            InputStreamEntity streamEntity = new InputStreamEntity(entity.getContent(),
                    entity.getContentLength());
            // chunked transfer-encoding is disabled; the entity already carries an explicit content length
            streamEntity.setChunked(false);
            ((HttpEntityEnclosingRequestBase) apacheHttpRequest).setEntity(streamEntity);
        }

        // mark the body so we can reset on retry (read-limit = full declared body length)
        if (canRetryRequest && bodyStream != null) {
            bodyStream.mark((int) smartsheetRequest.getEntity().getContentLength());
        }

        // Make the HTTP request
        smartsheetResponse = new HttpResponse();
        HttpContext context = new BasicHttpContext();
        try {
            long startTime = System.currentTimeMillis();
            // note: apacheHttpResponse is not declared here - it is an instance field
            apacheHttpResponse = this.httpClient.execute(apacheHttpRequest, context);
            long endTime = System.currentTimeMillis();

            // Set request headers to values ACTUALLY SENT (not just created by us), this would include:
            // 'Connection', 'Accept-Encoding', etc. However, if a proxy is used, this may be the proxy's CONNECT
            // request, hence the test for HTTP method first
            Object httpRequest = context.getAttribute("http.request");
            if (httpRequest != null && HttpRequestWrapper.class.isAssignableFrom(httpRequest.getClass())) {
                HttpRequestWrapper actualRequest = (HttpRequestWrapper) httpRequest;
                switch (HttpMethod.valueOf(actualRequest.getMethod())) {
                // no default: header capture is deliberately limited to the primary methods
                case GET:
                case POST:
                case PUT:
                case DELETE:
                    apacheHttpRequest.setHeaders(((HttpRequestWrapper) httpRequest).getAllHeaders());
                    break;
                }
            }

            // Set returned headers
            smartsheetResponse.setHeaders(new HashMap<String, String>());
            for (Header header : apacheHttpResponse.getAllHeaders()) {
                smartsheetResponse.getHeaders().put(header.getName(), header.getValue());
            }
            smartsheetResponse.setStatus(apacheHttpResponse.getStatusLine().getStatusCode(),
                    apacheHttpResponse.getStatusLine().toString());

            // Set returned entities
            if (apacheHttpResponse.getEntity() != null) {
                HttpEntity httpEntity = new HttpEntity();
                httpEntity.setContentType(apacheHttpResponse.getEntity().getContentType().getValue());
                httpEntity.setContentLength(apacheHttpResponse.getEntity().getContentLength());
                httpEntity.setContent(apacheHttpResponse.getEntity().getContent());
                smartsheetResponse.setEntity(httpEntity);
                responseEntityCopy = new HttpEntitySnapshot(httpEntity);
            }

            long responseTime = endTime - startTime;
            logRequest(apacheHttpRequest, requestEntityCopy, smartsheetResponse, responseEntityCopy,
                    responseTime);

            if (traces.size() > 0) { // trace-logging of request and response (if so configured)
                RequestAndResponseData requestAndResponseData = RequestAndResponseData.of(apacheHttpRequest,
                        requestEntityCopy, smartsheetResponse, responseEntityCopy, traces);
                TRACE_WRITER.println(requestAndResponseData.toString(tracePrettyPrint));
            }

            if (smartsheetResponse.getStatusCode() == 200) {
                // call successful, exit the retry loop
                break;
            }

            // the retry logic might consume the content stream so we make sure it supports mark/reset and mark it
            InputStream contentStream = smartsheetResponse.getEntity().getContent();
            if (!contentStream.markSupported()) {
                // wrap the response stream in a input-stream that does support mark/reset
                contentStream = new ByteArrayInputStream(StreamUtil.readBytesFromStream(contentStream));
                // close the old stream (just to be tidy) and then replace it with a reset-able stream
                smartsheetResponse.getEntity().getContent().close();
                smartsheetResponse.getEntity().setContent(contentStream);
            }
            try {
                contentStream.mark((int) smartsheetResponse.getEntity().getContentLength());
                long timeSpent = System.currentTimeMillis() - start;
                if (!shouldRetry(++attempt, timeSpent, smartsheetResponse)) {
                    // should not retry, or retry time exceeded, exit the retry loop
                    break;
                }
            } finally {
                // rewind both streams whether we retry or fall out of the loop
                if (bodyStream != null) {
                    bodyStream.reset();
                }
                contentStream.reset();
            }
            // moving this to finally causes issues because socket is closed (which means response stream is closed)
            this.releaseConnection();

        } catch (ClientProtocolException e) {
            try {
                logger.warn("ClientProtocolException " + e.getMessage());
                logger.warn("{}", RequestAndResponseData.of(apacheHttpRequest, requestEntityCopy,
                        smartsheetResponse, responseEntityCopy, REQUEST_RESPONSE_SUMMARY));
                // if this is a PUT and was retried by the http client, the body content stream is at the
                // end and is a NonRepeatableRequest. If we marked the body content stream prior to execute,
                // reset and retry
                if (canRetryRequest && e.getCause() instanceof NonRepeatableRequestException) {
                    if (smartsheetRequest.getEntity() != null) {
                        smartsheetRequest.getEntity().getContent().reset();
                    }
                    continue;
                }
            } catch (IOException ignore) {
                // logging/reset failed - fall through and surface the original exception
            }
            throw new HttpClientException("Error occurred.", e);
        } catch (NoHttpResponseException e) {
            try {
                logger.warn("NoHttpResponseException " + e.getMessage());
                logger.warn("{}", RequestAndResponseData.of(apacheHttpRequest, requestEntityCopy,
                        smartsheetResponse, responseEntityCopy, REQUEST_RESPONSE_SUMMARY));
                // check to see if the response was empty and this was a POST. All other HTTP methods
                // will be automatically retried by the http client.
                // (POST is non-idempotent and is not retried automatically, but is safe for us to retry)
                if (canRetryRequest && smartsheetRequest.getMethod() == HttpMethod.POST) {
                    if (smartsheetRequest.getEntity() != null) {
                        smartsheetRequest.getEntity().getContent().reset();
                    }
                    continue;
                }
            } catch (IOException ignore) {
                // logging/reset failed - fall through and surface the original exception
            }
            throw new HttpClientException("Error occurred.", e);
        } catch (IOException e) {
            try {
                logger.warn("{}", RequestAndResponseData.of(apacheHttpRequest, requestEntityCopy,
                        smartsheetResponse, responseEntityCopy, REQUEST_RESPONSE_SUMMARY));
            } catch (IOException ignore) {
                // best-effort logging only
            }
            throw new HttpClientException("Error occurred.", e);
        }
    }
    return smartsheetResponse;
}

From source file:edu.isi.misd.tagfiler.client.JakartaClient.java

/**
 * Uploads a file block./*  w ww.  j a v  a2  s.  c  om*/
 * 
 * @param url
 *            the query url
 * @param inputStream
 *            the InputStream where to read from
 * @param length
 *            the number of bytes to read
 * @param first
 *            the first byte to read
 * @param fileLength
 *            the file length
 * @param cookie
 *            the cookie to be set in the request
 * @return the HTTP Response
 */
public ClientURLResponse postFile(String url, InputStream inputStream, long length, long first, long fileLength,
        String cookie) {
    HttpPut httpput = new HttpPut(url);
    httpput.setHeader("Content-Type", "application/octet-stream");
    if (first != 0) {
        httpput.setHeader("Content-Range", "bytes " + first + "-" + (first + length - 1) + "/" + fileLength);
    }
    InputStreamEntity inputStreamEntity = new InputStreamEntity(inputStream, length);
    inputStreamEntity.setChunked(false);
    httpput.setEntity(inputStreamEntity);
    return execute(httpput, cookie);
}

From source file:ch.iterate.openstack.swift.Client.java

/**
 * Store a file on the server, including metadata, with the contents coming from an input stream.  This allows you to
 * not know the entire length of your content when you start to write it.  Nor do you have to hold it entirely in memory
 * at the same time./*from   w  w w  .  ja  v a 2  s  .c o  m*/
 *
 * @param container   The name of the container
 * @param data        Any object that implements InputStream
 * @param contentType The MIME type of the file
 * @param name        The name of the file on the server
 * @param metadata    A map with the metadata as key names and values as the metadata values
 * @return True if response code is 201
 * @throws GenericException Unexpected response
 */
public String storeObject(Region region, String container, InputStream data, String contentType, String name,
        Map<String, String> metadata) throws IOException {
    HttpPut method = new HttpPut(region.getStorageUrl(container, name));
    InputStreamEntity entity = new InputStreamEntity(data, -1);
    entity.setChunked(true);
    entity.setContentType(contentType);
    method.setEntity(entity);
    for (Map.Entry<String, String> key : this.renameObjectMetadata(metadata).entrySet()) {
        method.setHeader(key.getKey(), key.getValue());
    }
    Response response = this.execute(method, new DefaultResponseHandler());
    if (response.getStatusCode() == HttpStatus.SC_CREATED) {
        return response.getResponseHeader(HttpHeaders.ETAG).getValue();
    } else {
        throw new GenericException(response);
    }
}

From source file:ch.iterate.openstack.swift.Client.java

/**
 * Stores an object, transparently splitting it into segments when large object
 * support is required (either dynamic or static large objects).
 *
 * @param container          The name of the container
 * @param name               The name of the object
 * @param entity             The request entity holding the content (make sure to set the Content-Type)
 * @param metadata           The metadata for the object
 * @param md5sum             The 32 character hex encoded MD5 sum of the data
 * @param objectSize         The total size in bytes of the object to be stored
 * @param segmentSize        Optional size in bytes of the object segments to be stored (forces large object support) default 4G
 * @param dynamicLargeObject Optional setting to use dynamic large objects, False/null will use static large objects if required
 * @param segmentContainer   Optional name of container to store file segments, defaults to storing chunks in the same container as the file will appear
 * @param segmentFolder      Optional name of folder for storing file segments, defaults to ".file-segments"
 * @param leaveSegments      Optional setting to leave segments of large objects in place when the manifest is overwritten/changed
 * @return The ETAG if the save was successful, null otherwise
 * @throws GenericException     There was a protocol level error talking to CloudFiles
 * @throws InterruptedException if interrupted while coordinating with the writer thread
 */
public String storeObject(Region region, String container, String name, HttpEntity entity,
        Map<String, String> metadata, String md5sum, Long objectSize, Long segmentSize,
        Boolean dynamicLargeObject, String segmentContainer, String segmentFolder, Boolean leaveSegments)
        throws IOException, InterruptedException {
    /*
     * Default values for large object support. We also use the defaults combined with the inputs
     * to determine whether to store as a large object.
     */

    /*
     * The maximum size of a single object (5GiB).
     */
    long singleObjectSizeLimit = (long) (5 * Math.pow(1024, 3));

    /*
     * The default minimum segment size (1MiB).
     */
    long minSegmentSize = 1024L * 1024L;

    /*
     * Set the segment size.
     *
     * Defaults to 4GiB segments, and will not permit smaller than 1MiB segments.
     */
    long actualSegmentSize = (segmentSize == null) ? (long) (4 * Math.pow(1024, 3))
            : Math.max(segmentSize, minSegmentSize);

    /*
     * Determines if we will store using large objects - we may do this for 3 reasons:
     *
     *  - A segmentSize has been specified and the object size is greater than the minimum segment size
     *  - If an objectSize is provided and is larger than the single object size limit of 5GiB
     *  - A segmentSize has been specified, but no objectSize given (we take this as a request for segmentation)
     *
     * The last case may fail if the user does not provide at least as much data as the minimum segment
     * size configured on the server, and will always produce a large object structure (even if only one
     * small segment is required).
     */
    objectSize = (objectSize == null) ? -1 : objectSize;
    boolean useLargeObject = ((segmentSize != null) && (objectSize > actualSegmentSize))
            || (objectSize > singleObjectSizeLimit) || ((segmentSize != null) && (objectSize == -1));

    if (!useLargeObject) {
        return storeObject(region, container, name, entity, metadata, md5sum);
    } else {
        /*
         * We need to upload a large object as defined by the method
         * parameters. For now this is done sequentially, but a parallel
         * version using appropriate random access to the underlying data
         * may be desirable.
         *
         * We make the assumption that the given file size will not be
         * greater than int.MAX_VALUE * segmentSize
         */
        leaveSegments = (leaveSegments == null) ? Boolean.FALSE : leaveSegments;
        dynamicLargeObject = (dynamicLargeObject == null) ? Boolean.FALSE : dynamicLargeObject;
        segmentFolder = (segmentFolder == null) ? ".file-segments" : segmentFolder;
        segmentContainer = (segmentContainer == null) ? container : segmentContainer;

        Map<String, List<StorageObject>> oldSegmentsToRemove = null;

        /*
         * If we have chosen not to leave existing large object segments in place (default)
         * then we need to collect information about any existing file segments so that we can
         * deal with them after we complete the upload of the new manifest.
         *
         * We should only delete existing segments after a successful upload of a new manifest file
         * because this constitutes an object update and the older file should remain available
         * until the new file can be downloaded.
         */
        if (!leaveSegments) {
            ObjectMetadata existingMetadata;
            String manifestDLO = null;
            Boolean manifestSLO = Boolean.FALSE;

            try {
                existingMetadata = getObjectMetaData(region, container, name);

                if (existingMetadata.getMetaData().containsKey(Constants.MANIFEST_HEADER)) {
                    manifestDLO = existingMetadata.getMetaData().get(Constants.MANIFEST_HEADER);
                } else if (existingMetadata.getMetaData().containsKey(Constants.X_STATIC_LARGE_OBJECT)) {
                    JSONParser parser = new JSONParser();
                    String manifestSLOValue = existingMetadata.getMetaData()
                            .get(Constants.X_STATIC_LARGE_OBJECT);
                    manifestSLO = (Boolean) parser.parse(manifestSLOValue);
                }
            } catch (NotFoundException e) {
                /*
                 * Just means no object exists already, so continue
                 */
            } catch (ParseException e) {
                /*
                 * X_STATIC_LARGE_OBJECT header existed but failed to parse.
                 * If a static large object already exists this must be set to "true".
                 * If we got here then the X_STATIC_LARGE_OBJECT header existed, but failed
                 * to parse as a boolean, so fail upload as a precaution.
                 */
                return null;
            }

            if (manifestDLO != null) {
                /*
                 * We have found an existing dynamic large object, so use the prefix to get a list of
                 * existing objects. If we're putting up a new dlo, make sure the segment prefixes are
                 * different, then we can delete anything that's not in the new list if necessary.
                 */
                String oldContainer = manifestDLO.substring(0, manifestDLO.indexOf('/', 1));
                String oldPath = manifestDLO.substring(manifestDLO.indexOf('/', 1), manifestDLO.length());
                oldSegmentsToRemove = new HashMap<String, List<StorageObject>>();
                oldSegmentsToRemove.put(oldContainer, listObjects(region, oldContainer, oldPath));
            } else if (manifestSLO) {
                /*
                 * We have found an existing static large object, so grab the manifest data that
                 * details the existing segments - delete any later that we don't need any more
                 */

            }
        }

        int segmentNumber = 1;
        long timeStamp = System.currentTimeMillis() / 1000L;
        String segmentBase = String.format("%s/%d/%d", segmentFolder, timeStamp, objectSize);

        /*
         * Create subInputStream from the OutputStream we will pass to the
         * HttpEntity for writing content.
         */
        final PipedInputStream contentInStream = new PipedInputStream(64 * 1024);
        final PipedOutputStream contentOutStream = new PipedOutputStream(contentInStream);
        SubInputStream segmentStream = new SubInputStream(contentInStream, actualSegmentSize, false);

        /*
         * Fork the call to entity.writeTo() that allows us to grab any exceptions raised
         */
        final HttpEntity e = entity;

        final Callable<Boolean> writer = new Callable<Boolean>() {
            public Boolean call() throws Exception {
                e.writeTo(contentOutStream);
                return Boolean.TRUE;
            }
        };

        ExecutorService writeExecutor = Executors.newSingleThreadExecutor();
        final Future<Boolean> future = writeExecutor.submit(writer);
        /*
         * Check the future for exceptions after we've finished uploading segments
         */

        Map<String, List<StorageObject>> newSegmentsAdded = new HashMap<String, List<StorageObject>>();
        List<StorageObject> newSegments = new LinkedList<StorageObject>();
        JSONArray manifestSLO = new JSONArray();
        boolean finished = false;

        try {
            /*
             * Upload each segment of the file by reading sections of the content input stream
             * until the entire underlying stream is complete
             */
            while (!finished) {
                String segmentName = String.format("%s/%08d", segmentBase, segmentNumber);

                String etag;
                try {
                    etag = storeObject(region, segmentContainer, segmentStream, "application/octet-stream",
                            segmentName, new HashMap<String, String>());
                } catch (IOException ex) {
                    // a failed segment upload aborts the whole store
                    // TODO(review): debug output left in place; route through a logger instead
                    System.out.println("Caught IO Exception: " + ex.getMessage());
                    ex.printStackTrace();
                    throw ex;
                }
                String segmentPath = segmentContainer + "/" + segmentName;
                long bytesUploaded = segmentStream.getBytesProduced();

                /*
                 * Create the appropriate manifest structure if we're making a static large
                 * object.
                 *
                 *   ETAG returned by the simple upload
                 *   total size of segment uploaded
                 *   path of segment
                 */
                if (!dynamicLargeObject) {
                    JSONObject segmentJSON = new JSONObject();

                    segmentJSON.put("path", segmentPath);
                    segmentJSON.put("etag", etag);
                    segmentJSON.put("size_bytes", bytesUploaded);
                    manifestSLO.add(segmentJSON);

                    newSegments.add(new StorageObject(segmentName));
                }

                segmentNumber++;
                finished = segmentStream.endSourceReached();
                newSegmentsAdded.put(segmentContainer, newSegments);
                // TODO(review): debug output left in place; route through a logger instead
                System.out.println("JSON: " + manifestSLO.toString());

                segmentStream.readMoreBytes(actualSegmentSize);
            }

            /*
             * Attempts to retrieve the return value from the write operation
             * Any exceptions raised can then be handled appropriately
             */
            try {
                future.get();
            } catch (InterruptedException ex) {
                /*
                 * The write was interrupted; restore the interrupt flag so callers can
                 * observe it. Segments already uploaded are left in place.
                 */
                Thread.currentThread().interrupt();
            } catch (ExecutionException ex) {
                /*
                 * This should always be an IOException or a RuntimeException
                 * because the call to entity.writeTo() only throws IOException
                 */
                Throwable t = ex.getCause();

                if (t instanceof IOException) {
                    throw (IOException) t;
                } else {
                    throw (RuntimeException) t;
                }
            }
        } finally {
            /*
             * Fix: the executor was previously never shut down, leaking its worker thread
             * on every call. shutdownNow also interrupts a writer still blocked on the pipe
             * when an earlier segment upload failed.
             */
            writeExecutor.shutdownNow();
        }

        /*
         * Create an appropriate manifest depending on our DLO/SLO choice
         */
        String manifestEtag = null;
        if (dynamicLargeObject) {
            /*
             * Empty manifest with header detailing the shared prefix of object segments
             */
            long manifestTimeStamp = System.currentTimeMillis() / 1000L;
            metadata.put("X-Object-Manifest", segmentBase);
            metadata.put("x-object-meta-mtime", String.format("%s", manifestTimeStamp));
            manifestEtag = storeObject(region, container, new ByteArrayInputStream(new byte[0]),
                    entity.getContentType().getValue(), name, metadata);
        } else {
            /*
             * Manifest containing json list specifying details of the object segments.
             */
            URIBuilder urlBuild = new URIBuilder(region.getStorageUrl(container, name));
            urlBuild.setParameter("multipart-manifest", "put");
            URI url;
            try {
                url = urlBuild.build();
                String manifestContent = manifestSLO.toString();
                // NOTE(review): getBytes() uses the platform default charset; UTF-8 is
                // likely intended here - confirm before changing the wire behavior
                InputStreamEntity manifestEntity = new InputStreamEntity(
                        new ByteArrayInputStream(manifestContent.getBytes()), -1);
                manifestEntity.setChunked(true);
                manifestEntity.setContentType(entity.getContentType());
                HttpPut method = new HttpPut(url);
                method.setEntity(manifestEntity);
                method.setHeader("x-static-large-object", "true");
                Response response = this.execute(method, new DefaultResponseHandler());
                if (response.getStatusCode() == HttpStatus.SC_CREATED) {
                    manifestEtag = response.getResponseHeader(HttpHeaders.ETAG).getValue();
                } else {
                    throw new GenericException(response);
                }
            } catch (URISyntaxException ex) {
                // TODO(review): swallowed after printing; manifestEtag remains null
                ex.printStackTrace();
            }
        }

        /*
         * Delete stale segments of overwritten large object if requested.
         */
        if (!leaveSegments) {
            /*
             * Before deleting old segments, remove any objects from the delete list
             * that are also part of a new static large object that were updated during the upload.
             */
            if (!(oldSegmentsToRemove == null)) {
                for (String c : oldSegmentsToRemove.keySet()) {
                    List<StorageObject> rmv = oldSegmentsToRemove.get(c);
                    if (newSegmentsAdded.containsKey(c)) {
                        rmv.removeAll(newSegmentsAdded.get(c));
                    }
                    List<String> rmvNames = new LinkedList<String>();
                    for (StorageObject s : rmv) {
                        rmvNames.add(s.getName());
                    }
                    deleteObjects(region, c, rmvNames);
                }
            }
        }

        return manifestEtag;
    }
}

From source file:com.dropbox.client2.DropboxAPI.java

/**
 * Creates a request that can upload a single chunk of data to the server via the
 * chunked upload protocol. This request reads the InputStream and advances it by
 * an amount equal to the number of bytes uploaded. For most users, the {@link ChunkedUploader}
 * object provides an easier interface to use and should provide most of the
 * functionality needed. If offset is 0 and uploadId is null, a new chunked upload is
 * created on the server./*from   w ww. j av  a  2 s .c  om*/
 *
 * @param is A stream containing the data to be uploaded.
 * @param length The number of bytes to upload.
 * @param listener A ProgressListener (can be {@code null}) that will be notified of upload
 *                 progress.  The progress will be for this individual file chunk (starting
 *                 at zero bytes and ending at {@code length} bytes).
 * @param offset The offset into the file that the contents of the these bytes belongs to.
 * @param uploadId The unique ID identifying this upload to the server.
 * @return A ChunkedUploadRequest which can be used to upload a single chunk of data to Dropbox.
 */

public ChunkedUploadRequest chunkedUploadRequest(InputStream is, long length, ProgressListener listener,
        long offset, String uploadId) {
    String[] params;
    if (offset == 0) {
        params = new String[0];
    } else {
        params = new String[] { "upload_id", uploadId, "offset", "" + offset };
    }
    String url = RESTUtility.buildURL(session.getContentServer(), VERSION, "/chunked_upload/", params);
    HttpPut req = new HttpPut(url);
    session.sign(req);

    InputStreamEntity ise = new InputStreamEntity(is, length);
    ise.setContentEncoding("application/octet-stream");
    ise.setChunked(false);
    HttpEntity entity = ise;

    if (listener != null) {
        entity = new ProgressHttpEntity(entity, listener);
    }
    req.setEntity(entity);

    return new ChunkedUploadRequest(req, session);
}

From source file:com.dropbox.client2.DropboxAPI.java

/**
 * Creates a request to upload an {@link InputStream} to a Dropbox file.
 * You can then {@code upload()} or {@code abort()} this request. This is
 * the advanced version, which you should only use if you really need the
 * flexibility of uploading using an {@link InputStream}.
 *
 * @param path the full Dropbox path where to put the file, including
 *         directories and filename.
 * @param is the {@link InputStream} from which to upload.
 * @param length the amount of bytes to read from the {@link InputStream}.
 * @param overwrite whether to overwrite the file if it already exists. If
 *         true, any existing file will always be overwritten. If false,
 *         files will be overwritten only if the {@code parentRev} matches
 *         the current rev on the server.  Otherwise, there is a conflict,
 *         which is resolved by the behavior specified by autorename.
 * @param parentRev the rev of the file at which the user started editing
 *         it (obtained from a metadata call), or null if this is a new
 *         upload. If null, or if it does not match the latest rev on the
 *         server, a copy of the file will be created and you'll receive
 *         the new metadata upon executing the request.
 * @param autoRename If False, conflicts produce a DropboxServerException.
 *          If True, a conflicted copy of the file will be created and you
 *          will get the new file's metadata {@link Entry}.
 * @param listener an optional {@link ProgressListener} to receive upload
 *         progress updates, or null.
 *
 * @return an {@link UploadRequest}.
 *
 * @throws IllegalArgumentException if {@code newFilename} is null or
 *         empty.
 * @throws DropboxUnlinkedException if you have not set an access token
 *         pair on the session, or if the user has revoked access.
 * @throws DropboxFileSizeException if the file is bigger than the
 *         maximum allowed by the API. See
 *         {@code DropboxAPI.MAX_UPLOAD_SIZE}.
 * @throws DropboxException for any other unknown errors. This is also a
 *         superclass of all other Dropbox exceptions, so you may want to
 *         only catch this exception which signals that some kind of error
 *         occurred.
 */
private UploadRequest putFileRequest(String path, InputStream is, long length, boolean overwrite,
        String parentRev, boolean autoRename, ProgressListener listener) throws DropboxException {
    if (path == null || path.equals("")) {
        throw new IllegalArgumentException("path is null or empty.");
    }

    assertAuthenticated();

    // normalize to an absolute Dropbox path
    final String absolutePath = path.startsWith("/") ? path : "/" + path;
    final String target = "/files_put/" + session.getAccessType() + absolutePath;

    // a null parentRev is sent to the server as an empty string
    final String rev = (parentRev == null) ? "" : parentRev;

    final String[] params = { "overwrite", String.valueOf(overwrite), "parent_rev", rev,
            "autorename", String.valueOf(autoRename), "locale", session.getLocale().toString() };

    final String url = RESTUtility.buildURL(session.getContentServer(), VERSION, target, params);

    final HttpPut request = new HttpPut(url);
    session.sign(request);

    final InputStreamEntity streamEntity = new InputStreamEntity(is, length);
    // NOTE(review): this puts a MIME type in the Content-Encoding header; setContentType
    // looks like the intended call - confirm against the server contract before changing
    streamEntity.setContentEncoding("application/octet-stream");
    streamEntity.setChunked(false);

    HttpEntity entity = streamEntity;
    if (listener != null) {
        // wrap the entity so the listener sees upload progress
        entity = new ProgressHttpEntity(entity, listener);
    }

    request.setEntity(entity);

    return new BasicUploadRequest(request, session);
}

From source file:com.rackspacecloud.client.cloudfiles.FilesClient.java

/**
 *  /*from w w  w  .  j  a v  a 2 s.  co  m*/
 * 
 * @param container
 *             
 * @param data
 *             
 * @param contentType
 *             MIME
 * @param name
 *             
 * @param metadata
 *             
 * @param callback
 *             NULL
 * 
 * @throws IOException
 *              IO
 * @throws HttpException
 *              Http
 * @throws FilesExcepiton
 *              
 * 
 */
public String storeStreamedObject(String container, InputStream data, String contentType, String name,
        Map<String, String> metadata) throws IOException, HttpException, FilesException {
    if (this.isLoggedin()) {
        String objName = name;
        if (isValidContainerName(container) && isValidObjectName(objName)) {
            HttpPut method = new HttpPut(
                    storageURL + "/" + sanitizeForURI(container) + "/" + sanitizeForURI(objName));
            method.getParams().setIntParameter("http.socket.timeout", connectionTimeOut);
            method.setHeader(FilesConstants.X_AUTH_TOKEN, authToken);
            InputStreamEntity entity = new InputStreamEntity(data, -1);
            entity.setChunked(true);
            entity.setContentType(contentType);
            method.setEntity(entity);
            for (String key : metadata.keySet()) {
                // logger.warn("Key:" + key + ":" +
                // sanitizeForURI(metadata.get(key)));
                method.setHeader(FilesConstants.X_OBJECT_META + key, sanitizeForURI(metadata.get(key)));
            }
            method.removeHeaders("Content-Length");

            try {
                FilesResponse response = new FilesResponse(client.execute(method));

                if (response.getStatusCode() == HttpStatus.SC_CREATED) {
                    return response.getResponseHeader(FilesConstants.E_TAG).getValue();
                } else {
                    logger.error(response.getStatusLine());
                    throw new FilesException("Unexpected result", response.getResponseHeaders(),
                            response.getStatusLine());
                }
            } finally {
                method.abort();
            }
        } else {
            if (!isValidObjectName(objName)) {
                throw new FilesInvalidNameException(objName);
            } else {
                throw new FilesInvalidNameException(container);
            }
        }
    } else {
        throw new FilesAuthorizationException("You must be logged in", null, null);
    }
}