List of usage examples for org.apache.http.client.methods.HttpRequestBase#setURI
public void setURI(final URI uri)
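Before the full, real-world examples below, here is a minimal sketch of what setURI does: it re-points an already-constructed request object at a new target URI, which is why it shows up in hand-rolled redirect handling. The client setup and URLs here are illustrative assumptions, not taken from any of the source files below.

import java.net.URI;

import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;

public class SetUriExample {
    public static void main(String[] args) throws Exception {
        // Disable automatic redirect handling so we can demonstrate re-targeting manually.
        HttpClient client = HttpClientBuilder.create().disableRedirectHandling().build();

        // A request object can be created once and re-pointed; setURI() changes where it goes.
        HttpRequestBase request = new HttpGet();
        request.setURI(URI.create("http://example.com/old-location")); // illustrative URL

        HttpResponse response = client.execute(request);
        String location = response.getFirstHeader("Location") == null ? null
                : response.getFirstHeader("Location").getValue();
        EntityUtils.consume(response.getEntity());

        // Follow one redirect manually by re-targeting the same request object.
        if (location != null) {
            request.setURI(URI.create(location));
            response = client.execute(request);
        }
        EntityUtils.consume(response.getEntity());
    }
}

The Okta, Stormpath, and AWS examples below all use this same re-targeting move inside larger retry loops.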
From source file: com.okta.sdk.impl.http.httpclient.HttpClientRequestExecutor.java

@Override
public Response executeRequest(Request request) throws RestException {
    Assert.notNull(request, "Request argument cannot be null.");

    int retryCount = 0;
    URI redirectUri = null;
    HttpEntity entity = null;
    RestException exception = null;

    // Make a copy of the original request params and headers so that we can
    // permute them in the loop and start over with the original every time.
    QueryString originalQuery = new QueryString();
    originalQuery.putAll(request.getQueryString());

    HttpHeaders originalHeaders = new HttpHeaders();
    originalHeaders.putAll(request.getHeaders());

    while (true) {
        if (redirectUri != null) {
            request = new DefaultRequest(request.getMethod(), redirectUri.toString(), null, null,
                    request.getBody(), request.getHeaders().getContentLength());
        }

        if (retryCount > 0) {
            request.setQueryString(originalQuery);
            request.setHeaders(originalHeaders);
        }

        // Sign the request
        this.requestAuthenticator.authenticate(request);

        HttpRequestBase httpRequest = this.httpClientRequestFactory.createHttpClientRequest(request, entity);

        if (httpRequest instanceof HttpEntityEnclosingRequest) {
            entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
        }

        HttpResponse httpResponse = null;
        try {
            // We don't want to treat a redirect like a retry,
            // so if redirectUri is not null, we won't pause
            // before executing the request below.
            if (retryCount > 0 && redirectUri == null) {
                pauseExponentially(retryCount, exception);
                if (entity != null) {
                    InputStream content = entity.getContent();
                    if (content.markSupported()) {
                        content.reset();
                    }
                }
            }

            // reset redirectUri so that if there is an exception, we will pause on retry
            redirectUri = null;
            exception = null;
            retryCount++;

            httpResponse = httpClient.execute(httpRequest);

            if (isRedirect(httpResponse)) {
                Header[] locationHeaders = httpResponse.getHeaders("Location");
                String location = locationHeaders[0].getValue();
                log.debug("Redirecting to: {}", location);
                redirectUri = URI.create(location);
                httpRequest.setURI(redirectUri);
            } else {
                Response response = toSdkResponse(httpResponse);

                int httpStatus = response.getHttpStatus();
                if (httpStatus == 429) {
                    throw new RestException(
                            "HTTP 429: Too Many Requests. Exceeded request rate limit in the allotted amount of time.");
                }
                if ((httpStatus == 503 || httpStatus == 504) && retryCount <= this.numRetries) {
                    // allow the loop to continue to execute a retry request
                    continue;
                }

                return response;
            }
        } catch (Throwable t) {
            log.warn("Unable to execute HTTP request: {}", t.getMessage(), t);

            if (t instanceof RestException) {
                exception = (RestException) t;
            }

            if (!shouldRetry(httpRequest, t, retryCount)) {
                throw new RestException("Unable to execute HTTP request: " + t.getMessage(), t);
            }
        } finally {
            try {
                httpResponse.getEntity().getContent().close();
            } catch (Throwable ignored) { // NOPMD
            }
        }
    }
}
From source file: com.stormpath.sdk.impl.http.httpclient.HttpClientRequestExecutor.java

@Override
public Response executeRequest(Request request) throws RestException {
    Assert.notNull(request, "Request argument cannot be null.");

    int retryCount = 0;
    URI redirectUri = null;
    HttpEntity entity = null;
    RestException exception = null;

    // Make a copy of the original request params and headers so that we can
    // permute them in the loop and start over with the original every time.
    QueryString originalQuery = new QueryString();
    originalQuery.putAll(request.getQueryString());

    HttpHeaders originalHeaders = new HttpHeaders();
    originalHeaders.putAll(request.getHeaders());

    while (true) {
        if (redirectUri != null) {
            request = new DefaultRequest(request.getMethod(), redirectUri.toString(), null, null,
                    request.getBody(), request.getHeaders().getContentLength());
        }

        if (retryCount > 0) {
            request.setQueryString(originalQuery);
            request.setHeaders(originalHeaders);
        }

        // Sign the request
        this.requestAuthenticator.authenticate(request);

        HttpRequestBase httpRequest = this.httpClientRequestFactory.createHttpClientRequest(request, entity);

        if (httpRequest instanceof HttpEntityEnclosingRequest) {
            entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
        }

        HttpResponse httpResponse = null;
        try {
            // We don't want to treat a redirect like a retry,
            // so if redirectUri is not null, we won't pause
            // before executing the request below.
            if (retryCount > 0 && redirectUri == null) {
                pauseExponentially(retryCount, exception);
                if (entity != null) {
                    InputStream content = entity.getContent();
                    if (content.markSupported()) {
                        content.reset();
                    }
                }
            }

            // reset redirectUri so that if there is an exception, we will pause on retry
            redirectUri = null;
            exception = null;
            retryCount++;

            httpResponse = httpClient.execute(httpRequest);

            if (isRedirect(httpResponse)) {
                Header[] locationHeaders = httpResponse.getHeaders("Location");
                String location = locationHeaders[0].getValue();
                log.debug("Redirecting to: {}", location);
                redirectUri = request.getResourceUrl().resolve(location);
                httpRequest.setURI(redirectUri);
            } else {
                Response response = toSdkResponse(httpResponse);

                int httpStatus = response.getHttpStatus();
                if (httpStatus == 429) {
                    throw new RestException(
                            "HTTP 429: Too Many Requests. Exceeded request rate limit in the allotted amount of time.");
                }
                if ((httpStatus == 503 || httpStatus == 504) && retryCount <= this.numRetries) {
                    // allow the loop to continue to execute a retry request
                    continue;
                }

                return response;
            }
        } catch (Throwable t) {
            log.warn("Unable to execute HTTP request: {}", t.getMessage(), t);

            if (t instanceof RestException) {
                exception = (RestException) t;
            }

            if (!shouldRetry(httpRequest, t, retryCount)) {
                throw new RestException("Unable to execute HTTP request: " + t.getMessage(), t);
            }
        } finally {
            try {
                httpResponse.getEntity().getContent().close();
            } catch (Throwable ignored) {
            }
        }
    }
}
From source file: crawlercommons.fetcher.SimpleHttpFetcher.java

private FetchedResult doRequest(HttpRequestBase request, String url, Payload payload) throws BaseFetchException {
    LOGGER.trace("Fetching " + url);

    HttpResponse response;
    long readStartTime;
    Metadata headerMap = new Metadata();
    String redirectedUrl = null;
    String newBaseUrl = null;
    int numRedirects = 0;
    boolean needAbort = true;
    String contentType = "";
    String mimeType = "";
    String hostAddress = null;

    // Create a local instance of cookie store, and bind to local context
    // Without this we get killed w/lots of threads, due to sync() on single cookie store.
    HttpContext localContext = new BasicHttpContext();
    CookieStore cookieStore = new BasicCookieStore();
    localContext.setAttribute(ClientContext.COOKIE_STORE, cookieStore);

    StringBuilder fetchTrace = null;
    if (LOGGER.isTraceEnabled()) {
        fetchTrace = new StringBuilder("Fetched url: " + url);
    }

    try {
        request.setURI(new URI(url));

        readStartTime = System.currentTimeMillis();
        response = _httpClient.execute(request, localContext);

        Header[] headers = response.getAllHeaders();
        for (Header header : headers) {
            headerMap.add(header.getName(), header.getValue());
        }

        int httpStatus = response.getStatusLine().getStatusCode();
        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; status code: " + httpStatus);
            if (headerMap.get(HttpHeaders.CONTENT_LENGTH) != null) {
                fetchTrace.append("; Content-Length: " + headerMap.get(HttpHeaders.CONTENT_LENGTH));
            }
            if (headerMap.get(HttpHeaders.LOCATION) != null) {
                fetchTrace.append("; Location: " + headerMap.get(HttpHeaders.LOCATION));
            }
        }

        if ((httpStatus < 200) || (httpStatus >= 300)) {
            // We can't just check against SC_OK, as some wackos return 201, 202, etc
            throw new HttpFetchException(url, "Error fetching " + url, httpStatus, headerMap);
        }

        redirectedUrl = extractRedirectedUrl(url, localContext);

        URI permRedirectUri = (URI) localContext.getAttribute(PERM_REDIRECT_CONTEXT_KEY);
        if (permRedirectUri != null) {
            newBaseUrl = permRedirectUri.toURL().toExternalForm();
        }

        Integer redirects = (Integer) localContext.getAttribute(REDIRECT_COUNT_CONTEXT_KEY);
        if (redirects != null) {
            numRedirects = redirects.intValue();
        }

        hostAddress = (String) (localContext.getAttribute(HOST_ADDRESS));
        if (hostAddress == null) {
            throw new UrlFetchException(url, "Host address not saved in context");
        }

        Header cth = response.getFirstHeader(HttpHeaders.CONTENT_TYPE);
        if (cth != null) {
            contentType = cth.getValue();
        }

        // Check if we should abort due to mime-type filtering. Note that this will fail if the server
        // doesn't report a mime-type, but that's how we want it as this configuration is typically
        // used when only a subset of parsers are installed/enabled, so we don't want the auto-detect
        // code in Tika to get triggered & try to process an unsupported type. If you want unknown
        // mime-types from the server to be processed, set "" as one of the valid mime-types in FetcherPolicy.
        mimeType = getMimeTypeFromContentType(contentType);
        Set<String> mimeTypes = getValidMimeTypes();
        if ((mimeTypes != null) && (mimeTypes.size() > 0)) {
            if (!mimeTypes.contains(mimeType)) {
                throw new AbortedFetchException(url, "Invalid mime-type: " + mimeType,
                        AbortedFetchReason.INVALID_MIMETYPE);
            }
        }

        needAbort = false;
    } catch (ClientProtocolException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException
        // (which this is a subclass of)
        needAbort = false;

        // If the root cause was a "too many redirects" error, we want to map this to a specific
        // exception that contains the final redirect.
        if (e.getCause() instanceof MyRedirectException) {
            MyRedirectException mre = (MyRedirectException) e.getCause();
            String redirectUrl = url;
            try {
                redirectUrl = mre.getUri().toURL().toExternalForm();
            } catch (MalformedURLException e2) {
                LOGGER.warn("Invalid URI saved during redirect handling: " + mre.getUri());
            }
            throw new RedirectFetchException(url, redirectUrl, mre.getReason());
        } else if (e.getCause() instanceof RedirectException) {
            throw new RedirectFetchException(url, extractRedirectedUrl(url, localContext),
                    RedirectExceptionReason.TOO_MANY_REDIRECTS);
        } else {
            throw new IOFetchException(url, e);
        }
    } catch (IOException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException
        needAbort = false;

        if (e instanceof ConnectionPoolTimeoutException) {
            // Should never happen, so let's dump some info about the connection pool.
            ThreadSafeClientConnManager cm = (ThreadSafeClientConnManager) _httpClient.getConnectionManager();
            int numConnections = cm.getConnectionsInPool();
            cm.closeIdleConnections(0, TimeUnit.MILLISECONDS);
            LOGGER.error(String.format(
                    "Got ConnectionPoolTimeoutException: %d connections before, %d after idle close",
                    numConnections, cm.getConnectionsInPool()));
        }

        throw new IOFetchException(url, e);
    } catch (URISyntaxException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (IllegalStateException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (BaseFetchException e) {
        throw e;
    } catch (Exception e) {
        // Map anything else to a generic IOFetchException
        // TODO KKr - create generic fetch exception
        throw new IOFetchException(url, new IOException(e));
    } finally {
        safeAbort(needAbort, request);
    }

    // Figure out how much data we want to try to fetch.
    int maxContentSize = getMaxContentSize(mimeType);
    int targetLength = maxContentSize;
    boolean truncated = false;
    String contentLengthStr = headerMap.get(HttpHeaders.CONTENT_LENGTH);
    if (contentLengthStr != null) {
        try {
            int contentLength = Integer.parseInt(contentLengthStr);
            if (contentLength > targetLength) {
                truncated = true;
            } else {
                targetLength = contentLength;
            }
        } catch (NumberFormatException e) {
            // Ignore (and log) invalid content length values.
            LOGGER.warn("Invalid content length in header: " + contentLengthStr);
        }
    }

    // Now finally read in response body, up to targetLength bytes.
    // Note that entity might be null, for zero length responses.
    byte[] content = new byte[0];
    long readRate = 0;
    HttpEntity entity = response.getEntity();
    needAbort = true;

    if (entity != null) {
        InputStream in = null;
        try {
            in = entity.getContent();
            byte[] buffer = new byte[BUFFER_SIZE];
            int bytesRead = 0;
            int totalRead = 0;
            ByteArrayOutputStream out = new ByteArrayOutputStream(DEFAULT_BYTEARRAY_SIZE);

            int readRequests = 0;
            int minResponseRate = getMinResponseRate();
            // TODO KKr - we need to monitor the rate while reading a single block. Look at HttpClient
            // metrics support for how to do this. Once we fix this, fix the test to read a smaller
            // (< 20K) chunk of data.
            while ((totalRead < targetLength)
                    && ((bytesRead = in.read(buffer, 0, Math.min(buffer.length, targetLength - totalRead))) != -1)) {
                readRequests += 1;
                totalRead += bytesRead;
                out.write(buffer, 0, bytesRead);

                // Assume read time is at least one millisecond, to avoid DBZ exception.
                long totalReadTime = Math.max(1, System.currentTimeMillis() - readStartTime);
                readRate = (totalRead * 1000L) / totalReadTime;

                // Don't bail on the first read cycle, as we can get a hiccup starting out.
                // Also don't bail if we've read everything we need.
                if ((readRequests > 1) && (totalRead < targetLength) && (readRate < minResponseRate)) {
                    throw new AbortedFetchException(url, "Slow response rate of " + readRate + " bytes/sec",
                            AbortedFetchReason.SLOW_RESPONSE_RATE);
                }

                // Check to see if we got interrupted.
                if (Thread.interrupted()) {
                    throw new AbortedFetchException(url, AbortedFetchReason.INTERRUPTED);
                }
            }

            content = out.toByteArray();
            needAbort = truncated || (in.available() > 0);
        } catch (IOException e) {
            // We don't need to abort if there's an IOException
            throw new IOFetchException(url, e);
        } finally {
            safeAbort(needAbort, request);
            safeClose(in);
        }
    }

    // Toss truncated image content.
    if ((truncated) && (!isTextMimeType(mimeType))) {
        throw new AbortedFetchException(url, "Truncated image", AbortedFetchReason.CONTENT_SIZE);
    }

    // Now see if we need to uncompress the content.
    String contentEncoding = headerMap.get(HttpHeaders.CONTENT_ENCODING);
    if (contentEncoding != null) {
        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; Content-Encoding: " + contentEncoding);
        }

        // TODO KKr We might want to just decompress a truncated gzip containing text (since we have
        // a max content size to save us from any gzip corruption). We might want to break the
        // following out into a separate method, by the way (if not refactor this entire monolithic
        // method).
        try {
            if ("gzip".equals(contentEncoding) || "x-gzip".equals(contentEncoding)) {
                if (truncated) {
                    throw new AbortedFetchException(url, "Truncated compressed data",
                            AbortedFetchReason.CONTENT_SIZE);
                } else {
                    ExpandedResult expandedResult = EncodingUtils.processGzipEncoded(content, maxContentSize);
                    truncated = expandedResult.isTruncated();
                    if ((truncated) && (!isTextMimeType(mimeType))) {
                        throw new AbortedFetchException(url, "Truncated decompressed image",
                                AbortedFetchReason.CONTENT_SIZE);
                    } else {
                        content = expandedResult.getExpanded();
                        if (LOGGER.isTraceEnabled()) {
                            fetchTrace.append("; unzipped to " + content.length + " bytes");
                        }
                    }
                    // } else if ("deflate".equals(contentEncoding)) {
                    //     content = EncodingUtils.processDeflateEncoded(content);
                    //     if (LOGGER.isTraceEnabled()) {
                    //         fetchTrace.append("; inflated to " + content.length + " bytes");
                    //     }
                }
            }
        } catch (IOException e) {
            throw new IOFetchException(url, e);
        }
    }

    // Finally dump out the trace msg we've been building.
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace(fetchTrace.toString());
    }

    // TODO KKr - Save truncated flag in FetchedResult/FetchedDatum.
    return new FetchedResult(url, redirectedUrl, System.currentTimeMillis(), headerMap, content, contentType,
            (int) readRate, payload, newBaseUrl, numRedirects, hostAddress);
}
From source file: com.liferay.ide.core.remote.RemoteConnection.java

protected Object httpJSONAPI(Object... args) throws APIException {
    if (!(args[0] instanceof HttpRequestBase)) {
        throw new IllegalArgumentException("First argument must be a HttpRequestBase."); //$NON-NLS-1$
    }

    Object retval = null;
    String api = null;
    Object[] params = new Object[0];

    final HttpRequestBase request = (HttpRequestBase) args[0];
    final boolean isPostRequest = request instanceof HttpPost;

    if (args[1] instanceof String) {
        api = args[1].toString();
    } else if (args[1] instanceof Object[]) {
        params = (Object[]) args[1];
        api = params[0].toString();
    } else {
        throw new IllegalArgumentException("2nd argument must be either String or Object[]"); //$NON-NLS-1$
    }

    try {
        final URIBuilder builder = new URIBuilder();
        builder.setScheme("http"); //$NON-NLS-1$
        builder.setHost(getHost());
        builder.setPort(getHttpPort());
        builder.setPath(api);

        List<NameValuePair> postParams = new ArrayList<NameValuePair>();

        if (params.length >= 3) {
            for (int i = 1; i < params.length; i += 2) {
                String name = null;
                String value = StringPool.EMPTY;

                if (params[i] != null) {
                    name = params[i].toString();
                }

                if (params[i + 1] != null) {
                    value = params[i + 1].toString();
                }

                if (isPostRequest) {
                    postParams.add(new BasicNameValuePair(name, value));
                } else {
                    builder.setParameter(name, value);
                }
            }
        }

        if (isPostRequest) {
            HttpPost postRequest = ((HttpPost) request);
            if (postRequest.getEntity() == null) {
                postRequest.setEntity(new UrlEncodedFormEntity(postParams));
            }
        }

        request.setURI(builder.build());

        String response = getHttpResponse(request);

        if (response != null && response.length() > 0) {
            Object jsonResponse = getJSONResponse(response);
            if (jsonResponse == null) {
                throw new APIException(api, "Unable to get response: " + response); //$NON-NLS-1$
            } else {
                retval = jsonResponse;
            }
        }
    } catch (APIException e) {
        throw e;
    } catch (Exception e) {
        throw new APIException(api, e);
    } finally {
        try {
            request.releaseConnection();
        } finally {
            // no need to log error
        }
    }

    return retval;
}
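The Liferay example above pairs setURI with URIBuilder: the URI is assembled piece by piece (scheme, host, port, path, query parameters) and only attached to the request at the end. A stripped-down sketch of that pattern, with a made-up host, port, path, and parameter for illustration only:

import java.net.URI;

import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.utils.URIBuilder;

public class UriBuilderSketch {
    public static void main(String[] args) throws Exception {
        // Assemble the target URI separately from the request object.
        URI uri = new URIBuilder()
                .setScheme("http")
                .setHost("localhost") // hypothetical host/port/path, not from the source above
                .setPort(8080)
                .setPath("/api/users")
                .setParameter("userId", "42")
                .build();

        HttpRequestBase request = new HttpGet();
        request.setURI(uri);

        // Prints: http://localhost:8080/api/users?userId=42
        System.out.println(request.getURI());
    }
}

Building the URI this way keeps query-parameter encoding in URIBuilder's hands instead of hand-concatenated strings.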
From source file: com.amazonaws.http.AmazonHttpClient.java

/**
 * Internal method to execute the HTTP method given.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T> Response<T> executeHelper(final Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if it delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /* Add the service endpoint to the logs. You can infer service name from the service endpoint. */
    awsRequestMetrics.addProperty(Field.ServiceName, request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint, request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute them in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());

    final AWSCredentials credentials = executionContext.getCredentials();
    AmazonWebServiceRequest awsreq = request.getOriginalRequest();
    ProgressListener listener = awsreq.getGeneralProgressListener();
    Signer signer = null;

    while (true) {
        ++requestCount;
        awsRequestMetrics.setCounter(Field.RequestCount, requestCount);
        if (requestCount > 1) { // retry
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }

        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse apacheResponse = null;

        try {
            // Sign the request if a signer was provided
            if (signer == null)
                signer = executionContext.getSignerByURI(request.getEndpoint());
            if (signer != null && credentials != null) {
                awsRequestMetrics.startEvent(Field.RequestSigningTime);
                try {
                    signer.sign(request, credentials);
                } finally {
                    awsRequestMetrics.endEvent(Field.RequestSigningTime);
                }
            }

            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }

            httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                httpRequest.setURI(redirectedURI);
            }

            if (requestCount > 1) { // retry
                // Notify the progress listener of the retry
                publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);

                awsRequestMetrics.startEvent(Field.RetryPauseTime);
                try {
                    pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount,
                            config.getRetryPolicy());
                } finally {
                    awsRequestMetrics.endEvent(Field.RetryPauseTime);
                }
            }

            if (entity != null) {
                InputStream content = entity.getContent();
                if (requestCount > 1) { // retry
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
            HttpContext httpContext = new BasicHttpContext();
            httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
            retriedException = null;

            publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT);
            awsRequestMetrics.startEvent(Field.HttpRequestTime);
            try {
                apacheResponse = httpClient.execute(httpRequest, httpContext);
            } finally {
                awsRequestMetrics.endEvent(Field.HttpRequestTime);
            }
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT);

            if (isRequestSuccessful(apacheResponse)) {
                awsRequestMetrics.addProperty(Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
                T response = handleResponse(request, responseHandler, httpRequest, httpResponse, apacheResponse,
                        executionContext);
                return new Response<T>(response, httpResponse);
            } else if (isTemporaryRedirect(apacheResponse)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = apacheResponse.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addProperty(Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
                awsRequestMetrics.addProperty(Field.RedirectLocation, redirectedLocation);
                awsRequestMetrics.addProperty(Field.AWSRequestID, null);
            } else {
                leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
                AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest,
                        apacheResponse);
                awsRequestMetrics.addProperty(Field.AWSRequestID, ase.getRequestId());
                awsRequestMetrics.addProperty(Field.AWSErrorCode, ase.getErrorCode());
                awsRequestMetrics.addProperty(Field.StatusCode, ase.getStatusCode());

                if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount,
                        config.getRetryPolicy())) {
                    throw ase;
                }

                // Cache the retryable exception
                retriedException = ase;
                /*
                 * Checking for clock skew error again because we don't want to set the
                 * global time offset for every service exception.
                 */
                if (RetryUtils.isClockSkewError(ase)) {
                    int timeOffset = parseClockSkewOffset(apacheResponse, ase);
                    SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
                }
                resetRequestAfterError(request, ase);
            }
        } catch (IOException ioe) {
            if (log.isInfoEnabled()) {
                log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            awsRequestMetrics.incrementCounter(Field.Exception);
            awsRequestMetrics.addProperty(Field.Exception, ioe);
            awsRequestMetrics.addProperty(Field.AWSRequestID, null);

            AmazonClientException ace = new AmazonClientException(
                    "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount,
                    config.getRetryPolicy())) {
                throw ace;
            }

            // Cache the retryable exception
            retriedException = ace;
            resetRequestAfterError(request, ioe);
        } catch (RuntimeException e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } catch (Error e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } finally {
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    if (apacheResponse != null && apacheResponse.getEntity() != null
                            && apacheResponse.getEntity().getContent() != null) {
                        apacheResponse.getEntity().getContent().close();
                    }
                } catch (IOException e) {
                    log.warn("Cannot close the response content.", e);
                }
            }
        }
    } /* end while (true) */
}
From source file: com.amazonaws.client.service.AmazonHttpClient.java

/**
 * Internal method to execute the HTTP method given.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T> Response<T> executeHelper(final Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if it delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    /* Add the service endpoint to the logs. You can infer service name from the service endpoint. */
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics()
            .addPropertyWith(ServiceName, request.getServiceName())
            .addPropertyWith(ServiceEndpoint, request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute them in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());

    final AWSCredentials credentials = executionContext.getCredentials();
    AmazonWebServiceRequest awsreq = request.getOriginalRequest();
    ProgressListener listener = awsreq.getGeneralProgressListener();
    Signer signer = null;

    while (true) {
        ++requestCount;
        awsRequestMetrics.setCounter(RequestCount, requestCount);
        if (requestCount > 1) { // retry
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }

        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse apacheResponse = null;

        try {
            // Sign the request if a signer was provided
            if (signer == null)
                signer = executionContext.getSignerByURI(request.getEndpoint());
            if (signer != null && credentials != null) {
                awsRequestMetrics.startEvent(RequestSigningTime);
                try {
                    signer.sign(request, credentials);
                } finally {
                    awsRequestMetrics.endEvent(RequestSigningTime);
                }
            }

            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }

            httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                httpRequest.setURI(redirectedURI);
            }

            if (requestCount > 1) { // retry
                // Notify the progress listener of the retry
                publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);

                awsRequestMetrics.startEvent(RetryPauseTime);
                try {
                    pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount,
                            config.getRetryPolicy());
                } finally {
                    awsRequestMetrics.endEvent(RetryPauseTime);
                }
            }

            if (entity != null) {
                InputStream content = entity.getContent();
                if (requestCount > 1) { // retry
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
            HttpContext httpContext = new BasicHttpContext();
            httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
            retriedException = null;

            publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT);
            awsRequestMetrics.startEvent(HttpRequestTime);
            try {
                apacheResponse = httpClient.execute(httpRequest, httpContext);
            } finally {
                awsRequestMetrics.endEvent(HttpRequestTime);
            }
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT);

            final StatusLine statusLine = apacheResponse.getStatusLine();
            final int statusCode = statusLine == null ? -1 : statusLine.getStatusCode();

            if (isRequestSuccessful(apacheResponse)) {
                awsRequestMetrics.addProperty(StatusCode, statusCode);
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
                T response = handleResponse(request, responseHandler, httpRequest, httpResponse, apacheResponse,
                        executionContext);
                return new Response<T>(response, httpResponse);
            }

            if (isTemporaryRedirect(apacheResponse)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = apacheResponse.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                if (log.isDebugEnabled())
                    log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addPropertyWith(StatusCode, statusCode)
                        .addPropertyWith(RedirectLocation, redirectedLocation)
                        .addPropertyWith(AWSRequestID, null);
                continue;
            }

            leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
            final AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest,
                    apacheResponse);
            awsRequestMetrics.addPropertyWith(AWSRequestID, ase.getRequestId())
                    .addPropertyWith(AWSErrorCode, ase.getErrorCode())
                    .addPropertyWith(StatusCode, ase.getStatusCode());
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount,
                    config.getRetryPolicy())) {
                throw ase;
            }
            // Comment out for now. Ref: CR2662349
            // Preserve the cause of retry before retrying
            // awsRequestMetrics.addProperty(RetryCause, ase);
            if (RetryUtils.isThrottlingException(ase)) {
                awsRequestMetrics.incrementCounterWith(ThrottleException).addProperty(ThrottleException, ase);
            }
            // Cache the retryable exception
            retriedException = ase;
            /*
             * Checking for clock skew error again because we don't want to set the
             * global time offset for every service exception.
             */
            if (RetryUtils.isClockSkewError(ase)) {
                int timeOffset = parseClockSkewOffset(apacheResponse, ase);
                SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
            }
            resetRequestAfterError(request, ase);
        } catch (IOException ioe) {
            if (log.isInfoEnabled()) {
                log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            captureExceptionMetrics(ioe, awsRequestMetrics);
            awsRequestMetrics.addProperty(AWSRequestID, null);

            AmazonClientException ace = new AmazonClientException(
                    "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount,
                    config.getRetryPolicy())) {
                throw ace;
            }

            // Cache the retryable exception
            retriedException = ace;
            resetRequestAfterError(request, ioe);
        } catch (RuntimeException e) {
            throw captureExceptionMetrics(e, awsRequestMetrics);
        } catch (Error e) {
            throw captureExceptionMetrics(e, awsRequestMetrics);
        } finally {
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    if (apacheResponse != null && apacheResponse.getEntity() != null
                            && apacheResponse.getEntity().getContent() != null) {
                        apacheResponse.getEntity().getContent().close();
                    }
                } catch (IOException e) {
                    log.warn("Cannot close the response content.", e);
                }
            }
        }
    } /* end while (true) */
}
From source file: crawlercommons.fetcher.http.SimpleHttpFetcher.java

private FetchedResult doRequest(HttpRequestBase request, String url, Payload payload) throws BaseFetchException {
    LOGGER.trace("Fetching " + url);

    HttpResponse response;
    long readStartTime;
    Metadata headerMap = new Metadata();
    String redirectedUrl = null;
    String newBaseUrl = null;
    int numRedirects = 0;
    boolean needAbort = true;
    String contentType = "";
    String mimeType = "";
    String hostAddress = null;
    int statusCode = HttpStatus.SC_INTERNAL_SERVER_ERROR;
    String reasonPhrase = null;

    // Create a local instance of cookie store, and bind to local context
    // Without this we get killed w/lots of threads, due to sync() on single cookie store.
    HttpContext localContext = new BasicHttpContext();
    CookieStore cookieStore = localCookieStore.get();
    localContext.setAttribute(HttpClientContext.COOKIE_STORE, cookieStore);

    StringBuilder fetchTrace = null;
    if (LOGGER.isTraceEnabled()) {
        fetchTrace = new StringBuilder("Fetched url: " + url);
    }

    try {
        request.setURI(new URI(url));

        readStartTime = System.currentTimeMillis();
        response = _httpClient.execute(request, localContext);

        Header[] headers = response.getAllHeaders();
        for (Header header : headers) {
            headerMap.add(header.getName(), header.getValue());
        }

        statusCode = response.getStatusLine().getStatusCode();
        reasonPhrase = response.getStatusLine().getReasonPhrase();

        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; status code: " + statusCode);
            if (headerMap.get(HttpHeaders.CONTENT_LENGTH) != null) {
                fetchTrace.append("; Content-Length: " + headerMap.get(HttpHeaders.CONTENT_LENGTH));
            }
            if (headerMap.get(HttpHeaders.LOCATION) != null) {
                fetchTrace.append("; Location: " + headerMap.get(HttpHeaders.LOCATION));
            }
        }

        if ((statusCode < 200) || (statusCode >= 300)) {
            // We can't just check against SC_OK, as some wackos return 201, 202, etc
            throw new HttpFetchException(url, "Error fetching " + url + " due to \"" + reasonPhrase + "\"",
                    statusCode, headerMap);
        }

        redirectedUrl = extractRedirectedUrl(url, localContext);

        URI permRedirectUri = (URI) localContext.getAttribute(PERM_REDIRECT_CONTEXT_KEY);
        if (permRedirectUri != null) {
            newBaseUrl = permRedirectUri.toURL().toExternalForm();
        }

        Integer redirects = (Integer) localContext.getAttribute(REDIRECT_COUNT_CONTEXT_KEY);
        if (redirects != null) {
            numRedirects = redirects.intValue();
        }

        hostAddress = (String) (localContext.getAttribute(HOST_ADDRESS));
        if (hostAddress == null) {
            throw new UrlFetchException(url, "Host address not saved in context");
        }

        Header cth = response.getFirstHeader(HttpHeaders.CONTENT_TYPE);
        if (cth != null) {
            contentType = cth.getValue();
        }

        // Check if we should abort due to mime-type filtering. Note that this will fail if the server
        // doesn't report a mime-type, but that's how we want it as this configuration is typically
        // used when only a subset of parsers are installed/enabled, so we don't want the auto-detect
        // code in Tika to get triggered & try to process an unsupported type. If you want unknown
        // mime-types from the server to be processed, set "" as one of the valid mime-types in
        // FetcherPolicy.
        mimeType = getMimeTypeFromContentType(contentType);
        Set<String> mimeTypes = getValidMimeTypes();
        if ((mimeTypes != null) && (mimeTypes.size() > 0)) {
            if (!mimeTypes.contains(mimeType)) {
                throw new AbortedFetchException(url, "Invalid mime-type: " + mimeType,
                        AbortedFetchReason.INVALID_MIMETYPE);
            }
        }

        needAbort = false;
    } catch (ClientProtocolException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException
        // (which this is a subclass of)
        needAbort = false;

        // If the root cause was a "too many redirects" error, we want to map this to a specific
        // exception that contains the final redirect.
        if (e.getCause() instanceof MyRedirectException) {
            MyRedirectException mre = (MyRedirectException) e.getCause();
            String redirectUrl = url;
            try {
                redirectUrl = mre.getUri().toURL().toExternalForm();
            } catch (MalformedURLException e2) {
                LOGGER.warn("Invalid URI saved during redirect handling: " + mre.getUri());
            }
            throw new RedirectFetchException(url, redirectUrl, mre.getReason());
        } else if (e.getCause() instanceof RedirectException) {
            LOGGER.error(e.getMessage());
            throw new RedirectFetchException(url, extractRedirectedUrl(url, localContext),
                    RedirectExceptionReason.TOO_MANY_REDIRECTS);
        } else {
            throw new IOFetchException(url, e);
        }
    } catch (IOException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException
        needAbort = false;
        throw new IOFetchException(url, e);
    } catch (URISyntaxException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (IllegalStateException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (BaseFetchException e) {
        throw e;
    } catch (Exception e) {
        // Map anything else to a generic IOFetchException
        // TODO KKr - create generic fetch exception
        throw new IOFetchException(url, new IOException(e));
    } finally {
        safeAbort(needAbort, request);
    }

    // Figure out how much data we want to try to fetch.
    int maxContentSize = getMaxContentSize(mimeType);
    int targetLength = maxContentSize;
    boolean truncated = false;
    String contentLengthStr = headerMap.get(HttpHeaders.CONTENT_LENGTH);
    if (contentLengthStr != null) {
        try {
            int contentLength = Integer.parseInt(contentLengthStr);
            if (contentLength > targetLength) {
                truncated = true;
            } else {
                targetLength = contentLength;
            }
        } catch (NumberFormatException e) {
            // Ignore (and log) invalid content length values.
            LOGGER.warn("Invalid content length in header: " + contentLengthStr);
        }
    }

    // Now finally read in response body, up to targetLength bytes.
    // Note that entity might be null, for zero length responses.
    byte[] content = new byte[0];
    long readRate = 0;
    HttpEntity entity = response.getEntity();
    needAbort = true;

    if (entity != null) {
        InputStream in = null;
        try {
            in = entity.getContent();
            byte[] buffer = new byte[BUFFER_SIZE];
            int bytesRead = 0;
            int totalRead = 0;
            ByteArrayOutputStream out = new ByteArrayOutputStream(DEFAULT_BYTEARRAY_SIZE);

            int readRequests = 0;
            int minResponseRate = getMinResponseRate();
            // TODO KKr - we need to monitor the rate while reading a single block. Look at HttpClient
            // metrics support for how to do this. Once we fix this, fix the test to read a smaller
            // (< 20K) chunk of data.
            while ((totalRead < targetLength)
                    && ((bytesRead = in.read(buffer, 0, Math.min(buffer.length, targetLength - totalRead))) != -1)) {
                readRequests += 1;
                totalRead += bytesRead;
                out.write(buffer, 0, bytesRead);

                // Assume read time is at least one millisecond, to avoid DBZ exception.
                long totalReadTime = Math.max(1, System.currentTimeMillis() - readStartTime);
                readRate = (totalRead * 1000L) / totalReadTime;

                // Don't bail on the first read cycle, as we can get a hiccup starting out.
                // Also don't bail if we've read everything we need.
                if ((readRequests > 1) && (totalRead < targetLength) && (readRate < minResponseRate)) {
                    throw new AbortedFetchException(url, "Slow response rate of " + readRate + " bytes/sec",
                            AbortedFetchReason.SLOW_RESPONSE_RATE);
                }

                // Check to see if we got interrupted, but don't clear the interrupted flag.
                if (Thread.currentThread().isInterrupted()) {
                    throw new AbortedFetchException(url, AbortedFetchReason.INTERRUPTED);
                }
            }

            content = out.toByteArray();
            needAbort = truncated || (in.available() > 0);
        } catch (IOException e) {
            // We don't need to abort if there's an IOException
            throw new IOFetchException(url, e);
        } finally {
            safeAbort(needAbort, request);
            safeClose(in);
        }
    }

    // Toss truncated image content.
    if ((truncated) && (!isTextMimeType(mimeType))) {
        throw new AbortedFetchException(url, "Truncated image", AbortedFetchReason.CONTENT_SIZE);
    }

    // Now see if we need to uncompress the content.
    String contentEncoding = headerMap.get(HttpHeaders.CONTENT_ENCODING);
    if (contentEncoding != null) {
        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; Content-Encoding: " + contentEncoding);
        }

        // TODO KKr We might want to just decompress a truncated gzip containing text (since we have
        // a max content size to save us from any gzip corruption). We might want to break the
        // following out into a separate method, by the way (if not refactor this entire monolithic
        // method).
        try {
            if ("gzip".equals(contentEncoding) || "x-gzip".equals(contentEncoding)) {
                if (truncated) {
                    throw new AbortedFetchException(url, "Truncated compressed data",
                            AbortedFetchReason.CONTENT_SIZE);
                } else {
                    ExpandedResult expandedResult = EncodingUtils.processGzipEncoded(content, maxContentSize);
                    truncated = expandedResult.isTruncated();
                    if ((truncated) && (!isTextMimeType(mimeType))) {
                        throw new AbortedFetchException(url, "Truncated decompressed image",
                                AbortedFetchReason.CONTENT_SIZE);
                    } else {
                        content = expandedResult.getExpanded();
                        if (LOGGER.isTraceEnabled()) {
                            fetchTrace.append("; unzipped to " + content.length + " bytes");
                        }
                    }
                    // } else if ("deflate".equals(contentEncoding)) {
                    //     content = EncodingUtils.processDeflateEncoded(content);
                    //     if (LOGGER.isTraceEnabled()) {
                    //         fetchTrace.append("; inflated to " + content.length + " bytes");
                    //     }
                }
            }
        } catch (IOException e) {
            throw new IOFetchException(url, e);
        }
    }

    // Finally dump out the trace msg we've been building.
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace(fetchTrace.toString());
    }

    // TODO KKr - Save truncated flag in FetchedResult/FetchedDatum.
    return new FetchedResult(url, redirectedUrl, System.currentTimeMillis(), headerMap, content, contentType,
            (int) readRate, payload, newBaseUrl, numRedirects, hostAddress, statusCode, reasonPhrase);
}
From source file: org.fcrepo.test.api.TestRESTAPI.java

private HttpResponse getOrDelete(HttpClient client, HttpRequestBase method, boolean authenticate,
        boolean validate) throws Exception {
    LOGGER.debug(method.getURI().toString());
    if (!(method instanceof HttpGet || method instanceof HttpDelete)) {
        throw new IllegalArgumentException("method must be one of GET or DELETE.");
    }

    HttpResponse response = client.execute(method);

    if (response.getStatusLine().getStatusCode() == SC_MOVED_TEMPORARILY) {
        String redir = response.getFirstHeader(HttpHeaders.LOCATION).getValue();
        if (!method.getURI().toString().equals(redir)) {
            method.setURI(getURI(redir));
            response = getOrDelete(client, method, authenticate, validate);
        }
    }

    if (validate) {
        validateResponse(method.getURI(), response);
    }
    return response;
}