Example usage for org.apache.http.protocol HttpContext setAttribute

List of usage examples for org.apache.http.protocol HttpContext setAttribute

Introduction

This page collects usage examples of org.apache.http.protocol HttpContext setAttribute from open-source projects.

Prototype

void setAttribute(String id, Object obj);
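
Before the project examples below, here is a minimal, self-contained sketch of the call itself. BasicHttpContext is the standard in-memory HttpContext implementation; the attribute key used here is arbitrary and chosen purely for illustration.

import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;

public class SetAttributeSketch {
    public static void main(String[] args) {
        HttpContext context = new BasicHttpContext();

        // Store an arbitrary object under a string key.
        context.setAttribute("example.attribute", Integer.valueOf(42));

        // Read it back later; getAttribute returns Object, so a cast is needed.
        Integer value = (Integer) context.getAttribute("example.attribute");
        System.out.println("Stored attribute: " + value);
    }
}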

Usage

From source file:com.uber.jaeger.httpclient.SpanCreationRequestInterceptor.java

@Override
public void process(HttpRequest httpRequest, HttpContext httpContext) throws HttpException, IOException {
    try {
        TraceContext parentContext = TracingUtils.getTraceContext();

        Tracer.SpanBuilder clientSpanBuilder = tracer.buildSpan(getOperationName(httpRequest));
        if (!parentContext.isEmpty()) {
            clientSpanBuilder.asChildOf(parentContext.getCurrentSpan());
        }

        Span clientSpan = clientSpanBuilder.startManual();

        RequestLine requestLine = httpRequest.getRequestLine();

        Tags.SPAN_KIND.set(clientSpan, Tags.SPAN_KIND_CLIENT);
        Tags.HTTP_URL.set(clientSpan, requestLine.getUri());

        if (httpContext instanceof HttpClientContext) {
            HttpHost host = ((HttpClientContext) httpContext).getTargetHost();
            Tags.PEER_HOSTNAME.set(clientSpan, host.getHostName());
            Tags.PEER_PORT.set(clientSpan, host.getPort());
        }

        onSpanStarted(clientSpan, httpRequest, httpContext);

        httpContext.setAttribute(Constants.CURRENT_SPAN_CONTEXT_KEY, clientSpan);
    } catch (Exception e) {
        log.error("Could not start client tracing span.", e);
    }
}
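
The span stored under Constants.CURRENT_SPAN_CONTEXT_KEY above is meant to be read back later on the same context, typically by a response interceptor that finishes it. The following is a hypothetical counterpart, not the library's own class; it assumes only the Constants key referenced above and the OpenTracing Span API already used in the example, and the "http.status_code" tag name is illustrative.

import java.io.IOException;

import org.apache.http.HttpException;
import org.apache.http.HttpResponse;
import org.apache.http.HttpResponseInterceptor;
import org.apache.http.protocol.HttpContext;

import io.opentracing.Span;

public class SpanFinishingResponseInterceptor implements HttpResponseInterceptor {
    @Override
    public void process(HttpResponse response, HttpContext context) throws HttpException, IOException {
        // Read back the span the request interceptor stored on the shared context.
        Object attribute = context.getAttribute(Constants.CURRENT_SPAN_CONTEXT_KEY);
        if (attribute instanceof Span) {
            Span span = (Span) attribute;
            span.setTag("http.status_code", response.getStatusLine().getStatusCode());
            span.finish();
        }
    }
}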

From source file:org.apache.ambari.view.hive.client.Connection.java

private CloseableHttpClient getHttpClient(Boolean useSsl) throws SQLException {
    boolean isCookieEnabled = authParams.get(Utils.HiveAuthenticationParams.COOKIE_AUTH) == null
            || (!Utils.HiveAuthenticationParams.COOKIE_AUTH_FALSE
                    .equalsIgnoreCase(authParams.get(Utils.HiveAuthenticationParams.COOKIE_AUTH)));
    String cookieName = authParams.get(Utils.HiveAuthenticationParams.COOKIE_NAME) == null
            ? Utils.HiveAuthenticationParams.DEFAULT_COOKIE_NAMES_HS2
            : authParams.get(Utils.HiveAuthenticationParams.COOKIE_NAME);
    CookieStore cookieStore = isCookieEnabled ? new BasicCookieStore() : null;
    HttpClientBuilder httpClientBuilder;
    // Request interceptor for any request pre-processing logic
    HttpRequestInterceptor requestInterceptor;
    Map<String, String> additionalHttpHeaders = new HashMap<String, String>();

    // Retrieve the additional HttpHeaders
    for (Map.Entry<String, String> entry : authParams.entrySet()) {
        String key = entry.getKey();

        if (key.startsWith(Utils.HiveAuthenticationParams.HTTP_HEADER_PREFIX)) {
            additionalHttpHeaders.put(key.substring(Utils.HiveAuthenticationParams.HTTP_HEADER_PREFIX.length()),
                    entry.getValue());
        }
    }
    // Configure http client for kerberos/password based authentication
    if (isKerberosAuthMode()) {
        /**
         * Add an interceptor which sets the appropriate header in the request.
         * It performs the Kerberos authentication and gets the final service ticket
         * to send to the server before every request.
         * In HTTPS mode, all of this information is encrypted.
         */

        Boolean assumeSubject = Utils.HiveAuthenticationParams.AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT
                .equals(authParams.get(Utils.HiveAuthenticationParams.AUTH_KERBEROS_AUTH_TYPE));
        requestInterceptor = new HttpKerberosRequestInterceptor(
                authParams.get(Utils.HiveAuthenticationParams.AUTH_PRINCIPAL), host, getServerHttpUrl(useSsl),
                assumeSubject, cookieStore, cookieName, useSsl, additionalHttpHeaders);
    } else {
        /**
         * Add an interceptor to pass username/password in the header.
         * In HTTPS mode, all of this information is encrypted.
         */
        requestInterceptor = new HttpBasicAuthInterceptor(
                getAuthParamDefault(Utils.HiveAuthenticationParams.AUTH_USER, getUsername()), getPassword(),
                cookieStore, cookieName, useSsl, additionalHttpHeaders);
    }
    // Configure http client for cookie based authentication
    if (isCookieEnabled) {
        // Create a http client with a retry mechanism when the server returns a status code of 401.
        httpClientBuilder = HttpClients.custom()
                .setServiceUnavailableRetryStrategy(new ServiceUnavailableRetryStrategy() {

                    @Override
                    public boolean retryRequest(final HttpResponse response, final int executionCount,
                            final HttpContext context) {
                        int statusCode = response.getStatusLine().getStatusCode();
                        boolean ret = statusCode == 401 && executionCount <= 1;

                        // Set the context attribute to true which will be interpreted by the request interceptor
                        if (ret) {
                            context.setAttribute(Utils.HIVE_SERVER2_RETRY_KEY, Utils.HIVE_SERVER2_RETRY_TRUE);
                        }
                        return ret;
                    }

                    @Override
                    public long getRetryInterval() {
                        // Immediate retry
                        return 0;
                    }
                });
    } else {
        httpClientBuilder = HttpClientBuilder.create();
    }
    // Add the request interceptor to the client builder
    httpClientBuilder.addInterceptorFirst(requestInterceptor);
    // Configure http client for SSL
    if (useSsl) {
        String useTwoWaySSL = authParams.get(Utils.HiveAuthenticationParams.USE_TWO_WAY_SSL);
        String sslTrustStorePath = authParams.get(Utils.HiveAuthenticationParams.SSL_TRUST_STORE);
        String sslTrustStorePassword = authParams.get(Utils.HiveAuthenticationParams.SSL_TRUST_STORE_PASSWORD);
        KeyStore sslTrustStore;
        SSLSocketFactory socketFactory;

        /**
         * The code within the try block throws:
         * 1. SSLInitializationException
         * 2. KeyStoreException
         * 3. IOException
         * 4. NoSuchAlgorithmException
         * 5. CertificateException
         * 6. KeyManagementException
         * 7. UnrecoverableKeyException
         * We don't want the client to retry on any of these, hence we catch all
         * and throw a SQLException.
         */
        try {
            if (useTwoWaySSL != null && useTwoWaySSL.equalsIgnoreCase(Utils.HiveAuthenticationParams.TRUE)) {
                socketFactory = getTwoWaySSLSocketFactory();
            } else if (sslTrustStorePath == null || sslTrustStorePath.isEmpty()) {
                // Create a default socket factory based on standard JSSE trust material
                socketFactory = SSLSocketFactory.getSocketFactory();
            } else {
                // Pick trust store config from the given path
                sslTrustStore = KeyStore.getInstance(Utils.HiveAuthenticationParams.SSL_TRUST_STORE_TYPE);
                try (FileInputStream fis = new FileInputStream(sslTrustStorePath)) {
                    sslTrustStore.load(fis, sslTrustStorePassword.toCharArray());
                }
                socketFactory = new SSLSocketFactory(sslTrustStore);
            }
            socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);

            final Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create()
                    .register("https", socketFactory).build();

            httpClientBuilder.setConnectionManager(new BasicHttpClientConnectionManager(registry));
        } catch (Exception e) {
            String msg = "Could not create an https connection to " + getServerHttpUrl(useSsl) + ". "
                    + e.getMessage();
            throw new SQLException(msg, "08S01", e);
        }
    }
    return httpClientBuilder.build();
}
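
The HIVE_SERVER2_RETRY_KEY attribute set by the retry strategy is only half of the handshake; the request interceptor registered via addInterceptorFirst is expected to check it on the next attempt. The sketch below is a hypothetical consumer side (the real HttpKerberosRequestInterceptor and HttpBasicAuthInterceptor are more involved); only the Utils constants from the code above are assumed, and the Cookie-header handling is illustrative.

import java.io.IOException;

import org.apache.http.HttpException;
import org.apache.http.HttpRequest;
import org.apache.http.HttpRequestInterceptor;
import org.apache.http.protocol.HttpContext;

public class RetryAwareAuthInterceptor implements HttpRequestInterceptor {
    @Override
    public void process(HttpRequest request, HttpContext context) throws HttpException, IOException {
        // The ServiceUnavailableRetryStrategy above sets this flag when a 401 triggers a retry.
        Object retryFlag = context.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY);
        boolean isRetry = Utils.HIVE_SERVER2_RETRY_TRUE.equals(retryFlag);
        if (isRetry) {
            // On a retry, drop the cached cookie so the request re-authenticates from scratch.
            request.removeHeaders("Cookie");
        }
    }
}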

From source file:com.amazonaws.http.AmazonHttpClient.java

/**
 * Internal method to execute the HTTP method given.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T> Response<T> executeHelper(final Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if it delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    awsRequestMetrics.addProperty(Field.ServiceName, request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint, request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());
    final AWSCredentials credentials = executionContext.getCredentials();
    AmazonWebServiceRequest awsreq = request.getOriginalRequest();
    ProgressListener listener = awsreq.getGeneralProgressListener();
    Signer signer = null;

    while (true) {
        ++requestCount;
        awsRequestMetrics.setCounter(Field.RequestCount, requestCount);
        if (requestCount > 1) { // retry
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }
        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse apacheResponse = null;

        try {
            // Sign the request if a signer was provided
            if (signer == null)
                signer = executionContext.getSignerByURI(request.getEndpoint());
            if (signer != null && credentials != null) {
                awsRequestMetrics.startEvent(Field.RequestSigningTime);
                try {
                    signer.sign(request, credentials);
                } finally {
                    awsRequestMetrics.endEvent(Field.RequestSigningTime);
                }
            }

            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }

            httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                httpRequest.setURI(redirectedURI);
            }

            if (requestCount > 1) { // retry
                // Notify the progress listener of the retry
                publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);

                awsRequestMetrics.startEvent(Field.RetryPauseTime);
                try {
                    pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount,
                            config.getRetryPolicy());
                } finally {
                    awsRequestMetrics.endEvent(Field.RetryPauseTime);
                }
            }

            if (entity != null) {
                InputStream content = entity.getContent();
                if (requestCount > 1) { // retry
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
            HttpContext httpContext = new BasicHttpContext();
            httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
            retriedException = null;

            publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT);
            awsRequestMetrics.startEvent(Field.HttpRequestTime);
            try {
                apacheResponse = httpClient.execute(httpRequest, httpContext);
            } finally {
                awsRequestMetrics.endEvent(Field.HttpRequestTime);
            }
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT);

            if (isRequestSuccessful(apacheResponse)) {
                awsRequestMetrics.addProperty(Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
                T response = handleResponse(request, responseHandler, httpRequest, httpResponse, apacheResponse,
                        executionContext);
                return new Response<T>(response, httpResponse);
            } else if (isTemporaryRedirect(apacheResponse)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = apacheResponse.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addProperty(Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
                awsRequestMetrics.addProperty(Field.RedirectLocation, redirectedLocation);
                awsRequestMetrics.addProperty(Field.AWSRequestID, null);

            } else {
                leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
                AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest,
                        apacheResponse);
                awsRequestMetrics.addProperty(Field.AWSRequestID, ase.getRequestId());
                awsRequestMetrics.addProperty(Field.AWSErrorCode, ase.getErrorCode());
                awsRequestMetrics.addProperty(Field.StatusCode, ase.getStatusCode());

                if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount,
                        config.getRetryPolicy())) {
                    throw ase;
                }

                // Cache the retryable exception
                retriedException = ase;
                /*
                 * Checking for clock skew error again because we don't want to set the
                 * global time offset for every service exception.
                 */
                if (RetryUtils.isClockSkewError(ase)) {
                    int timeOffset = parseClockSkewOffset(apacheResponse, ase);
                    SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
                }
                resetRequestAfterError(request, ase);
            }
        } catch (IOException ioe) {
            if (log.isInfoEnabled()) {
                log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            awsRequestMetrics.incrementCounter(Field.Exception);
            awsRequestMetrics.addProperty(Field.Exception, ioe);
            awsRequestMetrics.addProperty(Field.AWSRequestID, null);

            AmazonClientException ace = new AmazonClientException(
                    "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount,
                    config.getRetryPolicy())) {
                throw ace;
            }

            // Cache the retryable exception
            retriedException = ace;
            resetRequestAfterError(request, ioe);
        } catch (RuntimeException e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } catch (Error e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } finally {
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    if (apacheResponse != null && apacheResponse.getEntity() != null
                            && apacheResponse.getEntity().getContent() != null) {
                        apacheResponse.getEntity().getContent().close();
                    }
                } catch (IOException e) {
                    log.warn("Cannot close the response content.", e);
                }
            }
        }
    } /* end while (true) */
}
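
Because the AWSRequestMetrics object is attached to the HttpContext under its simple class name, anything else running inside the same Apache HttpClient execution (for example, a custom request interceptor registered on the client) can read it back with the same key. The following is a hypothetical sketch, not SDK code; it assumes the SDK's com.amazonaws.util.AWSRequestMetrics class and an illustrative interceptor name.

import java.io.IOException;

import org.apache.http.HttpException;
import org.apache.http.HttpRequest;
import org.apache.http.HttpRequestInterceptor;
import org.apache.http.protocol.HttpContext;

import com.amazonaws.util.AWSRequestMetrics;

public class MetricsAwareInterceptor implements HttpRequestInterceptor {
    @Override
    public void process(HttpRequest request, HttpContext context) throws HttpException, IOException {
        // Same key executeHelper used above: the simple class name of AWSRequestMetrics.
        Object attribute = context.getAttribute(AWSRequestMetrics.class.getSimpleName());
        if (attribute instanceof AWSRequestMetrics) {
            AWSRequestMetrics metrics = (AWSRequestMetrics) attribute;
            // Illustrative only: correlate the metrics object with the outgoing request line.
            System.out.println(request.getRequestLine() + " -> metrics attached: " + metrics);
        }
    }
}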

From source file:crawlercommons.fetcher.SimpleHttpFetcher.java

private FetchedResult doRequest(HttpRequestBase request, String url, Payload payload)
        throws BaseFetchException {
    LOGGER.trace("Fetching " + url);

    HttpResponse response;
    long readStartTime;
    Metadata headerMap = new Metadata();
    String redirectedUrl = null;
    String newBaseUrl = null;
    int numRedirects = 0;
    boolean needAbort = true;
    String contentType = "";
    String mimeType = "";
    String hostAddress = null;

    // Create a local instance of cookie store, and bind to local context
    // Without this we get killed w/lots of threads, due to sync() on single cookie store.
    HttpContext localContext = new BasicHttpContext();
    CookieStore cookieStore = new BasicCookieStore();
    localContext.setAttribute(ClientContext.COOKIE_STORE, cookieStore);

    StringBuilder fetchTrace = null;
    if (LOGGER.isTraceEnabled()) {
        fetchTrace = new StringBuilder("Fetched url: " + url);
    }

    try {
        request.setURI(new URI(url));

        readStartTime = System.currentTimeMillis();
        response = _httpClient.execute(request, localContext);

        Header[] headers = response.getAllHeaders();
        for (Header header : headers) {
            headerMap.add(header.getName(), header.getValue());
        }

        int httpStatus = response.getStatusLine().getStatusCode();
        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; status code: " + httpStatus);
            if (headerMap.get(HttpHeaders.CONTENT_LENGTH) != null) {
                fetchTrace.append("; Content-Length: " + headerMap.get(HttpHeaders.CONTENT_LENGTH));
            }

            if (headerMap.get(HttpHeaders.LOCATION) != null) {
                fetchTrace.append("; Location: " + headerMap.get(HttpHeaders.LOCATION));
            }
        }

        if ((httpStatus < 200) || (httpStatus >= 300)) {
            // We can't just check against SC_OK, as some wackos return 201, 202, etc
            throw new HttpFetchException(url, "Error fetching " + url, httpStatus, headerMap);
        }

        redirectedUrl = extractRedirectedUrl(url, localContext);

        URI permRedirectUri = (URI) localContext.getAttribute(PERM_REDIRECT_CONTEXT_KEY);
        if (permRedirectUri != null) {
            newBaseUrl = permRedirectUri.toURL().toExternalForm();
        }

        Integer redirects = (Integer) localContext.getAttribute(REDIRECT_COUNT_CONTEXT_KEY);
        if (redirects != null) {
            numRedirects = redirects.intValue();
        }

        hostAddress = (String) (localContext.getAttribute(HOST_ADDRESS));
        if (hostAddress == null) {
            throw new UrlFetchException(url, "Host address not saved in context");
        }

        Header cth = response.getFirstHeader(HttpHeaders.CONTENT_TYPE);
        if (cth != null) {
            contentType = cth.getValue();
        }

        // Check if we should abort due to mime-type filtering. Note that this will fail if the server
        // doesn't report a mime-type, but that's how we want it as this configuration is typically
        // used when only a subset of parsers are installed/enabled, so we don't want the auto-detect
        // code in Tika to get triggered & try to process an unsupported type. If you want unknown
        // mime-types from the server to be processed, set "" as one of the valid mime-types in FetcherPolicy.
        mimeType = getMimeTypeFromContentType(contentType);
        Set<String> mimeTypes = getValidMimeTypes();
        if ((mimeTypes != null) && (mimeTypes.size() > 0)) {
            if (!mimeTypes.contains(mimeType)) {
                throw new AbortedFetchException(url, "Invalid mime-type: " + mimeType,
                        AbortedFetchReason.INVALID_MIMETYPE);
            }
        }

        needAbort = false;
    } catch (ClientProtocolException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException (which this is a subclass of)
        needAbort = false;

        // If the root case was a "too many redirects" error, we want to map this to a specific
        // exception that contains the final redirect.
        if (e.getCause() instanceof MyRedirectException) {
            MyRedirectException mre = (MyRedirectException) e.getCause();
            String redirectUrl = url;

            try {
                redirectUrl = mre.getUri().toURL().toExternalForm();
            } catch (MalformedURLException e2) {
                LOGGER.warn("Invalid URI saved during redirect handling: " + mre.getUri());
            }

            throw new RedirectFetchException(url, redirectUrl, mre.getReason());
        } else if (e.getCause() instanceof RedirectException) {
            throw new RedirectFetchException(url, extractRedirectedUrl(url, localContext),
                    RedirectExceptionReason.TOO_MANY_REDIRECTS);
        } else {
            throw new IOFetchException(url, e);
        }
    } catch (IOException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException
        needAbort = false;

        if (e instanceof ConnectionPoolTimeoutException) {
            // Should never happen, so let's dump some info about the connection pool.
            ThreadSafeClientConnManager cm = (ThreadSafeClientConnManager) _httpClient.getConnectionManager();
            int numConnections = cm.getConnectionsInPool();
            cm.closeIdleConnections(0, TimeUnit.MILLISECONDS);
            LOGGER.error(String.format(
                    "Got ConnectionPoolTimeoutException: %d connections before, %d after idle close",
                    numConnections, cm.getConnectionsInPool()));
        }

        throw new IOFetchException(url, e);
    } catch (URISyntaxException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (IllegalStateException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (BaseFetchException e) {
        throw e;
    } catch (Exception e) {
        // Map anything else to a generic IOFetchException
        // TODO KKr - create generic fetch exception
        throw new IOFetchException(url, new IOException(e));
    } finally {
        safeAbort(needAbort, request);
    }

    // Figure out how much data we want to try to fetch.
    int maxContentSize = getMaxContentSize(mimeType);
    int targetLength = maxContentSize;
    boolean truncated = false;
    String contentLengthStr = headerMap.get(HttpHeaders.CONTENT_LENGTH);
    if (contentLengthStr != null) {
        try {
            int contentLength = Integer.parseInt(contentLengthStr);
            if (contentLength > targetLength) {
                truncated = true;
            } else {
                targetLength = contentLength;
            }
        } catch (NumberFormatException e) {
            // Ignore (and log) invalid content length values.
            LOGGER.warn("Invalid content length in header: " + contentLengthStr);
        }
    }

    // Now finally read in response body, up to targetLength bytes.
    // Note that entity might be null, for zero length responses.
    byte[] content = new byte[0];
    long readRate = 0;
    HttpEntity entity = response.getEntity();
    needAbort = true;

    if (entity != null) {
        InputStream in = null;

        try {
            in = entity.getContent();
            byte[] buffer = new byte[BUFFER_SIZE];
            int bytesRead = 0;
            int totalRead = 0;
            ByteArrayOutputStream out = new ByteArrayOutputStream(DEFAULT_BYTEARRAY_SIZE);

            int readRequests = 0;
            int minResponseRate = getMinResponseRate();
            // TODO KKr - we need to monitor the rate while reading a
            // single block. Look at HttpClient
            // metrics support for how to do this. Once we fix this, fix
            // the test to read a smaller (< 20K)
            // chunk of data.
            while ((totalRead < targetLength) && ((bytesRead = in.read(buffer, 0,
                    Math.min(buffer.length, targetLength - totalRead))) != -1)) {
                readRequests += 1;
                totalRead += bytesRead;
                out.write(buffer, 0, bytesRead);

                // Assume read time is at least one millisecond, to avoid DBZ exception.
                long totalReadTime = Math.max(1, System.currentTimeMillis() - readStartTime);
                readRate = (totalRead * 1000L) / totalReadTime;

                // Don't bail on the first read cycle, as we can get a hiccup starting out.
                // Also don't bail if we've read everything we need.
                if ((readRequests > 1) && (totalRead < targetLength) && (readRate < minResponseRate)) {
                    throw new AbortedFetchException(url, "Slow response rate of " + readRate + " bytes/sec",
                            AbortedFetchReason.SLOW_RESPONSE_RATE);
                }

                // Check to see if we got interrupted.
                if (Thread.interrupted()) {
                    throw new AbortedFetchException(url, AbortedFetchReason.INTERRUPTED);
                }
            }

            content = out.toByteArray();
            needAbort = truncated || (in.available() > 0);
        } catch (IOException e) {
            // We don't need to abort if there's an IOException
            throw new IOFetchException(url, e);
        } finally {
            safeAbort(needAbort, request);
            safeClose(in);
        }
    }

    // Toss truncated image content.
    if ((truncated) && (!isTextMimeType(mimeType))) {
        throw new AbortedFetchException(url, "Truncated image", AbortedFetchReason.CONTENT_SIZE);
    }

    // Now see if we need to uncompress the content.
    String contentEncoding = headerMap.get(HttpHeaders.CONTENT_ENCODING);
    if (contentEncoding != null) {
        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; Content-Encoding: " + contentEncoding);
        }

        // TODO KKr We might want to just decompress a truncated gzip
        // containing text (since we have a max content size to save us
        // from any gzip corruption). We might want to break the following 
        // out into a separate method, by the way (if not refactor this 
        // entire monolithic method).
        //
        try {
            if ("gzip".equals(contentEncoding) || "x-gzip".equals(contentEncoding)) {
                if (truncated) {
                    throw new AbortedFetchException(url, "Truncated compressed data",
                            AbortedFetchReason.CONTENT_SIZE);
                } else {
                    ExpandedResult expandedResult = EncodingUtils.processGzipEncoded(content, maxContentSize);
                    truncated = expandedResult.isTruncated();
                    if ((truncated) && (!isTextMimeType(mimeType))) {
                        throw new AbortedFetchException(url, "Truncated decompressed image",
                                AbortedFetchReason.CONTENT_SIZE);
                    } else {
                        content = expandedResult.getExpanded();
                        if (LOGGER.isTraceEnabled()) {
                            fetchTrace.append("; unzipped to " + content.length + " bytes");
                        }
                    }
                    //                    } else if ("deflate".equals(contentEncoding)) {
                    //                        content = EncodingUtils.processDeflateEncoded(content);
                    //                        if (LOGGER.isTraceEnabled()) {
                    //                            fetchTrace.append("; inflated to " + content.length + " bytes");
                    //                        }
                }
            }
        } catch (IOException e) {
            throw new IOFetchException(url, e);
        }
    }

    // Finally dump out the trace msg we've been building.
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace(fetchTrace.toString());
    }

    // TODO KKr - Save truncated flag in FetchedResult/FetchedDatum.
    return new FetchedResult(url, redirectedUrl, System.currentTimeMillis(), headerMap, content, contentType,
            (int) readRate, payload, newBaseUrl, numRedirects, hostAddress);
}

From source file:net.kungfoo.grizzly.proxy.impl.ConnectingHandler.java

/**
 * Triggered when the connection is ready to send an HTTP request.
 *
 * @see NHttpClientConnection
 *
 * @param conn HTTP connection that is ready to send an HTTP request
 */
public void requestReady(final NHttpClientConnection conn) {
    System.out.println(conn + " [proxy->origin] request ready");

    HttpContext context = conn.getContext();
    ProxyProcessingInfo proxyTask = (ProxyProcessingInfo) context.getAttribute(ProxyProcessingInfo.ATTRIB);

    // TODO: change it to ReentrantLock
    synchronized (proxyTask) {
        if (requestReadyValidateConnectionState(proxyTask))
            return;

        HttpRequest request = proxyTask.getRequest();
        if (request == null) {
            throw new IllegalStateException("HTTP request is null");
        }

        requestReadyCleanUpHeaders(request);

        HttpHost targetHost = proxyTask.getTarget();

        try {

            request.setParams(new DefaultedHttpParams(request.getParams(), this.params));

            // Pre-process HTTP request
            context.setAttribute(ExecutionContext.HTTP_CONNECTION, conn);
            context.setAttribute(ExecutionContext.HTTP_TARGET_HOST, targetHost);

            this.httpProcessor.process(request, context);
            // and send it to the origin server
            Request originalRequest = proxyTask.getOriginalRequest();
            int length = originalRequest.getContentLength();
            if (length > 0) {
                BasicHttpEntity httpEntity = new BasicHttpEntity();
                httpEntity.setContentLength(originalRequest.getContentLengthLong());
                /*
                          httpEntity.setContent(((InternalInputBuffer) originalRequest.getInputBuffer()).getInputStream());
                          ((BasicHttpEntityEnclosingRequest) request).setEntity(httpEntity);
                */
            }
            conn.submitRequest(request);
            // Update connection state
            proxyTask.setOriginState(ConnState.REQUEST_SENT);

            System.out.println(conn + " [proxy->origin] >> " + request.getRequestLine().toString());

        } catch (IOException ex) {
            shutdownConnection(conn);
        } catch (HttpException ex) {
            shutdownConnection(conn);
        }

    }
}
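
The ExecutionContext.HTTP_CONNECTION and ExecutionContext.HTTP_TARGET_HOST attributes set before httpProcessor.process(...) follow the standard HttpCore convention: protocol interceptors such as RequestTargetHost read them off the context to fill in request headers. Below is a minimal, self-contained sketch of that pattern outside the proxy code, assuming HttpCore 4.3's ImmutableHttpProcessor; the class name and the host example.com are arbitrary.

import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.message.BasicHttpRequest;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.ExecutionContext;
import org.apache.http.protocol.HttpContext;
import org.apache.http.protocol.HttpProcessor;
import org.apache.http.protocol.ImmutableHttpProcessor;
import org.apache.http.protocol.RequestContent;
import org.apache.http.protocol.RequestTargetHost;

public class ManualHttpProcessorSketch {
    public static void main(String[] args) throws Exception {
        HttpProcessor processor = new ImmutableHttpProcessor(new RequestContent(), new RequestTargetHost());

        HttpRequest request = new BasicHttpRequest("GET", "/");
        HttpContext context = new BasicHttpContext();

        // RequestTargetHost reads this attribute to populate the Host header.
        context.setAttribute(ExecutionContext.HTTP_TARGET_HOST, new HttpHost("example.com", 80));

        processor.process(request, context);
        System.out.println(request.getFirstHeader("Host"));
    }
}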

From source file:com.uber.jaeger.httpclient.TracingRequestInterceptor.java

@Override
public void process(HttpRequest httpRequest, HttpContext httpContext) throws HttpException, IOException {
    try {
        TraceContext parentContext = TracingUtils.getTraceContext();

        RequestLine requestLine = httpRequest.getRequestLine();
        Tracer.SpanBuilder clientSpanBuilder = tracer.buildSpan(getOperationName(httpRequest));
        if (!parentContext.isEmpty()) {
            clientSpanBuilder.asChildOf(parentContext.getCurrentSpan());
        }

        Span clientSpan = clientSpanBuilder.start();
        Tags.SPAN_KIND.set(clientSpan, Tags.SPAN_KIND_CLIENT);
        Tags.HTTP_URL.set(clientSpan, requestLine.getUri());

        if (httpContext instanceof HttpClientContext) {
            HttpHost host = ((HttpClientContext) httpContext).getTargetHost();
            Tags.PEER_HOSTNAME.set(clientSpan, host.getHostName());
            Tags.PEER_PORT.set(clientSpan, (short) host.getPort());
        }

        onSpanStarted(clientSpan, httpRequest, httpContext);

        tracer.inject(clientSpan.context(), Format.Builtin.HTTP_HEADERS, new ClientRequestCarrier(httpRequest));

        httpContext.setAttribute(Constants.CURRENT_SPAN_CONTEXT_KEY, clientSpan);
    } catch (Exception e) {
        logger.error("Could not start client tracing span.", e);
    }
}

From source file:com.amazonaws.client.service.AmazonHttpClient.java

/**
 * Internal method to execute the HTTP method given.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T> Response<T> executeHelper(final Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if it delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics()
            .addPropertyWith(ServiceName, request.getServiceName())
            .addPropertyWith(ServiceEndpoint, request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());
    final AWSCredentials credentials = executionContext.getCredentials();
    AmazonWebServiceRequest awsreq = request.getOriginalRequest();
    ProgressListener listener = awsreq.getGeneralProgressListener();
    Signer signer = null;

    while (true) {
        ++requestCount;
        awsRequestMetrics.setCounter(RequestCount, requestCount);
        if (requestCount > 1) { // retry
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }
        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse apacheResponse = null;

        try {
            // Sign the request if a signer was provided
            if (signer == null)
                signer = executionContext.getSignerByURI(request.getEndpoint());
            if (signer != null && credentials != null) {
                awsRequestMetrics.startEvent(RequestSigningTime);
                try {
                    signer.sign(request, credentials);
                } finally {
                    awsRequestMetrics.endEvent(RequestSigningTime);
                }
            }

            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }

            httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                httpRequest.setURI(redirectedURI);
            }

            if (requestCount > 1) { // retry
                // Notify the progress listener of the retry
                publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);

                awsRequestMetrics.startEvent(RetryPauseTime);
                try {
                    pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount,
                            config.getRetryPolicy());
                } finally {
                    awsRequestMetrics.endEvent(RetryPauseTime);
                }
            }

            if (entity != null) {
                InputStream content = entity.getContent();
                if (requestCount > 1) { // retry
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
            HttpContext httpContext = new BasicHttpContext();
            httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
            retriedException = null;

            publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT);
            awsRequestMetrics.startEvent(HttpRequestTime);
            try {
                apacheResponse = httpClient.execute(httpRequest, httpContext);
            } finally {
                awsRequestMetrics.endEvent(HttpRequestTime);
            }
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT);
            final StatusLine statusLine = apacheResponse.getStatusLine();
            final int statusCode = statusLine == null ? -1 : statusLine.getStatusCode();
            if (isRequestSuccessful(apacheResponse)) {
                awsRequestMetrics.addProperty(StatusCode, statusCode);
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
                T response = handleResponse(request, responseHandler, httpRequest, httpResponse, apacheResponse,
                        executionContext);
                return new Response<T>(response, httpResponse);
            }
            if (isTemporaryRedirect(apacheResponse)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = apacheResponse.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                if (log.isDebugEnabled())
                    log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addPropertyWith(StatusCode, statusCode)
                        .addPropertyWith(RedirectLocation, redirectedLocation)
                        .addPropertyWith(AWSRequestID, null);
                continue;
            }
            leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
            final AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest,
                    apacheResponse);
            awsRequestMetrics.addPropertyWith(AWSRequestID, ase.getRequestId())
                    .addPropertyWith(AWSErrorCode, ase.getErrorCode())
                    .addPropertyWith(StatusCode, ase.getStatusCode());
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount,
                    config.getRetryPolicy())) {
                throw ase;
            }
            // Comment out for now. Ref: CR2662349
            // Preserve the cause of retry before retrying
            // awsRequestMetrics.addProperty(RetryCause, ase);
            if (RetryUtils.isThrottlingException(ase)) {
                awsRequestMetrics.incrementCounterWith(ThrottleException).addProperty(ThrottleException, ase);
            }
            // Cache the retryable exception
            retriedException = ase;
            /*
             * Checking for clock skew error again because we don't want to set the
             * global time offset for every service exception.
             */
            if (RetryUtils.isClockSkewError(ase)) {
                int timeOffset = parseClockSkewOffset(apacheResponse, ase);
                SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
            }
            resetRequestAfterError(request, ase);
        } catch (IOException ioe) {
            if (log.isInfoEnabled()) {
                log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            captureExceptionMetrics(ioe, awsRequestMetrics);
            awsRequestMetrics.addProperty(AWSRequestID, null);
            AmazonClientException ace = new AmazonClientException(
                    "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount,
                    config.getRetryPolicy())) {
                throw ace;
            }

            // Cache the retryable exception
            retriedException = ace;
            resetRequestAfterError(request, ioe);
        } catch (RuntimeException e) {
            throw captureExceptionMetrics(e, awsRequestMetrics);
        } catch (Error e) {
            throw captureExceptionMetrics(e, awsRequestMetrics);
        } finally {
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    if (apacheResponse != null && apacheResponse.getEntity() != null
                            && apacheResponse.getEntity().getContent() != null) {
                        apacheResponse.getEntity().getContent().close();
                    }
                } catch (IOException e) {
                    log.warn("Cannot close the response content.", e);
                }
            }
        }
    } /* end while (true) */
}