List of usage examples for org.apache.http.client.ClientProtocolException#getMessage()
public String getMessage()
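Before the project-specific examples below, here is a minimal, self-contained sketch of the basic pattern: catch ClientProtocolException around HttpClient#execute and read its message. The target URL and the use of System.err are illustrative assumptions, not taken from any of the source files below.

import java.io.IOException;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.HttpClientBuilder;

public class GetMessageExample {
    public static void main(String[] args) {
        HttpClient client = HttpClientBuilder.create().build();
        try {
            HttpResponse response = client.execute(new HttpGet("http://example.com/"));
            System.out.println("Status: " + response.getStatusLine().getStatusCode());
        } catch (ClientProtocolException e) {
            // Protocol-level failure (e.g. a redirect loop); getMessage() carries the detail.
            System.err.println("Protocol error: " + e.getMessage());
        } catch (IOException e) {
            // ClientProtocolException is a subclass of IOException, so order matters here.
            System.err.println("I/O error: " + e.getMessage());
        }
    }
}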
From source file:crawlercommons.fetcher.http.SimpleHttpFetcher.java
private FetchedResult doRequest(HttpRequestBase request, String url, Payload payload) throws BaseFetchException {
    LOGGER.trace("Fetching " + url);

    HttpResponse response;
    long readStartTime;
    Metadata headerMap = new Metadata();
    String redirectedUrl = null;
    String newBaseUrl = null;
    int numRedirects = 0;
    boolean needAbort = true;
    String contentType = "";
    String mimeType = "";
    String hostAddress = null;
    int statusCode = HttpStatus.SC_INTERNAL_SERVER_ERROR;
    String reasonPhrase = null;

    // Create a local instance of cookie store, and bind to local context.
    // Without this we get killed w/lots of threads, due to sync() on single cookie store.
    HttpContext localContext = new BasicHttpContext();
    CookieStore cookieStore = localCookieStore.get();
    localContext.setAttribute(HttpClientContext.COOKIE_STORE, cookieStore);

    StringBuilder fetchTrace = null;
    if (LOGGER.isTraceEnabled()) {
        fetchTrace = new StringBuilder("Fetched url: " + url);
    }

    try {
        request.setURI(new URI(url));

        readStartTime = System.currentTimeMillis();
        response = _httpClient.execute(request, localContext);

        Header[] headers = response.getAllHeaders();
        for (Header header : headers) {
            headerMap.add(header.getName(), header.getValue());
        }

        statusCode = response.getStatusLine().getStatusCode();
        reasonPhrase = response.getStatusLine().getReasonPhrase();

        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; status code: " + statusCode);
            if (headerMap.get(HttpHeaders.CONTENT_LENGTH) != null) {
                fetchTrace.append("; Content-Length: " + headerMap.get(HttpHeaders.CONTENT_LENGTH));
            }
            if (headerMap.get(HttpHeaders.LOCATION) != null) {
                fetchTrace.append("; Location: " + headerMap.get(HttpHeaders.LOCATION));
            }
        }

        if ((statusCode < 200) || (statusCode >= 300)) {
            // We can't just check against SC_OK, as some wackos return 201, 202, etc.
            throw new HttpFetchException(url, "Error fetching " + url + " due to \"" + reasonPhrase + "\"", statusCode, headerMap);
        }

        redirectedUrl = extractRedirectedUrl(url, localContext);

        URI permRedirectUri = (URI) localContext.getAttribute(PERM_REDIRECT_CONTEXT_KEY);
        if (permRedirectUri != null) {
            newBaseUrl = permRedirectUri.toURL().toExternalForm();
        }

        Integer redirects = (Integer) localContext.getAttribute(REDIRECT_COUNT_CONTEXT_KEY);
        if (redirects != null) {
            numRedirects = redirects.intValue();
        }

        hostAddress = (String) (localContext.getAttribute(HOST_ADDRESS));
        if (hostAddress == null) {
            throw new UrlFetchException(url, "Host address not saved in context");
        }

        Header cth = response.getFirstHeader(HttpHeaders.CONTENT_TYPE);
        if (cth != null) {
            contentType = cth.getValue();
        }

        // Check if we should abort due to mime-type filtering. Note that this will fail if the server
        // doesn't report a mime-type, but that's how we want it, as this configuration is typically
        // used when only a subset of parsers are installed/enabled, so we don't want the auto-detect
        // code in Tika to get triggered & try to process an unsupported type. If you want unknown
        // mime-types from the server to be processed, set "" as one of the valid mime-types in FetcherPolicy.
        mimeType = getMimeTypeFromContentType(contentType);
        Set<String> mimeTypes = getValidMimeTypes();
        if ((mimeTypes != null) && (mimeTypes.size() > 0)) {
            if (!mimeTypes.contains(mimeType)) {
                throw new AbortedFetchException(url, "Invalid mime-type: " + mimeType, AbortedFetchReason.INVALID_MIMETYPE);
            }
        }

        needAbort = false;
    } catch (ClientProtocolException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException
        // (of which this is a subclass).
        needAbort = false;

        // If the root cause was a "too many redirects" error, we want to map this to a specific
        // exception that contains the final redirect.
        if (e.getCause() instanceof MyRedirectException) {
            MyRedirectException mre = (MyRedirectException) e.getCause();
            String redirectUrl = url;
            try {
                redirectUrl = mre.getUri().toURL().toExternalForm();
            } catch (MalformedURLException e2) {
                LOGGER.warn("Invalid URI saved during redirect handling: " + mre.getUri());
            }
            throw new RedirectFetchException(url, redirectUrl, mre.getReason());
        } else if (e.getCause() instanceof RedirectException) {
            LOGGER.error(e.getMessage());
            throw new RedirectFetchException(url, extractRedirectedUrl(url, localContext), RedirectExceptionReason.TOO_MANY_REDIRECTS);
        } else {
            throw new IOFetchException(url, e);
        }
    } catch (IOException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException.
        needAbort = false;
        throw new IOFetchException(url, e);
    } catch (URISyntaxException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (IllegalStateException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (BaseFetchException e) {
        throw e;
    } catch (Exception e) {
        // Map anything else to a generic IOFetchException.
        // TODO KKr - create generic fetch exception
        throw new IOFetchException(url, new IOException(e));
    } finally {
        safeAbort(needAbort, request);
    }

    // Figure out how much data we want to try to fetch.
    int maxContentSize = getMaxContentSize(mimeType);
    int targetLength = maxContentSize;
    boolean truncated = false;
    String contentLengthStr = headerMap.get(HttpHeaders.CONTENT_LENGTH);
    if (contentLengthStr != null) {
        try {
            int contentLength = Integer.parseInt(contentLengthStr);
            if (contentLength > targetLength) {
                truncated = true;
            } else {
                targetLength = contentLength;
            }
        } catch (NumberFormatException e) {
            // Ignore (and log) invalid content length values.
            LOGGER.warn("Invalid content length in header: " + contentLengthStr);
        }
    }

    // Now finally read in response body, up to targetLength bytes.
    // Note that entity might be null, for zero length responses.
    byte[] content = new byte[0];
    long readRate = 0;
    HttpEntity entity = response.getEntity();
    needAbort = true;

    if (entity != null) {
        InputStream in = null;
        try {
            in = entity.getContent();
            byte[] buffer = new byte[BUFFER_SIZE];
            int bytesRead = 0;
            int totalRead = 0;
            ByteArrayOutputStream out = new ByteArrayOutputStream(DEFAULT_BYTEARRAY_SIZE);

            int readRequests = 0;
            int minResponseRate = getMinResponseRate();
            // TODO KKr - we need to monitor the rate while reading a single block. Look at HttpClient
            // metrics support for how to do this. Once we fix this, fix the test to read a smaller
            // (< 20K) chunk of data.
            while ((totalRead < targetLength)
                    && ((bytesRead = in.read(buffer, 0, Math.min(buffer.length, targetLength - totalRead))) != -1)) {
                readRequests += 1;
                totalRead += bytesRead;
                out.write(buffer, 0, bytesRead);

                // Assume read time is at least one millisecond, to avoid DBZ exception.
                long totalReadTime = Math.max(1, System.currentTimeMillis() - readStartTime);
                readRate = (totalRead * 1000L) / totalReadTime;

                // Don't bail on the first read cycle, as we can get a hiccup starting out.
                // Also don't bail if we've read everything we need.
                if ((readRequests > 1) && (totalRead < targetLength) && (readRate < minResponseRate)) {
                    throw new AbortedFetchException(url, "Slow response rate of " + readRate + " bytes/sec", AbortedFetchReason.SLOW_RESPONSE_RATE);
                }

                // Check to see if we got interrupted, but don't clear the interrupted flag.
                if (Thread.currentThread().isInterrupted()) {
                    throw new AbortedFetchException(url, AbortedFetchReason.INTERRUPTED);
                }
            }

            content = out.toByteArray();
            needAbort = truncated || (in.available() > 0);
        } catch (IOException e) {
            // We don't need to abort if there's an IOException.
            throw new IOFetchException(url, e);
        } finally {
            safeAbort(needAbort, request);
            safeClose(in);
        }
    }

    // Toss truncated image content.
    if ((truncated) && (!isTextMimeType(mimeType))) {
        throw new AbortedFetchException(url, "Truncated image", AbortedFetchReason.CONTENT_SIZE);
    }

    // Now see if we need to uncompress the content.
    String contentEncoding = headerMap.get(HttpHeaders.CONTENT_ENCODING);
    if (contentEncoding != null) {
        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; Content-Encoding: " + contentEncoding);
        }

        // TODO KKr We might want to just decompress a truncated gzip containing text (since we have
        // a max content size to save us from any gzip corruption). We might want to break the
        // following out into a separate method, by the way (if not refactor this entire monolithic
        // method).
        try {
            if ("gzip".equals(contentEncoding) || "x-gzip".equals(contentEncoding)) {
                if (truncated) {
                    throw new AbortedFetchException(url, "Truncated compressed data", AbortedFetchReason.CONTENT_SIZE);
                } else {
                    ExpandedResult expandedResult = EncodingUtils.processGzipEncoded(content, maxContentSize);
                    truncated = expandedResult.isTruncated();
                    if ((truncated) && (!isTextMimeType(mimeType))) {
                        throw new AbortedFetchException(url, "Truncated decompressed image", AbortedFetchReason.CONTENT_SIZE);
                    } else {
                        content = expandedResult.getExpanded();
                        if (LOGGER.isTraceEnabled()) {
                            fetchTrace.append("; unzipped to " + content.length + " bytes");
                        }
                    }
                    // } else if ("deflate".equals(contentEncoding)) {
                    //     content = EncodingUtils.processDeflateEncoded(content);
                    //     if (LOGGER.isTraceEnabled()) {
                    //         fetchTrace.append("; inflated to " + content.length + " bytes");
                    //     }
                }
            }
        } catch (IOException e) {
            throw new IOFetchException(url, e);
        }
    }

    // Finally dump out the trace msg we've been building.
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace(fetchTrace.toString());
    }

    // TODO KKr - Save truncated flag in FetchedResult/FetchedDatum.
    return new FetchedResult(url, redirectedUrl, System.currentTimeMillis(), headerMap, content, contentType,
            (int) readRate, payload, newBaseUrl, numRedirects, hostAddress, statusCode, reasonPhrase);
}
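Distilled from the example above: HttpClient wraps redirect-policy failures (org.apache.http.client.RedirectException) inside ClientProtocolException, so handlers inspect getCause() before falling back to getMessage(). A hedged sketch of just that unwrapping; the class and method names are illustrative assumptions:

import java.io.IOException;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.RedirectException;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.HttpClientBuilder;

public class RedirectAwareFetch {
    // Hypothetical helper: distinguishes redirect failures (wrapped by
    // HttpClient in ClientProtocolException) from other protocol errors.
    static HttpResponse fetch(HttpClient client, String url) throws IOException {
        try {
            return client.execute(new HttpGet(url));
        } catch (ClientProtocolException e) {
            if (e.getCause() instanceof RedirectException) {
                // e.g. too many redirects or a circular redirect
                throw new IOException("Redirect failure for " + url + ": " + e.getMessage(), e);
            }
            throw e;
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(fetch(HttpClientBuilder.create().build(), "http://example.com/").getStatusLine());
    }
}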
From source file:org.apache.solr.util.SolrCLI.java
/**
 * Utility function for sending HTTP GET request to Solr and then doing some
 * validation of the response.
 */
@SuppressWarnings({ "unchecked" })
public static Map<String, Object> getJson(HttpClient httpClient, String getUrl) throws Exception {
    try {
        // ensure we're requesting JSON back from Solr
        HttpGet httpGet = new HttpGet(new URIBuilder(getUrl).setParameter(CommonParams.WT, CommonParams.JSON).build());

        // make the request and get back a parsed JSON object
        Map<String, Object> json = httpClient.execute(httpGet, new SolrResponseHandler(),
                HttpClientUtil.createNewHttpClientRequestContext());

        // check the response JSON from Solr to see if it is an error
        Long statusCode = asLong("/responseHeader/status", json);
        if (statusCode == -1) {
            throw new SolrServerException("Unable to determine outcome of GET request to: " + getUrl + "! Response: " + json);
        } else if (statusCode != 0) {
            String errMsg = asString("/error/msg", json);
            if (errMsg == null)
                errMsg = String.valueOf(json);
            throw new SolrServerException(errMsg);
        } else {
            // make sure no "failure" object in there either
            Object failureObj = json.get("failure");
            if (failureObj != null) {
                if (failureObj instanceof Map) {
                    Object err = ((Map) failureObj).get("");
                    if (err != null)
                        throw new SolrServerException(err.toString());
                }
                throw new SolrServerException(failureObj.toString());
            }
        }
        return json;
    } catch (ClientProtocolException cpe) {
        // Currently detecting authentication by string-matching the HTTP response.
        // Perhaps SolrClient should have thrown an exception itself??
        if (cpe.getMessage().contains("HTTP ERROR 401") || cpe.getMessage().contentEquals("HTTP ERROR 403")) {
            int code = cpe.getMessage().contains("HTTP ERROR 401") ? 401 : 403;
            throw new SolrException(SolrException.ErrorCode.getErrorCode(code),
                    "Solr requires authentication for " + getUrl + ". Please supply valid credentials. HTTP code=" + code);
        } else {
            throw cpe;
        }
    }
}
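The SolrCLI example infers authentication failures by string-matching getMessage(). A distilled, hedged sketch of that idea separated from the Solr types; the helper name and the sample message are illustrative assumptions:

import org.apache.http.client.ClientProtocolException;

public class AuthErrorSniffer {
    // Hypothetical helper mirroring SolrCLI's approach: infer an HTTP auth
    // failure from the exception message, since the client surfaces the raw
    // error text rather than a typed exception.
    static int authCodeFromMessage(ClientProtocolException cpe) {
        String msg = cpe.getMessage() == null ? "" : cpe.getMessage();
        if (msg.contains("HTTP ERROR 401")) return 401;
        if (msg.contains("HTTP ERROR 403")) return 403;
        return -1; // not an auth failure we recognize
    }

    public static void main(String[] args) {
        ClientProtocolException sample = new ClientProtocolException("HTTP ERROR 401 Unauthorized");
        System.out.println(authCodeFromMessage(sample)); // prints 401
    }
}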
From source file:crawlercommons.fetcher.SimpleHttpFetcher.java
private FetchedResult doRequest(HttpRequestBase request, String url, Payload payload) throws BaseFetchException {
    LOGGER.trace("Fetching " + url);

    HttpResponse response;
    long readStartTime;
    Metadata headerMap = new Metadata();
    String redirectedUrl = null;
    String newBaseUrl = null;
    int numRedirects = 0;
    boolean needAbort = true;
    String contentType = "";
    String mimeType = "";
    String hostAddress = null;

    // Create a local instance of cookie store, and bind to local context.
    // Without this we get killed w/lots of threads, due to sync() on single cookie store.
    HttpContext localContext = new BasicHttpContext();
    CookieStore cookieStore = new BasicCookieStore();
    localContext.setAttribute(ClientContext.COOKIE_STORE, cookieStore);

    StringBuilder fetchTrace = null;
    if (LOGGER.isTraceEnabled()) {
        fetchTrace = new StringBuilder("Fetched url: " + url);
    }

    try {
        request.setURI(new URI(url));

        readStartTime = System.currentTimeMillis();
        response = _httpClient.execute(request, localContext);

        Header[] headers = response.getAllHeaders();
        for (Header header : headers) {
            headerMap.add(header.getName(), header.getValue());
        }

        int httpStatus = response.getStatusLine().getStatusCode();
        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; status code: " + httpStatus);
            if (headerMap.get(HttpHeaders.CONTENT_LENGTH) != null) {
                fetchTrace.append("; Content-Length: " + headerMap.get(HttpHeaders.CONTENT_LENGTH));
            }
            if (headerMap.get(HttpHeaders.LOCATION) != null) {
                fetchTrace.append("; Location: " + headerMap.get(HttpHeaders.LOCATION));
            }
        }

        if ((httpStatus < 200) || (httpStatus >= 300)) {
            // We can't just check against SC_OK, as some wackos return 201, 202, etc.
            throw new HttpFetchException(url, "Error fetching " + url, httpStatus, headerMap);
        }

        redirectedUrl = extractRedirectedUrl(url, localContext);

        URI permRedirectUri = (URI) localContext.getAttribute(PERM_REDIRECT_CONTEXT_KEY);
        if (permRedirectUri != null) {
            newBaseUrl = permRedirectUri.toURL().toExternalForm();
        }

        Integer redirects = (Integer) localContext.getAttribute(REDIRECT_COUNT_CONTEXT_KEY);
        if (redirects != null) {
            numRedirects = redirects.intValue();
        }

        hostAddress = (String) (localContext.getAttribute(HOST_ADDRESS));
        if (hostAddress == null) {
            throw new UrlFetchException(url, "Host address not saved in context");
        }

        Header cth = response.getFirstHeader(HttpHeaders.CONTENT_TYPE);
        if (cth != null) {
            contentType = cth.getValue();
        }

        // Check if we should abort due to mime-type filtering. Note that this will fail if the server
        // doesn't report a mime-type, but that's how we want it, as this configuration is typically
        // used when only a subset of parsers are installed/enabled, so we don't want the auto-detect
        // code in Tika to get triggered & try to process an unsupported type. If you want unknown
        // mime-types from the server to be processed, set "" as one of the valid mime-types in FetcherPolicy.
        mimeType = getMimeTypeFromContentType(contentType);
        Set<String> mimeTypes = getValidMimeTypes();
        if ((mimeTypes != null) && (mimeTypes.size() > 0)) {
            if (!mimeTypes.contains(mimeType)) {
                throw new AbortedFetchException(url, "Invalid mime-type: " + mimeType, AbortedFetchReason.INVALID_MIMETYPE);
            }
        }

        needAbort = false;
    } catch (ClientProtocolException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException (of which this is a subclass).
        needAbort = false;

        // If the root cause was a "too many redirects" error, we want to map this to a specific
        // exception that contains the final redirect.
        if (e.getCause() instanceof MyRedirectException) {
            MyRedirectException mre = (MyRedirectException) e.getCause();
            String redirectUrl = url;
            try {
                redirectUrl = mre.getUri().toURL().toExternalForm();
            } catch (MalformedURLException e2) {
                LOGGER.warn("Invalid URI saved during redirect handling: " + mre.getUri());
            }
            throw new RedirectFetchException(url, redirectUrl, mre.getReason());
        } else if (e.getCause() instanceof RedirectException) {
            throw new RedirectFetchException(url, extractRedirectedUrl(url, localContext), RedirectExceptionReason.TOO_MANY_REDIRECTS);
        } else {
            throw new IOFetchException(url, e);
        }
    } catch (IOException e) {
        // Oleg guarantees that no abort is needed in the case of an IOException.
        needAbort = false;

        if (e instanceof ConnectionPoolTimeoutException) {
            // Should never happen, so let's dump some info about the connection pool.
            ThreadSafeClientConnManager cm = (ThreadSafeClientConnManager) _httpClient.getConnectionManager();
            int numConnections = cm.getConnectionsInPool();
            cm.closeIdleConnections(0, TimeUnit.MILLISECONDS);
            LOGGER.error(String.format("Got ConnectionPoolTimeoutException: %d connections before, %d after idle close",
                    numConnections, cm.getConnectionsInPool()));
        }

        throw new IOFetchException(url, e);
    } catch (URISyntaxException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (IllegalStateException e) {
        throw new UrlFetchException(url, e.getMessage());
    } catch (BaseFetchException e) {
        throw e;
    } catch (Exception e) {
        // Map anything else to a generic IOFetchException.
        // TODO KKr - create generic fetch exception
        throw new IOFetchException(url, new IOException(e));
    } finally {
        safeAbort(needAbort, request);
    }

    // Figure out how much data we want to try to fetch.
    int maxContentSize = getMaxContentSize(mimeType);
    int targetLength = maxContentSize;
    boolean truncated = false;
    String contentLengthStr = headerMap.get(HttpHeaders.CONTENT_LENGTH);
    if (contentLengthStr != null) {
        try {
            int contentLength = Integer.parseInt(contentLengthStr);
            if (contentLength > targetLength) {
                truncated = true;
            } else {
                targetLength = contentLength;
            }
        } catch (NumberFormatException e) {
            // Ignore (and log) invalid content length values.
            LOGGER.warn("Invalid content length in header: " + contentLengthStr);
        }
    }

    // Now finally read in response body, up to targetLength bytes.
    // Note that entity might be null, for zero length responses.
    byte[] content = new byte[0];
    long readRate = 0;
    HttpEntity entity = response.getEntity();
    needAbort = true;

    if (entity != null) {
        InputStream in = null;
        try {
            in = entity.getContent();
            byte[] buffer = new byte[BUFFER_SIZE];
            int bytesRead = 0;
            int totalRead = 0;
            ByteArrayOutputStream out = new ByteArrayOutputStream(DEFAULT_BYTEARRAY_SIZE);

            int readRequests = 0;
            int minResponseRate = getMinResponseRate();
            // TODO KKr - we need to monitor the rate while reading a single block. Look at HttpClient
            // metrics support for how to do this. Once we fix this, fix the test to read a smaller
            // (< 20K) chunk of data.
            while ((totalRead < targetLength)
                    && ((bytesRead = in.read(buffer, 0, Math.min(buffer.length, targetLength - totalRead))) != -1)) {
                readRequests += 1;
                totalRead += bytesRead;
                out.write(buffer, 0, bytesRead);

                // Assume read time is at least one millisecond, to avoid DBZ exception.
                long totalReadTime = Math.max(1, System.currentTimeMillis() - readStartTime);
                readRate = (totalRead * 1000L) / totalReadTime;

                // Don't bail on the first read cycle, as we can get a hiccup starting out.
                // Also don't bail if we've read everything we need.
                if ((readRequests > 1) && (totalRead < targetLength) && (readRate < minResponseRate)) {
                    throw new AbortedFetchException(url, "Slow response rate of " + readRate + " bytes/sec", AbortedFetchReason.SLOW_RESPONSE_RATE);
                }

                // Check to see if we got interrupted.
                if (Thread.interrupted()) {
                    throw new AbortedFetchException(url, AbortedFetchReason.INTERRUPTED);
                }
            }

            content = out.toByteArray();
            needAbort = truncated || (in.available() > 0);
        } catch (IOException e) {
            // We don't need to abort if there's an IOException.
            throw new IOFetchException(url, e);
        } finally {
            safeAbort(needAbort, request);
            safeClose(in);
        }
    }

    // Toss truncated image content.
    if ((truncated) && (!isTextMimeType(mimeType))) {
        throw new AbortedFetchException(url, "Truncated image", AbortedFetchReason.CONTENT_SIZE);
    }

    // Now see if we need to uncompress the content.
    String contentEncoding = headerMap.get(HttpHeaders.CONTENT_ENCODING);
    if (contentEncoding != null) {
        if (LOGGER.isTraceEnabled()) {
            fetchTrace.append("; Content-Encoding: " + contentEncoding);
        }

        // TODO KKr We might want to just decompress a truncated gzip containing text (since we have
        // a max content size to save us from any gzip corruption). We might want to break the
        // following out into a separate method, by the way (if not refactor this entire monolithic
        // method).
        try {
            if ("gzip".equals(contentEncoding) || "x-gzip".equals(contentEncoding)) {
                if (truncated) {
                    throw new AbortedFetchException(url, "Truncated compressed data", AbortedFetchReason.CONTENT_SIZE);
                } else {
                    ExpandedResult expandedResult = EncodingUtils.processGzipEncoded(content, maxContentSize);
                    truncated = expandedResult.isTruncated();
                    if ((truncated) && (!isTextMimeType(mimeType))) {
                        throw new AbortedFetchException(url, "Truncated decompressed image", AbortedFetchReason.CONTENT_SIZE);
                    } else {
                        content = expandedResult.getExpanded();
                        if (LOGGER.isTraceEnabled()) {
                            fetchTrace.append("; unzipped to " + content.length + " bytes");
                        }
                    }
                    // } else if ("deflate".equals(contentEncoding)) {
                    //     content = EncodingUtils.processDeflateEncoded(content);
                    //     if (LOGGER.isTraceEnabled()) {
                    //         fetchTrace.append("; inflated to " + content.length + " bytes");
                    //     }
                }
            }
        } catch (IOException e) {
            throw new IOFetchException(url, e);
        }
    }

    // Finally dump out the trace msg we've been building.
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace(fetchTrace.toString());
    }

    // TODO KKr - Save truncated flag in FetchedResult/FetchedDatum.
    return new FetchedResult(url, redirectedUrl, System.currentTimeMillis(), headerMap, content, contentType,
            (int) readRate, payload, newBaseUrl, numRedirects, hostAddress);
}
From source file:nextflow.fs.dx.api.DxHttpClient.java
/**
 * Issues a request against the specified resource and returns either the
 * text of the response or the parsed JSON of the response (depending on
 * whether parseResponse is set).
 */
private ParsedResponse requestImpl(String resource, String data, boolean parseResponse) throws IOException {
    HttpPost request = new HttpPost(apiserver + resource);
    request.setHeader("Content-Type", "application/json");
    request.setHeader("Authorization",
            securityContext.get("auth_token_type").textValue() + " " + securityContext.get("auth_token").textValue());
    request.setEntity(new StringEntity(data));

    // Retry with exponential backoff
    int timeout = 1;
    for (int i = 0; i <= NUM_RETRIES; i++) {
        HttpResponse response = null;
        boolean okToRetry = false;
        try {
            response = httpclient.execute(request);
        } catch (ClientProtocolException e) {
            log.error(errorMessage("POST", resource, e.toString(), timeout, i + 1, NUM_RETRIES));
        } catch (IOException e) {
            log.error(errorMessage("POST", resource, e.toString(), timeout, i + 1, NUM_RETRIES));
        }

        if (response != null) {
            int statusCode = response.getStatusLine().getStatusCode();
            HttpEntity entity = response.getEntity();

            if (statusCode == HttpStatus.SC_OK) {
                // 200 OK
                byte[] value = EntityUtils.toByteArray(entity);
                int realLength = value.length;
                if (entity.getContentLength() >= 0 && realLength != entity.getContentLength()) {
                    String errorStr = "Received response of " + realLength + " bytes but Content-Length was "
                            + entity.getContentLength();
                    log.error(errorMessage("POST", resource, errorStr, timeout, i + 1, NUM_RETRIES));
                } else {
                    if (parseResponse) {
                        JsonNode responseJson = null;
                        try {
                            responseJson = DxJson.parseJson(new String(value, "UTF-8"));
                        } catch (JsonProcessingException e) {
                            if (entity.getContentLength() < 0) {
                                // Content-Length was not provided, and the JSON could not be parsed.
                                // Retry, since this is a streaming request from the server that
                                // probably just encountered a transient error.
                            } else {
                                throw e;
                            }
                        }
                        if (responseJson != null) {
                            return new ParsedResponse(null, responseJson);
                        }
                    } else {
                        return new ParsedResponse(new String(value, "UTF-8"), null);
                    }
                }
            } else {
                // Non-200 status codes.
                // 500 InternalError should get retried. 4xx errors should be considered not recoverable.
                if (statusCode < 500) {
                    throw new IOException(EntityUtils.toString(entity));
                } else {
                    log.error(errorMessage("POST", resource, EntityUtils.toString(entity), timeout, i + 1, NUM_RETRIES));
                }
            }
        }

        if (i < NUM_RETRIES) {
            try {
                Thread.sleep(timeout * 1000);
            } catch (InterruptedException e) {
                log.debug("Stopped sleep caused by: {}", e.getMessage());
            }
            timeout *= 2;
        }
    }

    throw new IOException("POST " + resource + " failed");
}
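The pattern above treats ClientProtocolException like any other transient I/O failure: log the message and retry with exponential backoff. A minimal, hedged sketch of that retry shape; the class name, endpoint, and retry budget are assumptions:

import java.io.IOException;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.HttpClientBuilder;

public class RetryingPost {
    static final int NUM_RETRIES = 5; // assumed retry budget

    static HttpResponse postWithRetry(HttpClient client, HttpPost request)
            throws IOException, InterruptedException {
        int timeoutSecs = 1;
        for (int attempt = 0; attempt <= NUM_RETRIES; attempt++) {
            try {
                return client.execute(request);
            } catch (IOException e) {
                // ClientProtocolException is an IOException subclass, so both
                // protocol and transport failures land here and get retried.
                System.err.println("Attempt " + (attempt + 1) + " failed: " + e.getMessage());
            }
            if (attempt < NUM_RETRIES) {
                Thread.sleep(timeoutSecs * 1000L);
                timeoutSecs *= 2; // exponential backoff
            }
        }
        throw new IOException("POST failed after " + (NUM_RETRIES + 1) + " attempts");
    }

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClientBuilder.create().build();
        HttpPost post = new HttpPost("http://example.com/api"); // assumed endpoint
        System.out.println(postWithRetry(client, post).getStatusLine());
    }
}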
From source file:netinf.node.chunking.ChunkedBO.java
private boolean providesRanges(String url) {
    HttpClient client = new DefaultHttpClient();
    try {
        HttpHead httpHead = new HttpHead(url);
        httpHead.setHeader("Range", "bytes=0-");
        try {
            HttpResponse response = client.execute(httpHead);
            int status = response.getStatusLine().getStatusCode();
            if (status == HttpStatus.SC_PARTIAL_CONTENT || status == HttpStatus.SC_OK) {
                return true;
            }
        } catch (ClientProtocolException e) {
            LOG.debug(e.getMessage());
        } catch (IOException e) {
            LOG.debug(e.getMessage());
        }
    } catch (IllegalArgumentException e) {
        LOG.debug(e.getMessage());
    }
    return false;
}
From source file:org.exoplatform.utils.ExoConnectionUtils.java
public static InputStream sendRequestWithoutAuthen(HttpResponse response) {
    InputStream ipstr = null;
    try {
        HttpEntity entity = response.getEntity();
        if (entity != null) {
            ipstr = entity.getContent();
        }
    } catch (ClientProtocolException e) {
        // The message is retrieved but discarded; failures here are silently
        // swallowed and the method returns null.
        e.getMessage();
    } catch (IOException e) {
        // Likewise swallowed.
        e.getMessage();
    }
    return ipstr;
}
From source file:org.flowable.content.rest.service.api.BaseSpringContentRestTestCase.java
protected CloseableHttpResponse internalExecuteRequest(HttpUriRequest request, int expectedStatusCode, boolean addJsonContentType) {
    CloseableHttpResponse response = null;
    try {
        if (addJsonContentType && request.getFirstHeader(HttpHeaders.CONTENT_TYPE) == null) {
            // Revert to default content-type
            request.addHeader(new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"));
        }
        response = client.execute(request);
        Assert.assertNotNull(response.getStatusLine());

        int responseStatusCode = response.getStatusLine().getStatusCode();
        if (expectedStatusCode != responseStatusCode) {
            LOGGER.info("Wrong status code : {}, but should be {}", responseStatusCode, expectedStatusCode);
            LOGGER.info("Response body: {}", IOUtils.toString(response.getEntity().getContent()));
        }

        Assert.assertEquals(expectedStatusCode, responseStatusCode);
        httpResponses.add(response);
        return response;
    } catch (ClientProtocolException e) {
        Assert.fail(e.getMessage());
    } catch (IOException e) {
        Assert.fail(e.getMessage());
    }
    return null;
}
From source file:org.occiware.clouddesigner.occi.linkeddata.connector.LdnodeConnector.java
/**
 * Simple CRUD function to share data between the MartServer and CloudDesigner.
 * @param crud determines the type of the operation.
 */
private void CRUD(CRUD crud) {
    if (IS_MARTSERVER) {
        return;
    }
    try {
        DefaultHttpClient httpClient = new DefaultHttpClient();

        // Request to find the ldnode by name (which is supposed to be a primary key).
        HttpGet getRequest = new HttpGet(MARTSERVER_URL + "?category=ldnode&attribute=occi.ld.node.name&value="
                + ((this.ldnodePOJO == null) ? this.getName() : this.ldnodePOJO.getAttributes().getName()));
        getRequest.addHeader("accept", "application/json");

        HttpPost postRequest = new HttpPost(MARTSERVER_URL + "ldnode/");
        postRequest.addHeader("Content-Type", "application/json");
        postRequest.addHeader("accept", "application/json");

        HttpResponse response = httpClient.execute(getRequest);
        if (response.getStatusLine().getStatusCode() != 200) {
            throw new RuntimeException("Failed : HTTP error code : " + response.getStatusLine().getStatusCode());
        }

        BufferedReader br = new BufferedReader(new InputStreamReader((response.getEntity().getContent())));
        String output = org.apache.commons.io.IOUtils.toString(br);

        final ObjectMapper mapper = new ObjectMapper();
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        LdnodePOJO ldnodePOJOBuffer = mapper.readValue(output, LdnodePOJO.class);

        if (crud == CRUD.CREATE) {
            if (ldnodePOJOBuffer.getId() != null) {
                LOGGER.error("Couldn't create the resource on the server, it already exists.");
            } else {
                LdnodeAttributesPOJO ldnodeAttributes = new LdnodeAttributesPOJO(this.getName(), this.getMongoHosts(),
                        this.getMainProject(), this.getAnalyticsReadPreference());
                ldnodePOJO = new LdnodePOJO("urn:uuid:" + this.getId(), this.getTitle(), this.getSummary(),
                        this.getKind().getScheme() + this.getKind().getTitle().toLowerCase(),
                        new ArrayList<String>(), ldnodeAttributes, new ArrayList<String>());
                String ldnodeJSONString = mapper.writeValueAsString(ldnodePOJO);
                postRequest.setEntity(new StringEntity(ldnodeJSONString));
                httpClient.execute(postRequest);
            }
        } else if (crud == CRUD.READ) {
            if (ldnodePOJOBuffer.getId() != null) {
                ldnodePOJO = ldnodePOJOBuffer;
                this.setTitle(ldnodePOJO.getTitle());
                this.setId(ldnodePOJO.getId().substring(LdnodePOJO.ID_AFTER_N_CHARS, ldnodePOJOBuffer.getId().length()));
                this.setSummary(ldnodePOJO.getSummary());
                this.setName(ldnodePOJO.getAttributes().getName());
                this.setMongoHosts(ldnodePOJO.getAttributes().getMongoHosts());
                this.setMainProject(ldnodePOJO.getAttributes().getMainProject());
                this.setAnalyticsReadPreference(ldnodePOJO.getAttributes().getAnalyticsReadPreference());
            } else {
                LOGGER.error("Couldn't find the resource on the server, impossible to retrieve.");
            }
        } else if (crud == CRUD.UPDATE) {
            if (ldnodePOJOBuffer.getId() != null) {
                LdnodeAttributesPOJO ldnodeAttributes = ldnodePOJO.getAttributes();
                ldnodeAttributes.setName(this.getName());
                ldnodeAttributes.setMongoHosts(this.getMongoHosts());
                ldnodeAttributes.setMainProject(this.getMainProject());
                ldnodeAttributes.setAnalyticsReadPreference(this.getAnalyticsReadPreference());
                ldnodePOJO.setId("urn:uuid:" + this.getId());
                ldnodePOJO.setTitle(this.getTitle());
                ldnodePOJO.setSummary(this.getSummary());
                String ldnodeJSONString = mapper.writeValueAsString(ldnodePOJO);
                postRequest.setEntity(new StringEntity(ldnodeJSONString));
                httpClient.execute(postRequest);
            } else {
                LOGGER.error("Couldn't find the resource on the server, impossible to update.");
            }
        } else if (crud == CRUD.DELETE) {
            if (ldnodePOJOBuffer.getId() != null) {
                HttpDelete deleteRequest = new HttpDelete(MARTSERVER_URL + "ldnode/"
                        + ldnodePOJOBuffer.getId().substring(LdnodePOJO.ID_AFTER_N_CHARS, ldnodePOJOBuffer.getId().length()));
                deleteRequest.addHeader("accept", "application/json");
                httpClient.execute(deleteRequest);
            } else {
                LOGGER.error("Couldn't find the resource on the server, impossible to delete.");
            }
        }

        httpClient.getConnectionManager().shutdown();
    } catch (ClientProtocolException e) {
        LOGGER.error(e.getMessage());
    } catch (IOException e) {
        LOGGER.error(e.getMessage());
    }
}