List of usage examples for the org.apache.http.impl.conn.PoolingHttpClientConnectionManager constructor
public PoolingHttpClientConnectionManager()
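Before the project examples, here is a minimal self-contained sketch of the pattern they all share: create the manager with the no-argument constructor, tune the pool limits, and hand it to an HttpClientBuilder. The pool sizes and the target URL below are illustrative placeholders, not taken from any of the projects listed.

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;

public class PoolingExample {
    public static void main(String[] args) throws Exception {
        // The defaults are 2 connections per route and 20 in total;
        // raise them here (illustrative values only).
        PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
        cm.setDefaultMaxPerRoute(10);
        cm.setMaxTotal(50);

        CloseableHttpClient client = HttpClients.custom()
                .setConnectionManager(cm)
                .build();
        try (CloseableHttpResponse response = client.execute(new HttpGet("http://example.com/"))) {
            System.out.println(response.getStatusLine());
        } finally {
            client.close();
        }
    }
}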
From source file:org.commonjava.aprox.client.core.AproxClientHttp.java
public synchronized void connect() {
    if (this.connectionManager == null) {
        final PoolingHttpClientConnectionManager pcm = new PoolingHttpClientConnectionManager();
        pcm.setDefaultMaxPerRoute(GLOBAL_MAX_CONNECTIONS);
        this.connectionManager = new CloseBlockingConnectionManager(pcm);
    }
}
From source file:org.zanata.sync.jobs.system.ResourceProducer.java
@Produces
@RestClient
protected Client client(@JAXRSClientConnectionPoolSize int poolSize) {
    // This will create a thread-safe JAX-RS client using pooled connections.
    // By default this implementation will create no more than 2 concurrent
    // connections per given route and no more than 20 connections in total
    // (see the javadoc of PoolingHttpClientConnectionManager).
    PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
    CloseableHttpClient closeableHttpClient = HttpClientBuilder.create().setConnectionManager(cm).build();
    ApacheHttpClient4Engine engine = new ApacheHttpClient4Engine(closeableHttpClient);
    return new ResteasyClientBuilder().httpEngine(engine).build();
}
From source file:org.apache.solr.client.solrj.impl.ConnectionReuseTest.java
@Test
public void testConnectionReuse() throws Exception {
    URL url = cluster.getJettySolrRunners().get(0).getBaseUrl();
    PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
    CloseableHttpClient httpClient = HttpClientUtil.createClient(null, cm);
    try (SolrClient client = buildClient(httpClient, url)) {
        HttpHost target = new HttpHost(url.getHost(), url.getPort(), isSSLMode() ? "https" : "http");
        HttpRoute route = new HttpRoute(target);

        ConnectionRequest mConn = getClientConnectionRequest(httpClient, route, cm);
        HttpClientConnection conn1 = getConn(mConn);
        headerRequest(target, route, conn1, cm);
        cm.releaseConnection(conn1, null, -1, TimeUnit.MILLISECONDS);

        int queueBreaks = 0;
        int cnt1 = atLeast(3);
        int cnt2 = atLeast(30);

        for (int j = 0; j < cnt1; j++) {
            boolean done = false;
            for (int i = 0; i < cnt2; i++) {
                AddUpdateCommand c = new AddUpdateCommand(null);
                c.solrDoc = sdoc("id", id.incrementAndGet());
                try {
                    client.add(c.solrDoc);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                if (!done && i > 0 && i < cnt2 - 1 && client instanceof ConcurrentUpdateSolrClient
                        && random().nextInt(10) > 8) {
                    queueBreaks++;
                    done = true;
                    Thread.sleep(350); // wait past streaming client poll time of 250ms
                }
            }
            if (client instanceof ConcurrentUpdateSolrClient) {
                ((ConcurrentUpdateSolrClient) client).blockUntilFinished();
            }
        }

        route = new HttpRoute(new HttpHost(url.getHost(), url.getPort(), isSSLMode() ? "https" : "http"));
        mConn = cm.requestConnection(route, HttpSolrClient.cacheKey);
        HttpClientConnection conn2 = getConn(mConn);
        HttpConnectionMetrics metrics = conn2.getMetrics();
        headerRequest(target, route, conn2, cm);
        cm.releaseConnection(conn2, null, -1, TimeUnit.MILLISECONDS);

        assertNotNull("No connection metrics found - is the connection getting aborted? server closing the connection? "
                + client.getClass().getSimpleName(), metrics);

        // we try and make sure the connection we get has handled all of the requests in this test
        if (client instanceof ConcurrentUpdateSolrClient) {
            // we can't fully control queue polling breaking up requests - allow a bit of leeway
            int exp = cnt1 + queueBreaks + 2;
            assertTrue("We expected all communication via streaming client to use one connection! expected="
                    + exp + " got=" + metrics.getRequestCount(),
                    Math.max(exp, metrics.getRequestCount()) - Math.min(exp, metrics.getRequestCount()) < 3);
        } else {
            assertTrue("We expected all communication to use one connection! "
                    + client.getClass().getSimpleName() + " " + metrics.getRequestCount(),
                    cnt1 * cnt2 + 2 <= metrics.getRequestCount());
        }
    } finally {
        HttpClientUtil.close(httpClient);
    }
}
From source file:org.fcrepo.kernel.impl.identifiers.HttpPidMinter.java
/**
 * Set up authentication in httpclient.
 **/
protected HttpClient buildClient() {
    HttpClientBuilder builder = HttpClientBuilder.create().useSystemProperties()
            .setConnectionManager(new PoolingHttpClientConnectionManager());
    if (!isBlank(username) && !isBlank(password)) {
        final URI uri = URI.create(url);
        final CredentialsProvider credsProvider = new BasicCredentialsProvider();
        credsProvider.setCredentials(new AuthScope(uri.getHost(), uri.getPort()),
                new UsernamePasswordCredentials(username, password));
        builder = builder.setDefaultCredentialsProvider(credsProvider);
    }
    return builder.build();
}
From source file:org.squashtest.tm.plugin.testautomation.jenkins.internal.net.HttpClientProvider.java
public HttpClientProvider() {
    PoolingHttpClientConnectionManager manager = new PoolingHttpClientConnectionManager();
    manager.setMaxTotal(25);
    client = HttpClients.custom().setConnectionManager(manager)
            .addInterceptorFirst(new PreemptiveAuthInterceptor())
            .setDefaultCredentialsProvider(credentialsProvider).build();
    requestFactory = new HttpComponentsClientHttpRequestFactory(client);
}
From source file:com.arpnetworking.metrics.impl.ApacheHttpSink.java
ApacheHttpSink(final Builder builder, final Logger logger) {
    this(builder, new SingletonSupplier<>(() -> {
        final SingletonSupplier<PoolingHttpClientConnectionManager> clientManagerSupplier = new SingletonSupplier<>(
                () -> {
                    final PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
                    connectionManager.setDefaultMaxPerRoute(builder._parallelism);
                    connectionManager.setMaxTotal(builder._parallelism);
                    return connectionManager;
                });
        return HttpClients.custom().setConnectionManager(clientManagerSupplier.get()).build();
    }), logger);
}
From source file:org.exoplatform.outlook.mail.MailAPI.java
/**
 * Instantiates a new mail API.
 *
 * @param httpClient the http client
 * @throws MailServerException the mail server exception
 */
MailAPI(CloseableHttpClient httpClient) throws MailServerException {
    if (httpClient == null) {
        // FYI it's possible to make more advanced connection manager settings
        // (host verification X509, connection config, message parser etc.)
        PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
        // 2 is recommended by RFC 2616 sec 8.1.4; we make it bigger for quicker upload
        connectionManager.setDefaultMaxPerRoute(10);
        connectionManager.setMaxTotal(100);

        // Create global request configuration
        RequestConfig defaultRequestConfig = RequestConfig.custom().setExpectContinueEnabled(true)
                .setStaleConnectionCheckEnabled(true).setAuthenticationEnabled(true)
                .setTargetPreferredAuthSchemes(Arrays.asList(AuthSchemes.BASIC))
                // .setProxyPreferredAuthSchemes(Arrays.asList(AuthSchemes.BASIC))
                // .setCookieSpec(CookieSpecs.BEST_MATCH)
                .build();

        // Create HTTP client
        this.httpClient = HttpClients.custom().setConnectionManager(connectionManager)
                // .setDefaultCredentialsProvider(credsProvider)
                .setDefaultRequestConfig(defaultRequestConfig).build();
    } else {
        // Use given HTTP client (for tests)
        this.httpClient = httpClient;
    }

    // Default header (Accept JSON), add to those requests where required
    this.acceptJsonHeader = new BasicHeader("Accept", ContentType.APPLICATION_JSON.getMimeType());

    // Add AuthCache to the execution context
    this.httpContext = HttpClientContext.create();
}
From source file:com.vmware.bdd.plugin.clouderamgr.poller.host.HostInstallPoller.java
@Override
public void setup() {
    try {
        login();
        httpClientConnectionManager = new PoolingHttpClientConnectionManager();
        httpClientConnectionManager.setMaxTotal(20);
        this.httpClient = HttpClients.custom().setDefaultCookieStore(cookieStore)
                .setConnectionManager(httpClientConnectionManager).build();
        int maxSessionNum = 10;
        int i = 0;
        reported = true;
        executor = Executors.newCachedThreadPool();
        for (final ApiCommand command : rootResource.getCommandsResource().readCommand(parentCmdId)
                .getChildren()) {
            /* Each crawler will launch an HTTP session with the CM server and keep on requesting.
             * So far we only report cluster-level status, so there is no need to monitor each subcommand,
             * especially for a large-scale cluster. */
            if (i == maxSessionNum) {
                break;
            }
            executor.submit(new Crawler(command.getId()));
            i += 1;
        }
    } catch (Exception e) {
        // As this implementation does not follow official APIs, it may not work
        // in a future version; just ignore any exception
    }
}