List of usage examples for the org.apache.commons.pool.impl.GenericObjectPool constructor:
public GenericObjectPool(PoolableObjectFactory factory, int maxActive)
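Before the project examples below, here is a minimal, self-contained sketch of this constructor. The pooled type, class name, and the limit of 5 are purely illustrative: a BasePoolableObjectFactory supplies new instances, maxActive caps how many may be checked out at once, and callers return what they borrow.

import org.apache.commons.pool.BasePoolableObjectFactory;
import org.apache.commons.pool.impl.GenericObjectPool;

public class StringBuilderPoolExample {
    public static void main(String[] args) throws Exception {
        // Factory that creates and resets the pooled objects (illustrative example type).
        BasePoolableObjectFactory factory = new BasePoolableObjectFactory() {
            @Override
            public Object makeObject() {
                return new StringBuilder();
            }

            @Override
            public void passivateObject(Object obj) {
                ((StringBuilder) obj).setLength(0); // reset state before the object goes back to the pool
            }
        };

        // At most 5 objects may be checked out at the same time.
        GenericObjectPool pool = new GenericObjectPool(factory, 5);

        Object buffer = pool.borrowObject();
        try {
            ((StringBuilder) buffer).append("hello");
        } finally {
            pool.returnObject(buffer); // always return borrowed objects
        }
        pool.close();
    }
}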
From source file:org.geosdi.geoplatform.connector.jaxb.pool.GeoPlatformJAXBContextPool.java
/**
 * @param classesToBeBound
 *            list of java classes to be recognized by the new {@link JAXBContext}.
 *            Can be empty, in which case a {@link JAXBContext} that only knows about
 *            spec-defined classes will be returned.
 *
 * @throws JAXBException
 *             if an error was encountered while creating the <tt>JAXBContext</tt>,
 *             such as (but not limited to):
 *             <ol>
 *             <li>No JAXB implementation was discovered</li>
 *             <li>Classes use JAXB annotations incorrectly</li>
 *             <li>Classes have colliding annotations (i.e., two classes with the same type name)</li>
 *             <li>The JAXB implementation was unable to locate provider-specific out-of-band
 *             information (such as additional files generated at development time)</li>
 *             </ol>
 *
 * @throws IllegalArgumentException
 *             if the parameter contains {@code null} (i.e., {@code GeoPlatformJAXBContext(null);})
 */
public GeoPlatformJAXBContextPool(Class... classToBeBound) throws JAXBException {
    super(classToBeBound);
    this.marshallerPool = new GenericObjectPool<Marshaller>(new GPMarshallerFactory(jaxbContext),
            new GeoPlatformJAXBConfig());
    this.unmarshallerPool = new GenericObjectPool<Unmarshaller>(new GPUnmarshallerFactory(jaxbContext),
            new GeoPlatformJAXBConfig());
}
From source file:org.geosdi.geoplatform.connector.jaxb.pool.GeoPlatformJAXBContextPool.java
public GeoPlatformJAXBContextPool(JAXBContext theJaxbContext) {
    super(theJaxbContext);
    this.marshallerPool = new GenericObjectPool<Marshaller>(new GPMarshallerFactory(jaxbContext),
            new GeoPlatformJAXBConfig());
    this.unmarshallerPool = new GenericObjectPool<Unmarshaller>(new GPUnmarshallerFactory(jaxbContext),
            new GeoPlatformJAXBConfig());
}
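The accessor methods of GeoPlatformJAXBContextPool are not shown above; the helper below is only a sketch of how a GenericObjectPool<Marshaller> like marshallerPool is typically consumed, with the class and method names invented for illustration.

import java.io.StringWriter;
import javax.xml.bind.Marshaller;
import org.apache.commons.pool.impl.GenericObjectPool;

// Illustrative helper, not part of GeoPlatformJAXBContextPool: marshal an object
// using a Marshaller borrowed from a GenericObjectPool<Marshaller>.
public final class PooledMarshalling {
    public static String marshalToString(GenericObjectPool<Marshaller> marshallerPool, Object jaxbObject)
            throws Exception {
        Marshaller marshaller = marshallerPool.borrowObject();
        try {
            StringWriter writer = new StringWriter();
            marshaller.marshal(jaxbObject, writer);
            return writer.toString();
        } finally {
            // Return the Marshaller so the next caller can reuse it.
            marshallerPool.returnObject(marshaller);
        }
    }
}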
From source file:org.geotools.arcsde.session.SessionPool.java
/**
 * Creates a new SessionPool object for the given config.
 *
 * @param config
 *            holds connection options such as server, user and password, as well as tuning
 *            options such as the maximum number of connections allowed
 * @throws IOException
 *             if the connection could not be established
 * @throws NullPointerException
 *             if config is null
 */
protected SessionPool(ArcSDEConnectionConfig config) throws IOException {
    if (config == null) {
        throw new NullPointerException("parameter config can't be null");
    }

    this.config = config;
    LOGGER.fine("populating ArcSDE connection pool");

    this.seConnectionFactory = createConnectionFactory();

    final int minConnections = config.getMinConnections().intValue();
    final int maxConnections = config.getMaxConnections().intValue();
    if (minConnections > maxConnections) {
        throw new IllegalArgumentException("pool.minConnections > pool.maxConnections");
    }

    { // configure connection pool
        Config poolCfg = new Config();

        // pool upper limit
        poolCfg.maxActive = config.getMaxConnections().intValue();

        // minimum number of idle objects. MAKE SURE this is 0, otherwise the pool will keep
        // trying to create connections even after a connection failure, ultimately leading
        // to the exhaustion of resources
        poolCfg.minIdle = 0;

        // how many connections may be idle at any time? -1 = no limit. An eviction thread
        // takes care of idle connections (see minEvictableIdleTimeMillis and
        // timeBetweenEvictionRunsMillis)
        poolCfg.maxIdle = -1;

        // when the pool upper limit is reached, block and wait for an idle connection for
        // maxWait milliseconds before failing
        poolCfg.maxWait = config.getConnTimeOut().longValue();
        if (poolCfg.maxWait > 0) {
            poolCfg.whenExhaustedAction = GenericObjectPool.WHEN_EXHAUSTED_BLOCK;
        } else {
            poolCfg.whenExhaustedAction = GenericObjectPool.WHEN_EXHAUSTED_FAIL;
        }

        // check connection health at borrowObject()?
        poolCfg.testOnBorrow = true;
        // check connection health at returnObject()?
        poolCfg.testOnReturn = false;
        // periodically check the health of idle connections and discard those that can't be
        // validated?
        poolCfg.testWhileIdle = false;

        // check health of idle connections every 30 seconds
        // poolCfg.timeBetweenEvictionRunsMillis = 30000;

        // drop connections that have been idle for at least 5 minutes
        poolCfg.minEvictableIdleTimeMillis = 5 * 60 * 1000;

        pool = new GenericObjectPool(seConnectionFactory, poolCfg);
        LOGGER.fine("Created ArcSDE connection pool for " + config);
    }

    ISession[] preload = new ISession[minConnections];
    try {
        for (int i = 0; i < minConnections; i++) {
            preload[i] = (ISession) pool.borrowObject();
            if (i == 0) {
                SeRelease seRelease = preload[i].getRelease();
                String sdeDesc = seRelease.getDesc();
                int major = seRelease.getMajor();
                int minor = seRelease.getMinor();
                int bugFix = seRelease.getBugFix();
                String desc = "ArcSDE " + major + "." + minor + "." + bugFix + " " + sdeDesc;
                LOGGER.fine("Connected to " + desc);
            }
        }

        for (int i = 0; i < minConnections; i++) {
            pool.returnObject(preload[i]);
        }
    } catch (Exception e) {
        close();
        if (e instanceof IOException) {
            throw (IOException) e;
        }
        throw (IOException) new IOException().initCause(e);
    }
}
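The same tuning can also be expressed with setters after construction instead of a Config object. The sketch below is a rough equivalent of the settings above; the factory parameter stands in for seConnectionFactory and the numeric values are illustrative, not taken from ArcSDEConnectionConfig.

import org.apache.commons.pool.PoolableObjectFactory;
import org.apache.commons.pool.impl.GenericObjectPool;

// Sketch: the same tuning as the Config block above, expressed through setters.
public final class PoolTuning {
    public static GenericObjectPool configure(PoolableObjectFactory factory) {
        GenericObjectPool pool = new GenericObjectPool(factory);
        pool.setMaxActive(10);                                // pool upper limit (illustrative)
        pool.setMinIdle(0);                                   // do not pre-create connections
        pool.setMaxIdle(-1);                                  // unlimited idle; eviction trims them
        pool.setMaxWait(30000L);                              // wait up to 30s for a free connection
        pool.setWhenExhaustedAction(GenericObjectPool.WHEN_EXHAUSTED_BLOCK);
        pool.setTestOnBorrow(true);                           // validate on borrowObject()
        pool.setTestOnReturn(false);
        pool.setTestWhileIdle(false);
        pool.setMinEvictableIdleTimeMillis(5 * 60 * 1000);    // drop connections idle for 5 minutes
        return pool;
    }
}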
From source file:org.infoglue.cms.util.workflow.InfoGlueJDBCPropertySet.java
public void setupDriver(String connectURI, String userName, String password) throws Exception {
    String validationQuery = "select 1 from cmInfoGlueProperties";

    logger.info("Setting up driver.");
    Class.forName(this.driverClassName).newInstance();

    logger.info("dbcpWhenExhaustedAction:" + dbcpWhenExhaustedAction);
    logger.info("dbcpMaxActive:" + dbcpMaxActive);
    logger.info("dbcpMaxWait:" + dbcpMaxWait);
    logger.info("dbcpMaxIdle:" + dbcpMaxIdle);
    logger.info("dbcpValidationQuery:" + dbcpValidationQuery);

    int dbcpMaxActiveInt = 200;
    if (dbcpMaxActive != null && !dbcpMaxActive.equals(""))
        dbcpMaxActiveInt = Integer.parseInt(dbcpMaxActive);

    logger.info("dbcpMaxActiveInt:" + dbcpMaxActiveInt);

    connectionPool = new GenericObjectPool(null, dbcpMaxActiveInt);
    connectionPool.setTestOnBorrow(true);

    connectionFactory = new DriverManagerConnectionFactory(connectURI, userName, password);
    poolableConnectionFactory = new PoolableConnectionFactory(connectionFactory, connectionPool, null,
            validationQuery, false, true);

    Class.forName("org.apache.commons.dbcp.PoolingDriver");
    driver = (PoolingDriver) DriverManager.getDriver("jdbc:apache:commons:dbcp:");
    driver.registerPool("infoGlueJDBCPropertySet", connectionPool);
}
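Once setupDriver() has registered the pool under the name infoGlueJDBCPropertySet, callers obtain pooled connections through the DBCP driver URL. A minimal sketch (the query reuses the validation query above purely for illustration):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Sketch: borrowing a pooled connection from the pool registered above.
// The pool name must match the one passed to registerPool().
public final class PooledQuery {
    public static void main(String[] args) throws Exception {
        Class.forName("org.apache.commons.dbcp.PoolingDriver"); // ensure the pooling driver is loaded
        Connection conn = DriverManager.getConnection("jdbc:apache:commons:dbcp:infoGlueJDBCPropertySet");
        try {
            Statement stmt = conn.createStatement();
            ResultSet rs = stmt.executeQuery("select 1 from cmInfoGlueProperties");
            while (rs.next()) {
                System.out.println(rs.getInt(1));
            }
            rs.close();
            stmt.close();
        } finally {
            conn.close(); // returns the connection to the pool instead of closing it
        }
    }
}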
From source file:org.jongo.jdbc.JDBCConnectionFactory.java
/**
 * Instantiates a new JDBCConnectionFactory if required and creates a connection pool for every database.
 *
 * @return the instance of the singleton.
 */
private static JDBCConnectionFactory instanceOf() {
    if (instance == null) {
        instance = new JDBCConnectionFactory();
        for (DatabaseConfiguration db : configuration.getDatabases()) {
            l.debug("Registering Connection Pool for " + db.getDatabase());
            GenericObjectPool pool = new GenericObjectPool(null, db.getMaxConnections());
            ConnectionFactory connectionFactory = new DriverManagerConnectionFactory(db.toJdbcURL(),
                    db.getUsername(), db.getPassword());
            PoolableConnectionFactory poolableConnectionFactory = new PoolableConnectionFactory(
                    connectionFactory, pool, null, null, db.isReadOnly(), true);
            poolableConnectionFactory.hashCode();
            instance.connectionPool.put(db.getDatabase(), pool);
        }
    }
    return instance;
}
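Each per-database pool built this way still needs something to hand out connections from it. One common option in DBCP 1.x is to wrap the pool in a PoolingDataSource; the sketch below is illustrative and not code from JDBCConnectionFactory.

import javax.sql.DataSource;
import org.apache.commons.dbcp.PoolingDataSource;
import org.apache.commons.pool.impl.GenericObjectPool;

// Sketch: exposing one of the registered GenericObjectPools as a DataSource.
public final class PooledDataSourceExample {
    public static DataSource wrap(GenericObjectPool pool) {
        // PoolingDataSource hands out connections backed by the pool;
        // closing a connection returns it to the pool.
        return new PoolingDataSource(pool);
    }
}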
From source file:org.kiji.schema.impl.hbase.KijiHTablePool.java
/**
 * Primary constructor.
 *
 * @param name The name of the table that will be used with this pool.
 * @param kiji The HBaseKiji instance backing these tables.
 * @param tableFactory A factory to create the underlying HTableInterfaces. This factory must
 *     create HTables.
 */
public KijiHTablePool(String name, HBaseKiji kiji, HTableInterfaceFactory tableFactory) {
    mTableName = name;
    mHTableFactory = tableFactory;
    mKiji = kiji;
    mConstructorStack = CLEANUP_LOG.isDebugEnabled() ? Debug.getStackTrace() : null;

    mHBaseTableName = KijiManagedHBaseTableName
            .getKijiTableName(mKiji.getURI().getInstance(), mTableName).toString();

    final GenericObjectPool.Config config = new GenericObjectPool.Config();
    config.maxActive = -1;
    config.maxIdle = -1;
    // This state should not occur, so throw an exception.
    config.whenExhaustedAction = GenericObjectPool.WHEN_EXHAUSTED_FAIL;
    config.testOnBorrow = true;

    mPool = new GenericObjectPool<PooledHTable>(new PooledHTableFactory(), config);
}
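With testOnBorrow enabled and WHEN_EXHAUSTED_FAIL as the exhaustion policy, borrowObject() either hands back a validated table or throws. The methods below are a hedged sketch of how the mPool field above is typically consumed; the method names are hypothetical and not necessarily KijiHTablePool's real API.

// Hypothetical accessors that could sit alongside the constructor above.
public PooledHTable getTable() throws IOException {
    try {
        // Fails fast (NoSuchElementException) if all instances are checked out,
        // because whenExhaustedAction is WHEN_EXHAUSTED_FAIL.
        return mPool.borrowObject();
    } catch (Exception e) {
        throw new IOException("Unable to borrow a pooled HTable", e);
    }
}

public void release(PooledHTable table) throws IOException {
    try {
        mPool.returnObject(table);
    } catch (Exception e) {
        throw new IOException("Unable to return a pooled HTable", e);
    }
}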
From source file:org.mobicents.servlet.sip.example.SimpleSipServlet.java
@Override
public void init(ServletConfig servletConfig) throws ServletException {
    logger.info("the simple sip servlet has been started");
    super.init(servletConfig);

    RABBITMQ_CONN_URL = servletConfig.getInitParameter("rabbitmq_conn_url");
    EXCHANGE_NAME = servletConfig.getInitParameter("exchange_name");
    String pool_max_active = servletConfig.getInitParameter("rabbitmq_pool_max_active");
    String pool_max_idle = servletConfig.getInitParameter("rabbitmq_pool_max_idle");
    String pool_min_idle = servletConfig.getInitParameter("rabbitmq_pool_min_idle");
    String time_between_evication = servletConfig.getInitParameter("rabbitmq_pool_time_between_eviction");
    String idle_time_before_eviction = servletConfig
            .getInitParameter("rabbitmq_pool_idle_time_before_eviction");

    logger.info("INIT PARAM: rabbitmq_conn_url = " + RABBITMQ_CONN_URL);
    logger.info("INIT PARAM : exchange name = " + EXCHANGE_NAME);
    logger.info("INIT PARAM : pool max active = " + pool_max_active);
    logger.info("INIT PARAM : pool max idle = " + pool_max_idle);
    logger.info("INIT PARAM : pool min idle = " + pool_min_idle);
    logger.info("INIT PARAM : time_between_evication = " + time_between_evication);
    logger.info("INIT PARAM : idle_time_before_eviction = " + idle_time_before_eviction);

    try {
        POOL_MAX_ACTIVE = Integer.parseInt(pool_max_active);
    } catch (NumberFormatException e) {
        logger.error("Impossible to parse the pool max active : " + pool_max_active, e);
    }
    try {
        POOL_MAX_IDLE = Integer.parseInt(pool_max_idle);
    } catch (NumberFormatException e) {
        logger.error("Impossible to parse the pool max idle : " + pool_max_idle, e);
    }
    try {
        POOL_MIN_IDLE = Integer.parseInt(pool_min_idle);
    } catch (NumberFormatException e) {
        logger.error("Impossible to parse the pool min idle : " + pool_min_idle, e);
    }
    try {
        TIME_BETWEEN_EVICTION = Integer.parseInt(time_between_evication);
    } catch (NumberFormatException e) {
        logger.error("Impossible to parse the time between eviction : " + time_between_evication, e);
    }
    try {
        IDL_TIME_BEFORE_EVICTION = Integer.parseInt(idle_time_before_eviction);
    } catch (NumberFormatException e) {
        logger.error("Impossible to parse idle time before eviction : " + idle_time_before_eviction, e);
    }

    /**
     * create the static instance of the rabbitmq connection pool
     */
    try {
        GenericObjectPool.Config config = new GenericObjectPool.Config();
        config.maxActive = POOL_MAX_ACTIVE;
        config.maxIdle = POOL_MAX_IDLE;
        config.minIdle = POOL_MIN_IDLE;
        config.timeBetweenEvictionRunsMillis = TIME_BETWEEN_EVICTION;
        config.minEvictableIdleTimeMillis = IDL_TIME_BEFORE_EVICTION;
        config.testOnBorrow = false;
        config.testOnReturn = false;
        config.lifo = GenericObjectPool.DEFAULT_LIFO;
        config.whenExhaustedAction = GenericObjectPool.WHEN_EXHAUSTED_FAIL;

        pool = new GenericObjectPool<Channel>(new ConnectionPoolableObjectFactory(RABBITMQ_CONN_URL), config);

        // create an initial batch of pool instances.
        /*
        int initSize = 25;
        for (int i = 0; i < initSize; i++) {
            try {
                pool.addObject();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        */

        /*
        pool.setMaxActive(POOL_MAX_ACTIVE); // maximum connections allowed in the pool (100); once reached, worker threads block and wait.
        pool.setMinIdle(POOL_MIN_IDLE);     // keep a minimum of idle connections (3) in the pool at all times.
        pool.setMaxIdle(POOL_MAX_IDLE);     // no limit on idle connections (-1), so new connections are created when needed.
        pool.setTimeBetweenEvictionRunsMillis(TIME_BETWEEN_EVICTION); // wake up the eviction thread every 10 seconds
        pool.setMinEvictableIdleTimeMillis(IDL_TIME_BEFORE_EVICTION); // evict idle connections older than 5 seconds
        pool.setTestOnBorrow(true);  // sanity check when getting a connection from the pool.
        pool.setTestOnReturn(true);  // sanity check when returning a connection to the pool.
        */
    } catch (IOException ex) {
        logger.error("RabbitMQ Pool failed to create. Error = " + ex.getMessage());
        throw new ServletException(ex);
    }

    // logger.info("HELLO... FINISHED LOADING THE RABBITMQ CONNECTION/CHANNEL POOL");
}
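A servlet handler would then borrow a Channel from this pool, publish, and return it. The method below is only a sketch of that cycle; the method name, routing key, and payload are illustrative, and the broken channel is invalidated rather than returned.

import com.rabbitmq.client.Channel;

// Sketch: how a SIP message handler might use the pool initialised above.
private void publish(String routingKey, byte[] payload) throws Exception {
    Channel channel = pool.borrowObject();
    boolean broken = false;
    try {
        channel.basicPublish(EXCHANGE_NAME, routingKey, null, payload);
    } catch (Exception e) {
        broken = true;
        pool.invalidateObject(channel); // discard a channel that failed mid-publish
        throw e;
    } finally {
        if (!broken) {
            pool.returnObject(channel); // healthy channels go back for reuse
        }
    }
}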
From source file:org.mule.transport.sftp.SftpConnector.java
protected synchronized ObjectPool getClientPool(ImmutableEndpoint endpoint) {
    GenericObjectPool pool = pools.get(endpoint.getEndpointURI());
    if (pool == null) {
        if (logger.isDebugEnabled()) {
            logger.debug("Pool is null - creating one for endpoint " + endpoint.getEndpointURI()
                    + " with max size " + getMaxConnectionPoolSize());
        }
        pool = new GenericObjectPool(new SftpConnectionFactory(endpoint), getMaxConnectionPoolSize());
        pool.setTestOnBorrow(isValidateConnections());
        pools.put(endpoint.getEndpointURI(), pool);
    } else {
        if (logger.isDebugEnabled()) {
            logger.debug("Using existing pool for endpoint " + endpoint.getEndpointURI() + ". Active: "
                    + pool.getNumActive() + ", Idle:" + pool.getNumIdle());
        }
    }
    return pool;
}
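When the connector shuts down, each per-endpoint pool should be released as well. The method below is a hedged sketch of such cleanup over the same pools map; the method name and its place in SftpConnector's lifecycle are assumptions, not code from the Mule transport.

// Sketch: closing every per-endpoint pool, e.g. from the connector's dispose phase.
protected void closeClientPools() {
    for (GenericObjectPool pool : pools.values()) {
        try {
            pool.close(); // destroys idle clients and marks the pool unusable
        } catch (Exception e) {
            logger.warn("Failed to close SFTP client pool", e);
        }
    }
    pools.clear();
}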
From source file:org.mule.util.pool.CommonsPoolObjectPool.java
public void initialise() throws InitialisationException {
    GenericObjectPool.Config config = new GenericObjectPool.Config();

    if (poolingProfile != null) {
        config.maxIdle = poolingProfile.getMaxIdle();
        config.maxActive = poolingProfile.getMaxActive();
        config.maxWait = poolingProfile.getMaxWait();
        config.whenExhaustedAction = (byte) poolingProfile.getExhaustedAction();
        config.minEvictableIdleTimeMillis = poolingProfile.getMinEvictionMillis();
        config.timeBetweenEvictionRunsMillis = poolingProfile.getEvictionCheckIntervalMillis();
    }

    pool = new GenericObjectPool(getPooledObjectFactory(), config);

    try {
        applyInitialisationPolicy();
    } catch (Exception e) {
        throw new InitialisationException(e, this);
    }
}
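applyInitialisationPolicy() itself is not shown above. One way such a policy is commonly implemented on a GenericObjectPool is to pre-fill it with idle instances via addObject(); the helper below is a sketch under that assumption, with the count purely illustrative.

import org.apache.commons.pool.impl.GenericObjectPool;

// Sketch: pre-filling a pool with idle objects, as an initialisation policy might do.
public final class PoolWarmUp {
    public static void preFill(GenericObjectPool pool, int initialCount) throws Exception {
        for (int i = 0; i < initialCount; i++) {
            pool.addObject(); // creates an instance via the factory and parks it as idle
        }
    }
}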
From source file:org.opentripplanner.routing.algorithm.strategies.WeightTable.java
/**
 * Build the weight table, parallelized according to the number of processors.
 */
public void buildTable() {
    ArrayList<TransitStop> stopVertices;

    LOG.debug("Number of vertices: " + g.getVertices().size());
    stopVertices = new ArrayList<TransitStop>();
    for (Vertex gv : g.getVertices())
        if (gv instanceof TransitStop)
            stopVertices.add((TransitStop) gv);
    int nStops = stopVertices.size();

    stopIndices = new IdentityHashMap<Vertex, Integer>(nStops);
    for (int i = 0; i < nStops; i++)
        stopIndices.put(stopVertices.get(i), i);
    LOG.debug("Number of stops: " + nStops);

    table = new float[nStops][nStops];
    for (float[] row : table)
        Arrays.fill(row, Float.POSITIVE_INFINITY);

    LOG.debug("Performing search at each transit stop.");

    int nThreads = Runtime.getRuntime().availableProcessors();
    LOG.debug("number of threads: " + nThreads);
    ArrayBlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<Runnable>(nStops);
    ThreadPoolExecutor threadPool = new ThreadPoolExecutor(nThreads, nThreads, 10, TimeUnit.SECONDS, taskQueue);

    // make one heap per worker and recycle it
    GenericObjectPool heapPool = new GenericObjectPool(
            new PoolableBinHeapFactory<State>(g.getVertices().size()), nThreads);

    RoutingRequest options = new RoutingRequest();
    // TODO LG Check this change:
    options.setWalkSpeed(maxWalkSpeed);
    final double MAX_WEIGHT = 60 * 60 * options.walkReluctance;
    final double OPTIMISTIC_BOARD_COST = options.getBoardCostLowerBound();

    // create a task for each transit stop in the graph
    ArrayList<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
    for (TransitStop origin : stopVertices) {
        SPTComputer task = new SPTComputer(heapPool, options, MAX_WEIGHT, OPTIMISTIC_BOARD_COST, origin);
        tasks.add(task);
    }
    try {
        // invoke all of the tasks
        threadPool.invokeAll(tasks);
        threadPool.shutdown();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    floyd();
}
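Here the pool's maxActive equals the thread count, so at most one heap exists per concurrent worker rather than one per stop; each SPTComputer task borrows a heap, runs its search, and returns it. The self-contained sketch below mirrors that pattern with a plain ArrayList standing in for the pooled BinHeap<State>, purely for illustration.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.commons.pool.BasePoolableObjectFactory;
import org.apache.commons.pool.impl.GenericObjectPool;

// Sketch: many tasks sharing a small pool of reusable scratch objects,
// mirroring how each SPTComputer borrows a heap from heapPool and returns it when done.
public class SharedScratchPoolExample {
    public static void main(String[] args) throws Exception {
        int nThreads = Runtime.getRuntime().availableProcessors();
        final GenericObjectPool scratchPool = new GenericObjectPool(new BasePoolableObjectFactory() {
            @Override
            public Object makeObject() {
                return new ArrayList<Integer>(); // stand-in for the pooled BinHeap<State>
            }
        }, nThreads); // at most one scratch object per worker thread

        ExecutorService executor = Executors.newFixedThreadPool(nThreads);
        List<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
        for (int i = 0; i < 100; i++) {
            tasks.add(new Callable<Void>() {
                public Void call() throws Exception {
                    Object scratch = scratchPool.borrowObject();
                    try {
                        // ... use the scratch object for this task's computation ...
                    } finally {
                        scratchPool.returnObject(scratch); // recycle it for the next task
                    }
                    return null;
                }
            });
        }
        executor.invokeAll(tasks);
        executor.shutdown();
        scratchPool.close();
    }
}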