List of usage examples for org.apache.commons.dbcp2 BasicDataSource setDriverClassLoader
public synchronized void setDriverClassLoader(ClassLoader driverClassLoader)
Sets the class loader to be used to load the JDBC driver.
Note: this method currently has no effect once the pool has been initialized.
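Before the project-specific examples below, here is a minimal self-contained sketch of the typical pattern: load the driver jar through a dedicated URLClassLoader and hand that loader to the pool before the first connection is borrowed. The jar path and the H2 driver are illustrative assumptions, not taken from any of the examples below.

import java.net.URL;
import java.net.URLClassLoader;
import java.sql.Connection;

import org.apache.commons.dbcp2.BasicDataSource;

public class DriverClassLoaderExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical jar location; assumes the driver is NOT on the application classpath.
        URLClassLoader driverLoader = new URLClassLoader(
            new URL[] { new URL("file:///opt/drivers/h2.jar") },
            Thread.currentThread().getContextClassLoader());

        BasicDataSource ds = new BasicDataSource();
        ds.setDriverClassName("org.h2.Driver");   // resolved via driverLoader, not the app classpath
        ds.setUrl("jdbc:h2:mem:example");
        // Must be called before the pool initializes, i.e. before the first getConnection();
        // afterwards it has no effect (see the note above).
        ds.setDriverClassLoader(driverLoader);

        try (Connection c = ds.getConnection()) {
            System.out.println("Connected: " + !c.isClosed());
        }
    }
}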
From source file:io.druid.server.namespace.cache.JDBCExtractionNamespaceTest.java
@BeforeClass
public static final void createTables() {
    final BasicDataSource datasource = new BasicDataSource();
    datasource.setUrl(connectionURI);
    datasource.setDriverClassLoader(JDBCExtractionNamespaceTest.class.getClassLoader());
    datasource.setDriverClassName("org.apache.derby.jdbc.EmbeddedDriver");
    dbi = new DBI(datasource);
    dbi.withHandle(new HandleCallback<Void>() {
        @Override
        public Void withHandle(Handle handle) throws Exception {
            handle.createStatement(
                String.format(
                    "CREATE TABLE %s (%s TIMESTAMP, %s VARCHAR(64), %s VARCHAR(64))",
                    tableName, tsColumn_, keyName, valName))
                .execute();
            return null;
        }
    });
}
From source file:com.teradata.tempto.internal.query.JdbcUtils.java
private static DataSource createPoolingDataSource(JdbcConnectivityParamsState jdbcParamsState) {
    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setDriverClassName(jdbcParamsState.driverClass);
    dataSource.setUrl(jdbcParamsState.url);
    dataSource.setUsername(jdbcParamsState.user);
    dataSource.setPassword(jdbcParamsState.password);
    dataSource.setDriverClassLoader(getDriverClassLoader(jdbcParamsState));
    return dataSource;
}
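The getDriverClassLoader(jdbcParamsState) helper is not part of this snippet. A plausible shape for such a helper is sketched below; the class name, method name, and jar-path parameter are invented for illustration and are not tempto's actual code.

import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;

final class DriverClassLoaders {
    // Hypothetical helper: builds an isolated loader when a driver jar path is configured,
    // otherwise falls back to the loader that defined this class.
    static ClassLoader forJar(String driverJarPath) throws MalformedURLException {
        if (driverJarPath == null) {
            return DriverClassLoaders.class.getClassLoader();
        }
        URL jarUrl = new File(driverJarPath).toURI().toURL();
        return new URLClassLoader(new URL[] { jarUrl }, DriverClassLoaders.class.getClassLoader());
    }
}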
From source file:io.druid.firehose.sql.MySQLFirehoseDatabaseConnector.java
public MySQLFirehoseDatabaseConnector(
    @JsonProperty("connectorConfig") MetadataStorageConnectorConfig connectorConfig) {
    this.connectorConfig = connectorConfig;

    final BasicDataSource datasource = getDatasource(connectorConfig);
    datasource.setDriverClassLoader(getClass().getClassLoader());
    datasource.setDriverClassName("com.mysql.jdbc.Driver");

    this.dbi = new DBI(datasource);
}
From source file:io.druid.metadata.storage.derby.DerbyConnector.java
@Inject
public DerbyConnector(
    Supplier<MetadataStorageConnectorConfig> config,
    Supplier<MetadataStorageTablesConfig> dbTables) {
    super(config, dbTables);

    final BasicDataSource datasource = getDatasource();
    datasource.setDriverClassLoader(getClass().getClassLoader());
    datasource.setDriverClassName("org.apache.derby.jdbc.ClientDriver");

    this.dbi = new DBI(datasource);

    log.info("Configured Derby as metadata storage");
}
From source file:io.druid.metadata.storage.postgresql.PostgreSQLConnector.java
@Inject
public PostgreSQLConnector(
    Supplier<MetadataStorageConnectorConfig> config,
    Supplier<MetadataStorageTablesConfig> dbTables) {
    super(config, dbTables);

    final BasicDataSource datasource = getDatasource();
    // PostgreSQL driver is classloader isolated as part of the extension
    // so we need to help JDBC find the driver
    datasource.setDriverClassLoader(getClass().getClassLoader());
    datasource.setDriverClassName("org.postgresql.Driver");

    this.dbi = new DBI(datasource);

    log.info("Configured PostgreSQL as metadata storage");
}
From source file:io.druid.metadata.storage.mysql.MySQLConnector.java
@Inject
public MySQLConnector(
    Supplier<MetadataStorageConnectorConfig> config,
    Supplier<MetadataStorageTablesConfig> dbTables) {
    super(config, dbTables);

    final BasicDataSource datasource = getDatasource();
    // MySQL driver is classloader isolated as part of the extension
    // so we need to help JDBC find the driver
    datasource.setDriverClassLoader(getClass().getClassLoader());
    datasource.setDriverClassName("com.mysql.jdbc.Driver");
    // use double-quotes for quoting columns, so we can write SQL that works with most databases
    datasource.setConnectionInitSqls(ImmutableList.of("SET sql_mode='ANSI_QUOTES'"));

    this.dbi = new DBI(datasource);

    log.info("Configured MySQL as metadata storage");
}
From source file:io.druid.metadata.storage.sqlserver.SQLServerConnector.java
@Inject
public SQLServerConnector(
    Supplier<MetadataStorageConnectorConfig> config,
    Supplier<MetadataStorageTablesConfig> dbTables) {
    super(config, dbTables);

    final BasicDataSource datasource = getDatasource();
    datasource.setDriverClassLoader(getClass().getClassLoader());
    datasource.setDriverClassName("com.microsoft.sqlserver.jdbc.SQLServerDriver");

    this.dbi = new DBI(datasource);
    this.dbi.setStatementRewriter(new CustomStatementRewriter());

    log.info("Configured Sql Server as metadata storage");
}
From source file:com.ebay.pulsar.analytics.dao.RDBMS.java
private boolean init(boolean force) {
    if (dataSource == null || force) {
        final BasicDataSource dataSource = new BasicDataSource();
        dataSource.setUsername(userName);
        dataSource.setPassword(userPwd);
        dataSource.setUrl(url);
        dataSource.setTestOnBorrow(true);
        if (validationQuery != null) {
            dataSource.setValidationQuery(validationQuery);
        }
        dataSource.setDriverClassLoader(Thread.currentThread().getContextClassLoader());
        dataSource.setDriverClassName(driver);
        this.setDataSource(dataSource);
    }
    return true;
}
From source file:eu.peppol.persistence.jdbc.OxalisDataSourceFactoryDbcpImplTest.java
@Test
public void testBasicDataSource() throws Exception {
    String jdbcDriverClassPath = globalConfiguration.getJdbcDriverClassPath();
    URLClassLoader urlClassLoader = new URLClassLoader(
        new URL[] { new URL(jdbcDriverClassPath) },
        Thread.currentThread().getContextClassLoader());

    BasicDataSource basicDataSource = new BasicDataSource();
    basicDataSource.setDriverClassName(globalConfiguration.getJdbcDriverClassName());
    basicDataSource.setUrl(globalConfiguration.getJdbcConnectionURI());
    basicDataSource.setUsername(globalConfiguration.getJdbcUsername());
    basicDataSource.setPassword(globalConfiguration.getJdbcPassword());

    // Does not work in 1.4, fixed in 1.4.1
    basicDataSource.setDriverClassLoader(urlClassLoader);

    try {
        Connection connection = basicDataSource.getConnection();
        assertNotNull(connection);
    } catch (SQLException e) {
        // As expected when using DBCP 1.4
    }
}
From source file:io.druid.indexing.jdbc.JDBCIndexTask.java
@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(
        dataSchema,
        new RealtimeIOConfig(null, null, null),
        null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler().addMonitor(
        new RealtimeMetricsMonitor(
            ImmutableList.of(fireDepartmentForMetrics),
            ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(ioConfig.getUser());
    dataSource.setPassword(ioConfig.getPassword());
    dataSource.setUrl(ioConfig.getConnectURI());
    dataSource.setDriverClassLoader(getClass().getClassLoader());

    final String table = ioConfig.getTableName();

    if (!StringUtils.isEmpty(ioConfig.getDriverClass())) {
        dataSource.setDriverClassName(ioConfig.getDriverClass());
    }

    final Handle handle = new DBI(dataSource).open();
    try (
        final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
        final AppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics)) {
        toolbox.getDataSegmentServerAnnouncer().announce();
        appenderator = appenderator0;

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getJdbcOffsets().getOffsetMaps());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final JDBCOffsets restoredNextPartitions = toolbox.getObjectMapper()
                .convertValue(restoredMetadataMap.get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            nextOffsets.putAll(restoredNextPartitions.getOffsetMaps());

            // Sanity checks.
            if (!restoredNextPartitions.getTable().equals(ioConfig.getTableName())) {
                throw new ISE(
                    "WTF?! Restored table[%s] but expected table[%s]",
                    restoredNextPartitions.getTable(),
                    ioConfig.getTableName());
            }

            if (!nextOffsets.equals(ioConfig.getJdbcOffsets().getOffsetMaps())) {
                throw new ISE(
                    "WTF?! Restored partitions[%s] but expected partitions[%s]",
                    nextOffsets,
                    ioConfig.getJdbcOffsets().getOffsetMaps());
            }
        }

        // Set up sequenceNames.
        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum, String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {
            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);
                return new Committer() {
                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(
                            METADATA_NEXT_OFFSETS,
                            new JDBCOffsets(ioConfig.getJdbcOffsets().getTable(), snapshot));
                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };

        //  Set<Integer> assignment = assignPartitionsAndSeekToNext(handle);
        //  boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            //  while (stillReading) {
            //    if (possiblyPause(assignment)) {
            //      The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
            //      partitions upon resuming. This is safe even if the end offsets have not been modified.
            //      assignment = assignPartitionsAndSeekToNext(handle);
            //      if (assignment.isEmpty()) {
            //        log.info("All partitions have been fully read");
            //        publishOnStop = true;
            //        stopRequested = true;
            //      }
            //    }
            //    if (stopRequested) {
            //      break;
            //    }

            final String query = (ioConfig.getQuery() != null)
                ? ioConfig.getQuery()
                : makeQuery(ioConfig.getColumns(), ioConfig.getJdbcOffsets());
            org.skife.jdbi.v2.Query<Map<String, Object>> dbiQuery = handle.createQuery(query);

            final ResultIterator<InputRow> rowIterator = dbiQuery.map(new ResultSetMapper<InputRow>() {
                List<String> queryColumns = (ioConfig.getColumns() == null)
                    ? Lists.<String>newArrayList()
                    : ioConfig.getColumns();
                List<Boolean> columnIsNumeric = Lists.newArrayList();

                @Override
                public InputRow map(final int index, final ResultSet r, final StatementContext ctx) throws SQLException {
                    try {
                        if (queryColumns.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                queryColumns.add(metadata.getColumnName(idx));
                            }
                            Preconditions.checkArgument(
                                queryColumns.size() > 0,
                                String.format("No column in table [%s]", table));
                            verifyParserSpec(parser.getParseSpec(), queryColumns);
                        }
                        if (columnIsNumeric.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            Preconditions.checkArgument(
                                metadata.getColumnCount() >= queryColumns.size(),
                                String.format(
                                    "number of column names [%d] exceeds the actual number of returning column values [%d]",
                                    queryColumns.size(),
                                    metadata.getColumnCount()));
                            columnIsNumeric.add(false); // dummy to make start index to 1
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                boolean isNumeric = false;
                                int type = metadata.getColumnType(idx);
                                switch (type) {
                                    case BIGINT:
                                    case DECIMAL:
                                    case DOUBLE:
                                    case FLOAT:
                                    case INTEGER:
                                    case NUMERIC:
                                    case SMALLINT:
                                    case TINYINT:
                                        isNumeric = true;
                                        break;
                                }
                                columnIsNumeric.add(isNumeric);
                            }
                        }
                        final Map<String, Object> columnMap = Maps.newHashMap();
                        int columnIdx = 1;
                        for (String column : queryColumns) {
                            Object objToPut = null;
                            if (table != null) {
                                objToPut = r.getObject(column);
                            } else {
                                objToPut = r.getObject(columnIdx);
                            }
                            columnMap.put(column, objToPut == null ? columnIsNumeric.get(columnIdx) : objToPut);
                            columnIdx++;
                        }
                        return parser.parse(columnMap);
                    } catch (IllegalArgumentException e) {
                        throw new SQLException(e);
                    }
                }
            }).iterator();

            org.skife.jdbi.v2.Query<Map<String, Object>> maxItemQuery =
                handle.createQuery(makeMaxQuery(ioConfig.getJdbcOffsets()));
            long currOffset = maxItemQuery != null ? (long) maxItemQuery.list(1).get(0).get("MAX") : 0;

            while (rowIterator.hasNext()) {
                InputRow row = rowIterator.next();
                try {
                    if (!ioConfig.getMinimumMessageTime().isPresent()
                        || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {
                        final String sequenceName = sequenceNames.get(nextOffsets.keySet().toArray()[0]); //TODO::: check data
                        final AppenderatorDriverAddResult addResult = driver.add(row, sequenceName, committerSupplier);

                        if (addResult.isOk()) {
                            // If the number of rows in the segment exceeds the threshold after adding a row,
                            // move the segment out from the active segments of AppenderatorDriver to make a new segment.
                            if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) {
                                driver.moveSegmentOut(sequenceName, ImmutableList.of(addResult.getSegmentIdentifier()));
                            }
                        } else {
                            // Failure to allocate segment puts determinism at risk, bail out to be safe.
                            // May want configurable behavior here at some point.
                            // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                            throw new ISE("Could not allocate segment for row with timestamp[%s]", row.getTimestamp());
                        }
                        fireDepartmentMetrics.incrementProcessed();
                    } else {
                        fireDepartmentMetrics.incrementThrownAway();
                    }
                } catch (ParseException e) {
                    if (tuningConfig.isReportParseExceptions()) {
                        throw e;
                    } else {
                        log.debug(e, "Dropping unparseable row from row[%d].", row);
                        fireDepartmentMetrics.incrementUnparseable();
                    }
                }
            }
            nextOffsets.put((int) ioConfig.getJdbcOffsets().getOffsetMaps().keySet().toArray()[0], currOffset);

            //    if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
            //        && assignment.remove(record.partition())) {
            //      log.info("Finished reading table[%s], partition[%,d].", record.topic(), record.partition());
            //      stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
            //    }
            //  }
        } finally {
            driver.persist(committerSupplier.get()); // persist pending data
        }

        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }
            status = Status.PUBLISHING;
        }

        final TransactionalSegmentPublisher publisher = (segments, commitMetadata) -> {
            final JDBCOffsets finalOffsets = toolbox.getObjectMapper()
                .convertValue(((Map) commitMetadata).get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);

            // Sanity check, we should only be publishing things that match our desired end state.
            //TODO::: Sanity Check!
            //  if (!endOffsets.equals(finalOffsets.getOffsetMaps())) {
            //    throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
            //  }

            final SegmentTransactionalInsertAction action;
            if (ioConfig.isUseTransaction()) {
                action = new SegmentTransactionalInsertAction(
                    segments,
                    new JDBCDataSourceMetadata(ioConfig.getJdbcOffsets()),
                    new JDBCDataSourceMetadata(finalOffsets) //TODO::: Check Values
                );
            } else {
                action = new SegmentTransactionalInsertAction(segments, null, null);
            }

            log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());

            return toolbox.getTaskActionClient().submit(action).isSuccess();
        };

        // Supervised kafka tasks are killed by JDBCSupervisor if they are stuck during publishing segments or waiting
        // for hand off. See JDBCSupervisorIOConfig.completionTimeout.
        final SegmentsAndMetadata published = driver
            .publish(publisher, committerSupplier.get(), sequenceNames.values())
            .get();

        final SegmentsAndMetadata handedOff;
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOff = driver.registerHandoff(published).get();
        } else {
            handedOff = driver.registerHandoff(published)
                .get(tuningConfig.getHandoffConditionTimeout(), TimeUnit.MILLISECONDS);
        }

        if (handedOff == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info(
                "Published segments[%s] with metadata[%s].",
                Joiner.on(", ").join(
                    Iterables.transform(
                        handedOff.getSegments(),
                        new Function<DataSegment, String>() {
                            @Override
                            public String apply(DataSegment input) {
                                return input.getIdentifier();
                            }
                        })),
                handedOff.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
            && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
        handle.close();
    }

    toolbox.getDataSegmentServerAnnouncer().unannounce();

    //TODO::implement
    return success();
}