Example usage for org.apache.commons.dbcp2 BasicDataSource setDriverClassName

Introduction

On this page you can find example usages of org.apache.commons.dbcp2 BasicDataSource setDriverClassName.

Prototype

public synchronized void setDriverClassName(String driverClassName) 

Document

Sets the JDBC driver class name.

Note: this method currently has no effect once the pool has been initialized.
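
For orientation, here is a minimal, self-contained sketch of the typical call order. The driver class, URL, and credentials are placeholders rather than values from the examples below; the point is that setDriverClassName must be called before the pool initializes, which in DBCP 2 happens on the first call to getConnection.

import java.sql.Connection;
import java.sql.SQLException;

import org.apache.commons.dbcp2.BasicDataSource;

public class DriverClassNameExample {
    public static void main(String[] args) throws SQLException {
        BasicDataSource ds = new BasicDataSource();
        // Configure the driver before the pool is initialized.
        ds.setDriverClassName("org.postgresql.Driver"); // placeholder driver class
        ds.setUrl("jdbc:postgresql://localhost:5432/exampledb"); // placeholder URL
        ds.setUsername("user"); // placeholder credentials
        ds.setPassword("secret");

        // The pool initializes on the first getConnection(); from this point on,
        // further setDriverClassName() calls have no effect.
        try (Connection conn = ds.getConnection()) {
            System.out.println("Connected: " + !conn.isClosed());
        }
        ds.close();
    }
}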

Usage

From source file:com.ebay.pulsar.analytics.dao.RDBMS.java

private boolean init(boolean force) {
    if (dataSource == null || force) {
        final BasicDataSource dataSource = new BasicDataSource();
        dataSource.setUsername(userName);
        dataSource.setPassword(userPwd);
        dataSource.setUrl(url);
        dataSource.setTestOnBorrow(true);
        if (validationQuery != null)
            dataSource.setValidationQuery(validationQuery);
        dataSource.setDriverClassLoader(Thread.currentThread().getContextClassLoader());
        dataSource.setDriverClassName(driver);
        this.setDataSource(dataSource);
    }
    return true;
}
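
Note that this example sets both the driver class loader and the driver class name. When a class loader is supplied, DBCP loads the named driver class through it rather than through the system classpath, which matters in containers and plugin setups where the JDBC driver jar is not visible to the default class loader.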

From source file:com.emc.vipr.sync.source.AtmosSource.java

@Override
public void parseCustomOptions(CommandLine line) {
    AtmosUtil.AtmosUri atmosUri = AtmosUtil.parseUri(sourceUri);
    endpoints = atmosUri.endpoints;
    uid = atmosUri.uid;
    secret = atmosUri.secret;
    namespaceRoot = atmosUri.rootPath;

    if (line.hasOption(SOURCE_OIDLIST_OPTION))
        oidFile = line.getOptionValue(SOURCE_OIDLIST_OPTION);

    if (line.hasOption(SOURCE_NAMELIST_OPTION))
        nameFile = line.getOptionValue(SOURCE_NAMELIST_OPTION);

    if (line.hasOption(SOURCE_SQLQUERY_OPTION)) {
        query = line.getOptionValue(SOURCE_SQLQUERY_OPTION);

        // Initialize a connection pool
        BasicDataSource ds = new BasicDataSource();
        ds.setUrl(line.getOptionValue(JDBC_URL_OPT));
        if (line.hasOption(JDBC_DRIVER_OPT))
            ds.setDriverClassName(line.getOptionValue(JDBC_DRIVER_OPT));
        ds.setUsername(line.getOptionValue(JDBC_USER_OPT));
        ds.setPassword(line.getOptionValue(JDBC_PASSWORD_OPT));
        ds.setMaxTotal(200);
        ds.setMaxOpenPreparedStatements(180);
        setDataSource(ds);
    }

    deleteTags = line.hasOption(DELETE_TAGS_OPT);
}
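
Two details are worth noting here. setDriverClassName is only called when a driver option was actually supplied; otherwise BasicDataSource falls back to locating a driver through DriverManager from the JDBC URL, which works for drivers that register themselves. Also, setMaxOpenPreparedStatements only takes effect once statement pooling is enabled with setPoolPreparedStatements(true), which this snippet does not do.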

From source file:io.druid.indexing.jdbc.JDBCIndexTask.java

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema,
            new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler()
            .addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics),
                    ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(ioConfig.getUser());
    dataSource.setPassword(ioConfig.getPassword());
    dataSource.setUrl(ioConfig.getConnectURI());
    dataSource.setDriverClassLoader(getClass().getClassLoader());

    final String table = ioConfig.getTableName();

    if (!StringUtils.isEmpty(ioConfig.getDriverClass())) {
        dataSource.setDriverClassName(ioConfig.getDriverClass());
    }

    final Handle handle = new DBI(dataSource).open();
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
            final AppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics)) {
        toolbox.getDataSegmentServerAnnouncer().announce();
        appenderator = appenderator0;

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getJdbcOffsets().getOffsetMaps());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final JDBCOffsets restoredNextPartitions = toolbox.getObjectMapper()
                    .convertValue(restoredMetadataMap.get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            nextOffsets.putAll(restoredNextPartitions.getOffsetMaps());

            // Sanity checks.
            if (!restoredNextPartitions.getTable().equals(ioConfig.getTableName())) {
                throw new ISE("WTF?! Restored table[%s] but expected table[%s]",
                        restoredNextPartitions.getTable(), ioConfig.getTableName());
            }

            if (!nextOffsets.equals(ioConfig.getJdbcOffsets().getOffsetMaps())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets,
                        ioConfig.getJdbcOffsets().getOffsetMaps());
            }
        }

        // Set up sequenceNames.

        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum,
                    String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {
            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);

                return new Committer() {
                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_OFFSETS,
                                new JDBCOffsets(ioConfig.getJdbcOffsets().getTable(), snapshot));

                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };

        //      Set<Integer> assignment = assignPartitionsAndSeekToNext(handle);
        //      boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            //        while (stillReading) {
            //          if (possiblyPause(assignment)) {
            //             The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
            //             partitions upon resuming. This is safe even if the end offsets have not been modified.
            //            assignment = assignPartitionsAndSeekToNext(handle);
            //            if (assignment.isEmpty()) {
            //              log.info("All partitions have been fully read");
            //              publishOnStop = true;
            //              stopRequested = true;
            //            }
            //          }
            //          if (stopRequested) {
            //            break;
            //          }

            final String query = (ioConfig.getQuery() != null) ? ioConfig.getQuery()
                    : makeQuery(ioConfig.getColumns(), ioConfig.getJdbcOffsets());
            org.skife.jdbi.v2.Query<Map<String, Object>> dbiQuery = handle.createQuery(query);

            final ResultIterator<InputRow> rowIterator = dbiQuery.map(new ResultSetMapper<InputRow>() {
                List<String> queryColumns = (ioConfig.getColumns() == null) ? Lists.<String>newArrayList()
                        : ioConfig.getColumns();
                List<Boolean> columnIsNumeric = Lists.newArrayList();

                @Override
                public InputRow map(final int index, final ResultSet r, final StatementContext ctx)
                        throws SQLException {
                    try {
                        if (queryColumns.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                queryColumns.add(metadata.getColumnName(idx));
                            }
                            Preconditions.checkArgument(queryColumns.size() > 0,
                                    String.format("No column in table [%s]", table));
                            verifyParserSpec(parser.getParseSpec(), queryColumns);
                        }
                        if (columnIsNumeric.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            Preconditions.checkArgument(metadata.getColumnCount() >= queryColumns.size(),
                                    String.format(
                                            "number of column names [%d] exceeds the actual number of returning column values [%d]",
                                            queryColumns.size(), metadata.getColumnCount()));
                            columnIsNumeric.add(false); // dummy to make start index to 1
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                boolean isNumeric = false;
                                int type = metadata.getColumnType(idx);
                                switch (type) {
                                case BIGINT:
                                case DECIMAL:
                                case DOUBLE:
                                case FLOAT:
                                case INTEGER:
                                case NUMERIC:
                                case SMALLINT:
                                case TINYINT:
                                    isNumeric = true;
                                    break;
                                }
                                columnIsNumeric.add(isNumeric);
                            }
                        }
                        final Map<String, Object> columnMap = Maps.newHashMap();
                        int columnIdx = 1;
                        for (String column : queryColumns) {
                            Object objToPut = null;
                            if (table != null) {
                                objToPut = r.getObject(column);
                            } else {
                                objToPut = r.getObject(columnIdx);
                            }
                            columnMap.put(column, objToPut == null ? columnIsNumeric.get(columnIdx) : objToPut);

                            columnIdx++;
                        }
                        return parser.parse(columnMap);

                    } catch (IllegalArgumentException e) {
                        throw new SQLException(e);
                    }
                }
            }).iterator();

            org.skife.jdbi.v2.Query<Map<String, Object>> maxItemQuery = handle
                    .createQuery(makeMaxQuery(ioConfig.getJdbcOffsets()));
            long currOffset = maxItemQuery != null ? (long) maxItemQuery.list(1).get(0).get("MAX") : 0;

            while (rowIterator.hasNext()) {
                InputRow row = rowIterator.next();
                try {
                    if (!ioConfig.getMinimumMessageTime().isPresent()
                            || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {

                        final String sequenceName = sequenceNames.get(nextOffsets.keySet().toArray()[0]); //TODO::: check data
                        final AppenderatorDriverAddResult addResult = driver.add(row, sequenceName,
                                committerSupplier);

                        if (addResult.isOk()) {
                            // If the number of rows in the segment exceeds the threshold after adding a row,
                            // move the segment out from the active segments of AppenderatorDriver to make a new segment.
                            if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) {
                                driver.moveSegmentOut(sequenceName,
                                        ImmutableList.of(addResult.getSegmentIdentifier()));
                            }
                        } else {
                            // Failure to allocate segment puts determinism at risk, bail out to be safe.
                            // May want configurable behavior here at some point.
                            // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                            throw new ISE("Could not allocate segment for row with timestamp[%s]",
                                    row.getTimestamp());
                        }

                        fireDepartmentMetrics.incrementProcessed();
                    } else {
                        fireDepartmentMetrics.incrementThrownAway();
                    }
                } catch (ParseException e) {
                    if (tuningConfig.isReportParseExceptions()) {
                        throw e;
                    } else {
                        log.debug(e, "Dropping unparseable row[%s].", row);

                        fireDepartmentMetrics.incrementUnparseable();
                    }
                }
            }
            nextOffsets.put((int) ioConfig.getJdbcOffsets().getOffsetMaps().keySet().toArray()[0], currOffset);
            //          if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
            //              && assignment.remove(record.partition())) {
            //            log.info("Finished reading table[%s], partition[%,d].", record.topic(), record.partition());
            //            stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
            //          }
            //        }
        } finally {
            driver.persist(committerSupplier.get()); // persist pending data
        }
        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }

            status = Status.PUBLISHING;
        }

        final TransactionalSegmentPublisher publisher = (segments, commitMetadata) -> {

            final JDBCOffsets finalOffsets = toolbox.getObjectMapper()
                    .convertValue(((Map) commitMetadata).get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            // Sanity check, we should only be publishing things that match our desired end state. //TODO::: Sanity Check!
            //        if (!endOffsets.equals(finalOffsets.getOffsetMaps())) {
            //          throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
            //        }

            final SegmentTransactionalInsertAction action;

            if (ioConfig.isUseTransaction()) {
                action = new SegmentTransactionalInsertAction(segments,
                        new JDBCDataSourceMetadata(ioConfig.getJdbcOffsets()),
                        new JDBCDataSourceMetadata(finalOffsets) //TODO::: Check Values
                );
            } else {
                action = new SegmentTransactionalInsertAction(segments, null, null);
            }

            log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());

            return toolbox.getTaskActionClient().submit(action).isSuccess();
        };

        // Supervised JDBC tasks are killed by JDBCSupervisor if they are stuck during publishing segments or waiting
        // for hand off. See JDBCSupervisorIOConfig.completionTimeout.
        final SegmentsAndMetadata published = driver
                .publish(publisher, committerSupplier.get(), sequenceNames.values()).get();

        final SegmentsAndMetadata handedOff;
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOff = driver.registerHandoff(published).get();
        } else {
            handedOff = driver.registerHandoff(published).get(tuningConfig.getHandoffConditionTimeout(),
                    TimeUnit.MILLISECONDS);
        }

        if (handedOff == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].", Joiner.on(", ")
                    .join(Iterables.transform(handedOff.getSegments(), new Function<DataSegment, String>() {
                        @Override
                        public String apply(DataSegment input) {
                            return input.getIdentifier();
                        }
                    })), handedOff.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
                && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
        handle.close();
    }

    toolbox.getDataSegmentServerAnnouncer().unannounce();

    //TODO::implement
    return success();

}

From source file:annis.administration.AdministrationDao.java

private BasicDataSource createDataSource(String host, String port, String database, String user,
        String password, boolean useSSL, String schema) {

    String url = "jdbc:postgresql://" + host + ":" + port + "/" + database;

    // DriverManagerDataSource is deprecated
    // return new DriverManagerDataSource("org.postgresql.Driver", url, user, password);
    BasicDataSource result = new BasicDataSource();
    result.setUrl(url);
    if (useSSL) {
        result.setConnectionProperties("ssl=true");
    }
    result.setUsername(user);
    result.setPassword(password);
    result.setValidationQuery("SELECT 1;");
    result.setAccessToUnderlyingConnectionAllowed(true);
    if (schema == null) {
        schema = "public";
    }
    result.setConnectionInitSqls(Arrays.asList("SET search_path TO \"$user\"," + schema));

    result.setDriverClassName("org.postgresql.Driver");

    return result;
}
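
The connectionInitSqls statements run once on each new physical connection, so the PostgreSQL search_path is in place before a connection ever enters the pool; combined with the validation query, this keeps pooled connections in a known state no matter which caller borrowed them last.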

From source file:net.sp1d.chym.loader.RootConfig.java

@Bean
DataSource dataSource() {
    BasicDataSource ds = new BasicDataSource();
    ds.setUrl(env.getProperty("db.conn.jdbcUrl"));
    ds.setUsername(env.getProperty("db.conn.user"));
    ds.setPassword(env.getProperty("db.conn.password"));
    ds.setDriverClassName(env.getProperty("db.conn.driverClass"));

    return ds;
}

From source file:net.sp1d.chym.loader.RootConfigDev.java

@Bean
DataSource dataSource() {
    BasicDataSource ds = new BasicDataSource();
    //        ds.setDriverClassName("org.hsqldb.jdbc.JDBCDriver");        
    ////        ds.setUrl("jdbc:hsqldb:mem:db");
    //        ds.setUrl("jdbc:hsqldb:file:db/db");
    //        ds.setUsername("SA");
    //        ds.setPassword("SA");

    ds.setUrl(env.getProperty("db.conn.jdbcUrl"));
    ds.setUsername(env.getProperty("db.conn.user"));
    ds.setPassword(env.getProperty("db.conn.password"));
    ds.setDriverClassName(env.getProperty("db.conn.driverClass"));

    return ds;
}
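
This dev configuration reads the same db.conn.* properties as RootConfig above; the commented-out lines preserve an earlier embedded HSQLDB setup that the property-driven configuration replaced.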

From source file:no.kantega.publishing.common.util.database.dbConnectionFactory.java

public static void loadConfiguration() {
    try {

        setConfiguration();

        verifyCompleteDatabaseConfiguration();

        DriverManagerDataSource rawDataSource = new DriverManagerDataSource();
        rawDataSource.setDriverClassName(dbDriver);
        rawDataSource.setUrl(dbUrl);

        if (!dbNTMLAuthentication) {
            rawDataSource.setUsername(dbUsername);
            rawDataSource.setPassword(dbPassword);
        }

        if (dbEnablePooling) {
            // Enable DBCP pooling
            BasicDataSource bds = new BasicDataSource();
            bds.setMaxTotal(dbMaxConnections);
            bds.setMaxIdle(dbMaxIdleConnections);
            bds.setMinIdle(dbMinIdleConnections);
            if (dbMaxWait != -1) {
                bds.setMaxWaitMillis(1000 * dbMaxWait);
            }

            if (dbDefaultQueryTimeout != -1) {
                bds.setDefaultQueryTimeout(dbDefaultQueryTimeout);
            }

            bds.setDriverClassName(dbDriver);
            if (!dbNTMLAuthentication) {
                bds.setUsername(dbUsername);
                bds.setPassword(dbPassword);
            }
            bds.setUrl(dbUrl);

            if (dbUseTransactions) {
                bds.setDefaultTransactionIsolation(dbTransactionIsolationLevel);
            }

            if (dbCheckConnections) {
                // Ensures connections are released when they are closed by the database/firewall
                bds.setValidationQuery("SELECT max(ContentId) from content");
                bds.setTimeBetweenEvictionRunsMillis(1000 * 60 * 2);
                bds.setMinEvictableIdleTimeMillis(1000 * 60 * 5);
                bds.setNumTestsPerEvictionRun(dbMaxConnections);
                if (dbRemoveAbandonedTimeout > 0) {
                    bds.setRemoveAbandonedTimeout(dbRemoveAbandonedTimeout);
                    bds.setLogAbandoned(true);
                }
            }

            ds = bds;
        } else {
            ds = rawDataSource;
        }

        // Use non-pooled datasource for table creation since validation query might fail
        ensureDatabaseExists(rawDataSource);
        if (shouldMigrateDatabase) {
            migrateDatabase(servletContext, rawDataSource);
        }

        if (dbUseTransactions) {
            log.info("Using transactions, database transaction isolation level set to "
                    + dbTransactionIsolationLevel);
        } else {
            log.info("Not using transactions");
        }

        if (debugConnections) {
            proxyDs = (DataSource) Proxy.newProxyInstance(DataSource.class.getClassLoader(),
                    new Class[] { DataSource.class }, new DataSourceWrapper(ds));
        }

    } catch (Exception e) {
        log.error("********* could not read aksess.conf **********", e);
    }

}
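
Pooling is optional here: the raw DriverManagerDataSource is kept for schema creation and migration, where the validation query could fail against a not-yet-initialized database, while the pooled BasicDataSource adds periodic eviction runs and a validation query so connections silently dropped by the database or a firewall are detected and replaced.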

From source file:olsps.com.healthsoftproject.config.ApplicationConfigClass.java

@Bean(name = "dataSource")
public DataSource getDataSource() {
    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setDriverClassName("org.apache.derby.jdbc.ClientDriver");
    dataSource.setUrl("jdbc:derby://localhost:1527/medicalcenter");
    dataSource.setUsername("yusufcassim");
    dataSource.setPassword("yusufcassim");
    return dataSource;
}

From source file:org.apache.druid.metadata.storage.derby.DerbyConnector.java

@Inject
public DerbyConnector(MetadataStorage storage, Supplier<MetadataStorageConnectorConfig> config,
        Supplier<MetadataStorageTablesConfig> dbTables) {
    super(config, dbTables);

    final BasicDataSource datasource = getDatasource();
    datasource.setDriverClassLoader(getClass().getClassLoader());
    datasource.setDriverClassName("org.apache.derby.jdbc.ClientDriver");

    this.dbi = new DBI(datasource);
    this.storage = storage;
    log.info("Derby connector instantiated with metadata storage [%s].", this.storage.getClass().getName());
}
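
As in the MySQL connector below, the extension's own class loader is handed to DBCP so the classloader-isolated driver can be found. Note that org.apache.derby.jdbc.ClientDriver connects to a Derby network server rather than an embedded database.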

From source file:org.apache.druid.metadata.storage.mysql.MySQLConnector.java

@Inject
public MySQLConnector(Supplier<MetadataStorageConnectorConfig> config,
        Supplier<MetadataStorageTablesConfig> dbTables, MySQLConnectorConfig connectorConfig) {
    super(config, dbTables);

    final BasicDataSource datasource = getDatasource();
    // MySQL driver is classloader isolated as part of the extension
    // so we need to help JDBC find the driver
    datasource.setDriverClassLoader(getClass().getClassLoader());
    datasource.setDriverClassName("com.mysql.jdbc.Driver");
    datasource.addConnectionProperty("useSSL", String.valueOf(connectorConfig.isUseSSL()));
    if (connectorConfig.isUseSSL()) {
        log.info("SSL is enabled on this MySQL connection. ");

        datasource.addConnectionProperty("verifyServerCertificate",
                String.valueOf(connectorConfig.isVerifyServerCertificate()));
        if (connectorConfig.isVerifyServerCertificate()) {
            log.info("Server certificate verification is enabled. ");

            if (connectorConfig.getTrustCertificateKeyStoreUrl() != null) {
                datasource.addConnectionProperty("trustCertificateKeyStoreUrl",
                        new File(connectorConfig.getTrustCertificateKeyStoreUrl()).toURI().toString());
            }
            if (connectorConfig.getTrustCertificateKeyStoreType() != null) {
                datasource.addConnectionProperty("trustCertificateKeyStoreType",
                        connectorConfig.getTrustCertificateKeyStoreType());
            }
            if (connectorConfig.getTrustCertificateKeyStorePassword() == null) {
                log.warn(
                        "Trust store password is empty. Ensure that the trust store has been configured with an empty password.");
            } else {
                datasource.addConnectionProperty("trustCertificateKeyStorePassword",
                        connectorConfig.getTrustCertificateKeyStorePassword());
            }
        }
        if (connectorConfig.getClientCertificateKeyStoreUrl() != null) {
            datasource.addConnectionProperty("clientCertificateKeyStoreUrl",
                    new File(connectorConfig.getClientCertificateKeyStoreUrl()).toURI().toString());
        }
        if (connectorConfig.getClientCertificateKeyStoreType() != null) {
            datasource.addConnectionProperty("clientCertificateKeyStoreType",
                    connectorConfig.getClientCertificateKeyStoreType());
        }
        if (connectorConfig.getClientCertificateKeyStorePassword() != null) {
            datasource.addConnectionProperty("clientCertificateKeyStorePassword",
                    connectorConfig.getClientCertificateKeyStorePassword());
        }
        Joiner joiner = Joiner.on(",").skipNulls();
        if (connectorConfig.getEnabledSSLCipherSuites() != null) {
            datasource.addConnectionProperty("enabledSSLCipherSuites",
                    joiner.join(connectorConfig.getEnabledSSLCipherSuites()));
        }
        if (connectorConfig.getEnabledTLSProtocols() != null) {
            datasource.addConnectionProperty("enabledTLSProtocols",
                    joiner.join(connectorConfig.getEnabledTLSProtocols()));
        }
    }

    // use double-quotes for quoting columns, so we can write SQL that works with most databases
    datasource.setConnectionInitSqls(ImmutableList.of("SET sql_mode='ANSI_QUOTES'"));

    this.dbi = new DBI(datasource);

    log.info("Configured MySQL as metadata storage");
}