Example usage for org.apache.commons.dbcp2 BasicDataSource BasicDataSource

Introduction

On this page you can find real-world usage examples for the org.apache.commons.dbcp2.BasicDataSource constructor. Each snippet below shows how a different open-source project instantiates the pool and configures its connection properties.

Prototype

public BasicDataSource()

Usage
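
Before the project snippets, here is a minimal, self-contained sketch of the typical lifecycle: construct the pool with the no-argument constructor, point it at a database, borrow a connection, and shut the pool down. The in-memory H2 URL and credentials are illustrative assumptions, not taken from any project on this page.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

import org.apache.commons.dbcp2.BasicDataSource;

public class BasicDataSourceSketch {
    public static void main(String[] args) throws Exception {
        BasicDataSource ds = new BasicDataSource();
        ds.setDriverClassName("org.h2.Driver"); // hypothetical driver; substitute your own
        ds.setUrl("jdbc:h2:mem:example");
        ds.setUsername("sa");
        ds.setPassword("");
        ds.setMaxTotal(8); // upper bound on concurrently borrowed connections

        // getConnection() borrows from the pool; try-with-resources returns it.
        try (Connection conn = ds.getConnection(); Statement stmt = conn.createStatement()) {
            try (ResultSet rs = stmt.executeQuery("SELECT 1")) {
                rs.next();
                System.out.println(rs.getInt(1));
            }
        }

        ds.close(); // BasicDataSource is Closeable; this shuts the pool down
    }
}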

From source file:com.emc.vipr.sync.filter.TrackingFilter.java

@Override
public void parseCustomOptions(CommandLine line) {
    if (!line.hasOption(DB_URL_OPT))
        throw new ConfigurationException("Must provide a database to use the tracking filter");

    if (line.hasOption(TABLE_OPT))
        tableName = line.getOptionValue(TABLE_OPT);

    createTable = line.hasOption(CREATE_TABLE_OPT);
    processAllObjects = line.hasOption(REPROCESS_OPT);

    if (line.hasOption(META_OPT))
        metaTags = Arrays.asList(line.getOptionValue(META_OPT).split(","));

    // Initialize a DB connection pool
    BasicDataSource ds = new BasicDataSource();
    ds.setUrl(line.getOptionValue(DB_URL_OPT));
    if (line.hasOption(DB_DRIVER_OPT))
        ds.setDriverClassName(line.getOptionValue(DB_DRIVER_OPT));
    ds.setUsername(line.getOptionValue(DB_USER_OPT));
    ds.setPassword(line.getOptionValue(DB_PASSWORD_OPT));
    ds.setMaxTotal(200);
    ds.setMaxOpenPreparedStatements(180);
    dataSource = ds;
}
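
One caveat on the snippet above: setMaxOpenPreparedStatements only takes effect when prepared statement pooling is enabled, which this code does not do. A minimal hedged sketch of the combination that actually activates the limit:

BasicDataSource ds = new BasicDataSource();
ds.setPoolPreparedStatements(true); // turn the statement pool on
ds.setMaxOpenPreparedStatements(180); // now this cap is enforced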

From source file:com.emc.vipr.sync.source.AtmosSource.java

@Override
public void parseCustomOptions(CommandLine line) {
    AtmosUtil.AtmosUri atmosUri = AtmosUtil.parseUri(sourceUri);
    endpoints = atmosUri.endpoints;
    uid = atmosUri.uid;
    secret = atmosUri.secret;
    namespaceRoot = atmosUri.rootPath;

    if (line.hasOption(SOURCE_OIDLIST_OPTION))
        oidFile = line.getOptionValue(SOURCE_OIDLIST_OPTION);

    if (line.hasOption(SOURCE_NAMELIST_OPTION))
        nameFile = line.getOptionValue(SOURCE_NAMELIST_OPTION);

    if (line.hasOption(SOURCE_SQLQUERY_OPTION)) {
        query = line.getOptionValue(SOURCE_SQLQUERY_OPTION);

        // Initialize a connection pool
        BasicDataSource ds = new BasicDataSource();
        ds.setUrl(line.getOptionValue(JDBC_URL_OPT));
        if (line.hasOption(JDBC_DRIVER_OPT))
            ds.setDriverClassName(line.getOptionValue(JDBC_DRIVER_OPT));
        ds.setUsername(line.getOptionValue(JDBC_USER_OPT));
        ds.setPassword(line.getOptionValue(JDBC_PASSWORD_OPT));
        ds.setMaxTotal(200);
        ds.setMaxOpenPreparedStatements(180);
        setDataSource(ds);
    }

    deleteTags = line.hasOption(DELETE_TAGS_OPT);
}

From source file:com.thoughtworks.go.server.service.BackupServiceH2IntegrationTest.java

private BasicDataSource constructTestDataSource(File file) {
    BasicDataSource source = new BasicDataSource();
    source.setDriverClassName("org.h2.Driver");
    source.setUrl("jdbc:h2:" + file.getAbsolutePath() + "/cruise;DB_CLOSE_DELAY=-1;DB_CLOSE_ON_EXIT=FALSE");
    source.setUsername("sa");
    source.setPassword("");
    source.setMaxTotal(32);
    source.setMaxIdle(32);
    return source;
}

From source file:com.zaxxer.hikari.benchmark.BenchBase.java

private void setupDBCP2basic() throws SQLException {
    BasicDataSource ds = new BasicDataSource();
    ds.setDriverClassName(dbDriver);
    ds.setUsername("sa");
    ds.setPassword("");
    ds.setUrl(jdbcURL);
    ds.setMaxTotal(maxPoolSize);
    ds.setDefaultAutoCommit(false);
    ds.setDefaultTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);

    ds.getConnection().createStatement().execute("CREATE TABLE IF NOT EXISTS test (column varchar);");
    DS = ds;
}
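
A caveat worth flagging in the benchmark snippet above: the connection obtained for the CREATE TABLE statement is never closed, so it stays checked out of the pool. A hedged variant of that one line, returning the connection when done:

try (Connection conn = ds.getConnection(); Statement stmt = conn.createStatement()) {
    stmt.execute("CREATE TABLE IF NOT EXISTS test (column varchar);");
}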

From source file:com.ebay.pulsar.analytics.dao.RDBMS.java

private boolean init(boolean force) {
    if (dataSource == null || force) {
        final BasicDataSource dataSource = new BasicDataSource();
        dataSource.setUsername(userName);
        dataSource.setPassword(userPwd);
        dataSource.setUrl(url);
        dataSource.setTestOnBorrow(true);
        if (validationQuery != null)
            dataSource.setValidationQuery(validationQuery);
        dataSource.setDriverClassLoader(Thread.currentThread().getContextClassLoader());
        dataSource.setDriverClassName(driver);
        this.setDataSource(dataSource);
    }
    return true;
}
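
A note on setDriverClassLoader as used above: it tells DBCP 2 which class loader to resolve the class named in setDriverClassName, which matters when the driver jar is visible only to a plugin or thread-context class loader rather than to DBCP's own. A hedged sketch of the pairing (the driver class name is a hypothetical placeholder):

BasicDataSource ds = new BasicDataSource();
// Resolve the driver class through the caller's context class loader rather than DBCP's own.
ds.setDriverClassLoader(Thread.currentThread().getContextClassLoader());
ds.setDriverClassName("com.mysql.cj.jdbc.Driver"); // hypothetical driver class name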

From source file:dk.statsbiblioteket.doms.licensemodule.persistence.LicenseModuleStorage.java

public static void initialize(String driverName, String driverUrl, String userName, String password)
        throws SQLException {
    dataSource = new BasicDataSource();
    dataSource.setDriverClassName(driverName);
    dataSource.setUsername(userName);
    dataSource.setPassword(password);
    dataSource.setUrl(driverUrl);

    dataSource.setDefaultReadOnly(false);
    dataSource.setDefaultAutoCommit(false);

    // enable detection and logging of connection leaks
    dataSource.setRemoveAbandonedOnBorrow(true);
    dataSource.setRemoveAbandonedOnMaintenance(true);
    dataSource.setRemoveAbandonedTimeout(3600); // 1 hour
    dataSource.setLogAbandoned(true);
    dataSource.setMaxWaitMillis(60000);
    dataSource.setMaxTotal(20);

    INITDATE = new Date();
}
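
Because this pool disables auto-commit, callers must commit explicitly; DBCP 2 rolls back uncommitted work when a dirty connection is returned, and the abandoned-connection settings above reclaim and log anything held past an hour. A minimal hedged usage sketch against this configuration (the table and columns are invented for illustration):

try (Connection conn = dataSource.getConnection();
        PreparedStatement ps = conn.prepareStatement("UPDATE licenses SET active = ? WHERE id = ?")) {
    ps.setBoolean(1, true);
    ps.setLong(2, 42L);
    ps.executeUpdate();
    conn.commit(); // required: defaultAutoCommit is false, so skipping this rolls the update back
}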

From source file:io.druid.indexing.jdbc.JDBCIndexTask.java

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema,
            new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler()
            .addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics),
                    ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(ioConfig.getUser());
    dataSource.setPassword(ioConfig.getPassword());
    dataSource.setUrl(ioConfig.getConnectURI());
    dataSource.setDriverClassLoader(getClass().getClassLoader());

    final String table = ioConfig.getTableName();

    if (!StringUtils.isEmpty(ioConfig.getDriverClass())) {
        dataSource.setDriverClassName(ioConfig.getDriverClass());
    }

    final Handle handle = new DBI(dataSource).open();
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
            final AppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics)) {
        toolbox.getDataSegmentServerAnnouncer().announce();
        appenderator = appenderator0;

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getJdbcOffsets().getOffsetMaps());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final JDBCOffsets restoredNextPartitions = toolbox.getObjectMapper()
                    .convertValue(restoredMetadataMap.get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            nextOffsets.putAll(restoredNextPartitions.getOffsetMaps());

            // Sanity checks.
            if (!restoredNextPartitions.getTable().equals(ioConfig.getTableName())) {
                throw new ISE("WTF?! Restored table[%s] but expected table[%s]",
                        restoredNextPartitions.getTable(), ioConfig.getTableName());
            }

            if (!nextOffsets.equals(ioConfig.getJdbcOffsets().getOffsetMaps())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets,
                        ioConfig.getJdbcOffsets().getOffsetMaps());
            }
        }

        // Set up sequenceNames.

        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum,
                    String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {
            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);

                return new Committer() {
                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_OFFSETS,
                                new JDBCOffsets(ioConfig.getJdbcOffsets().getTable(), snapshot));

                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };

        //      Set<Integer> assignment = assignPartitionsAndSeekToNext(handle);
        //      boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            //        while (stillReading) {
            //          if (possiblyPause(assignment)) {
            //             The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
            //             partitions upon resuming. This is safe even if the end offsets have not been modified.
            //            assignment = assignPartitionsAndSeekToNext(handle);
            //            if (assignment.isEmpty()) {
            //              log.info("All partitions have been fully read");
            //              publishOnStop = true;
            //              stopRequested = true;
            //            }
            //          }
            //          if (stopRequested) {
            //            break;
            //          }

            final String query = (ioConfig.getQuery() != null) ? ioConfig.getQuery()
                    : makeQuery(ioConfig.getColumns(), ioConfig.getJdbcOffsets());
            org.skife.jdbi.v2.Query<Map<String, Object>> dbiQuery = handle.createQuery(query);

            final ResultIterator<InputRow> rowIterator = dbiQuery.map(new ResultSetMapper<InputRow>() {
                List<String> queryColumns = (ioConfig.getColumns() == null) ? Lists.<String>newArrayList()
                        : ioConfig.getColumns();
                List<Boolean> columnIsNumeric = Lists.newArrayList();

                @Override
                public InputRow map(final int index, final ResultSet r, final StatementContext ctx)
                        throws SQLException {
                    try {
                        if (queryColumns.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                queryColumns.add(metadata.getColumnName(idx));
                            }
                            Preconditions.checkArgument(queryColumns.size() > 0,
                                    String.format("No column in table [%s]", table));
                            verifyParserSpec(parser.getParseSpec(), queryColumns);
                        }
                        if (columnIsNumeric.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            Preconditions.checkArgument(metadata.getColumnCount() >= queryColumns.size(),
                                    String.format(
                                            "number of column names [%d] exceeds the actual number of returning column values [%d]",
                                            queryColumns.size(), metadata.getColumnCount()));
                            columnIsNumeric.add(false); // dummy to make start index to 1
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                boolean isNumeric = false;
                                int type = metadata.getColumnType(idx);
                                switch (type) {
                                case BIGINT:
                                case DECIMAL:
                                case DOUBLE:
                                case FLOAT:
                                case INTEGER:
                                case NUMERIC:
                                case SMALLINT:
                                case TINYINT:
                                    isNumeric = true;
                                    break;
                                }
                                columnIsNumeric.add(isNumeric);
                            }
                        }
                        final Map<String, Object> columnMap = Maps.newHashMap();
                        int columnIdx = 1;
                        for (String column : queryColumns) {
                            Object objToPut = null;
                            if (table != null) {
                                objToPut = r.getObject(column);
                            } else {
                                objToPut = r.getObject(columnIdx);
                            }
                            columnMap.put(column, objToPut == null ? columnIsNumeric.get(columnIdx) : objToPut);

                            columnIdx++;
                        }
                        return parser.parse(columnMap);

                    } catch (IllegalArgumentException e) {
                        throw new SQLException(e);
                    }
                }
            }).iterator();

            org.skife.jdbi.v2.Query<Map<String, Object>> maxItemQuery = handle
                    .createQuery(makeMaxQuery(ioConfig.getJdbcOffsets()));
            long currOffset = maxItemQuery != null ? (long) maxItemQuery.list(1).get(0).get("MAX") : 0;

            while (rowIterator.hasNext()) {
                InputRow row = rowIterator.next();
                try {
                    if (!ioConfig.getMinimumMessageTime().isPresent()
                            || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {

                        final String sequenceName = sequenceNames.get(nextOffsets.keySet().toArray()[0]); //TODO::: check data
                        final AppenderatorDriverAddResult addResult = driver.add(row, sequenceName,
                                committerSupplier);

                        if (addResult.isOk()) {
                            // If the number of rows in the segment exceeds the threshold after adding a row,
                            // move the segment out from the active segments of AppenderatorDriver to make a new segment.
                            if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) {
                                driver.moveSegmentOut(sequenceName,
                                        ImmutableList.of(addResult.getSegmentIdentifier()));
                            }
                        } else {
                            // Failure to allocate segment puts determinism at risk, bail out to be safe.
                            // May want configurable behavior here at some point.
                            // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                            throw new ISE("Could not allocate segment for row with timestamp[%s]",
                                    row.getTimestamp());
                        }

                        fireDepartmentMetrics.incrementProcessed();
                    } else {
                        fireDepartmentMetrics.incrementThrownAway();
                    }
                } catch (ParseException e) {
                    if (tuningConfig.isReportParseExceptions()) {
                        throw e;
                    } else {
                        log.debug(e, "Dropping unparseable row[%s].", row);

                        fireDepartmentMetrics.incrementUnparseable();
                    }
                }
            }
            nextOffsets.put((int) ioConfig.getJdbcOffsets().getOffsetMaps().keySet().toArray()[0], currOffset);
            //          if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
            //              && assignment.remove(record.partition())) {
            //            log.info("Finished reading table[%s], partition[%,d].", record.topic(), record.partition());
            //            stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
            //          }
            //        }
        } finally {
            driver.persist(committerSupplier.get()); // persist pending data
        }
        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }

            status = Status.PUBLISHING;
        }

        final TransactionalSegmentPublisher publisher = (segments, commitMetadata) -> {

            final JDBCOffsets finalOffsets = toolbox.getObjectMapper()
                    .convertValue(((Map) commitMetadata).get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            // Sanity check, we should only be publishing things that match our desired end state. //TODO::: Sanity check!
            //        if (!endOffsets.equals(finalOffsets.getOffsetMaps())) {
            //          throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
            //        }

            final SegmentTransactionalInsertAction action;

            if (ioConfig.isUseTransaction()) {
                action = new SegmentTransactionalInsertAction(segments,
                        new JDBCDataSourceMetadata(ioConfig.getJdbcOffsets()),
                        new JDBCDataSourceMetadata(finalOffsets) //TODO::: Check Values
                );
            } else {
                action = new SegmentTransactionalInsertAction(segments, null, null);
            }

            log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());

            return toolbox.getTaskActionClient().submit(action).isSuccess();
        };

        // Supervised JDBC tasks are killed by JDBCSupervisor if they are stuck during publishing segments or waiting
        // for hand off. See JDBCSupervisorIOConfig.completionTimeout.
        final SegmentsAndMetadata published = driver
                .publish(publisher, committerSupplier.get(), sequenceNames.values()).get();

        final SegmentsAndMetadata handedOff;
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOff = driver.registerHandoff(published).get();
        } else {
            handedOff = driver.registerHandoff(published).get(tuningConfig.getHandoffConditionTimeout(),
                    TimeUnit.MILLISECONDS);
        }

        if (handedOff == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].", Joiner.on(", ")
                    .join(Iterables.transform(handedOff.getSegments(), new Function<DataSegment, String>() {
                        @Override
                        public String apply(DataSegment input) {
                            return input.getIdentifier();
                        }
                    })), handedOff.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
                && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
        handle.close();
    }

    toolbox.getDataSegmentServerAnnouncer().unannounce();

    //TODO::implement
    return success();

}

From source file:com.norconex.collector.core.data.store.impl.jdbc.JDBCCrawlDataStore.java

private DataSource createDataSource(String dbDir) {
    BasicDataSource ds = new BasicDataSource();
    if (database == Database.DERBY) {
        ds.setDriverClassName("org.apache.derby.jdbc.EmbeddedDriver");
        ds.setUrl("jdbc:derby:" + dbDir + ";create=true");
    } else {
        ds.setDriverClassName("org.h2.Driver");
        ds.setUrl("jdbc:h2:" + dbDir + ";WRITE_DELAY=0;AUTOCOMMIT=ON");
    }
    ds.setDefaultAutoCommit(true);
    return ds;
}

From source file:i5.las2peer.services.mobsos.surveys.SurveyService.java

/**
 * TODO: write documentation
 * 
 * @throws ClassNotFoundException
 */
private void setupDataSource() throws ClassNotFoundException {

    // request classloader to load JDBC driver class
    Class.forName(jdbcDriverClassName);

    // prepare and configure data source
    dataSource = new BasicDataSource();
    dataSource.setDefaultAutoCommit(true);
    dataSource.setDriverClassName(jdbcDriverClassName);
    dataSource.setUsername(jdbcLogin);
    dataSource.setPassword(jdbcPass);
    dataSource.setUrl(jdbcUrl + jdbcSchema);
    dataSource.setValidationQuery("select 1");
    dataSource.setDefaultQueryTimeout(1000);
    dataSource.setMaxConnLifetimeMillis(100000);
}
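
Two of the knobs above are easy to misread because their units differ: setDefaultQueryTimeout is, to the best of my reading, in seconds (it feeds java.sql.Statement.setQueryTimeout), while setMaxConnLifetimeMillis is in milliseconds. As written, the pool allows statements 1000 seconds but recycles each connection after 100 seconds. A sketch with the units spelled out:

dataSource.setDefaultQueryTimeout(30); // seconds, applied via Statement.setQueryTimeout
dataSource.setMaxConnLifetimeMillis(30L * 60L * 1000L); // milliseconds: 30 minutes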

From source file:io.druid.metadata.SQLMetadataConnector.java

protected BasicDataSource getDatasource() {
    MetadataStorageConnectorConfig connectorConfig = getConfig();

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(connectorConfig.getUser());
    dataSource.setPassword(connectorConfig.getPassword());
    String uri = connectorConfig.getConnectURI();
    dataSource.setUrl(uri);

    dataSource.setValidationQuery(getValidationQuery());
    dataSource.setTestOnBorrow(true);

    return dataSource;
}
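
testOnBorrow, as configured here, runs the validation query on every checkout, adding a round trip per getConnection call. A hedged alternative that trades per-borrow certainty for throughput is background validation of idle connections:

dataSource.setTestOnBorrow(false);
dataSource.setTestWhileIdle(true); // validate idle connections in the background instead
dataSource.setTimeBetweenEvictionRunsMillis(30000L); // evictor runs every 30 seconds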