Example usage for org.apache.commons.dbcp2 BasicDataSource setUrl

Introduction

This page collects example usages of org.apache.commons.dbcp2 BasicDataSource.setUrl drawn from open-source projects.

Prototype

public synchronized void setUrl(String url) 

Document

Sets the url (the connection URL passed to the JDBC driver).

Note: this method currently has no effect once the pool has been initialized.
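
A minimal sketch of the resulting call order (the H2 driver, URL, and credentials here are illustrative, not taken from the examples below): all pool configuration, including setUrl, must happen before the first getConnection() call, since that call initializes the pool.

import java.sql.Connection;

import org.apache.commons.dbcp2.BasicDataSource;

public class SetUrlExample {
    public static void main(String[] args) throws Exception {
        BasicDataSource ds = new BasicDataSource();
        ds.setDriverClassName("org.h2.Driver");
        ds.setUrl("jdbc:h2:mem:example"); // must be called before the pool initializes
        ds.setUsername("sa");
        ds.setPassword("");

        try (Connection conn = ds.getConnection()) { // first borrow creates the pool
            // use the connection...
        }

        // From here on, setUrl(...) is silently ignored by the live pool;
        // close and recreate the data source to point it at a different URL.
        ds.close();
    }
}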

Usage

From source file:com.thoughtworks.go.server.database.H2Database.java

private void configureDataSource(BasicDataSource source, String url) {
    String databaseUsername = configuration.getUser();
    String databasePassword = configuration.getPassword();
    LOG.info("[db] Using connection configuration {} [User: {}]", url, databaseUsername);
    source.setDriverClassName("org.h2.Driver");
    source.setUrl(url);
    source.setUsername(databaseUsername);
    source.setPassword(databasePassword);
    source.setMaxTotal(configuration.getMaxActive());
    source.setMaxIdle(configuration.getMaxIdle());
}

From source file:net.gcolin.simplerepo.search.SearchController.java

public SearchController(ConfigurationManager configManager) throws IOException {
    this.configManager = configManager;
    File plugins = new File(configManager.getRoot(), "plugins");
    plugins.mkdirs();
    System.setProperty("derby.system.home", plugins.getAbsolutePath());
    BasicDataSource s = new BasicDataSource();
    s.setDriverClassName("org.apache.derby.jdbc.EmbeddedDriver");
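    // add ;create=true only on first run, when the embedded Derby database does not exist yet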
    s.setUrl("jdbc:derby:search" + (new File(plugins, "search").exists() ? "" : ";create=true"));
    s.setUsername("su");
    s.setPassword("");
    s.setMaxTotal(10);
    s.setMinIdle(0);
    s.setDefaultAutoCommit(true);
    datasource = s;

    Set<String> allTables = new HashSet<>();
    Connection connection = null;

    try {
        try {
            connection = datasource.getConnection();
            connection.setAutoCommit(false);
            DatabaseMetaData dbmeta = connection.getMetaData();
            try (ResultSet rs = dbmeta.getTables(null, null, null, new String[] { "TABLE" })) {
                while (rs.next()) {
                    allTables.add(rs.getString("TABLE_NAME").toLowerCase());
                }
            }

            if (!allTables.contains("artifact")) {
                QueryRunner run = new QueryRunner();
                run.update(connection,
                        "CREATE TABLE artifactindex(artifact bigint NOT NULL, version bigint NOT NULL)");
                run.update(connection, "INSERT INTO artifactindex (artifact,version) VALUES (?,?)", 1L, 1L);
                run.update(connection,
                        "CREATE TABLE artifact(id bigint NOT NULL,groupId character varying(120), artifactId character varying(120),CONSTRAINT artifact_pkey PRIMARY KEY (id))");
                run.update(connection,
                        "CREATE TABLE artifactversion(artifact_id bigint NOT NULL,id bigint NOT NULL,"
                                + "version character varying(100)," + "reponame character varying(30),"
                                + "CONSTRAINT artifactversion_pkey PRIMARY KEY (id),"
                                + "CONSTRAINT fk_artifactversion_artifact_id FOREIGN KEY (artifact_id) REFERENCES artifact (id) )");
                run.update(connection,
                        "CREATE TABLE artifacttype(version_id bigint NOT NULL,packaging character varying(20) NOT NULL,classifier character varying(30),"
                                + "CONSTRAINT artifacttype_pkey PRIMARY KEY (version_id,packaging,classifier),"
                                + "CONSTRAINT fk_artifacttype_version FOREIGN KEY (version_id) REFERENCES artifactversion (id))");
                run.update(connection, "CREATE INDEX artifactindex ON artifact(groupId,artifactId)");
                run.update(connection, "CREATE INDEX artifactgroupindex ON artifact(groupId)");
                run.update(connection, "CREATE INDEX artifactversionindex ON artifactversion(version)");
            }
            connection.commit();
        } catch (SQLException ex) {
            if (connection != null) { // getConnection() may have failed before assignment
                connection.rollback();
            }
            throw ex;
        } finally {
            DbUtils.close(connection);
        }
    } catch (SQLException ex) {
        throw new IOException(ex);
    }
}

From source file:io.druid.metadata.SQLMetadataConnector.java

protected BasicDataSource getDatasource() {
    MetadataStorageConnectorConfig connectorConfig = getConfig();

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(connectorConfig.getUser());
    dataSource.setPassword(connectorConfig.getPassword());
    String uri = connectorConfig.getConnectURI();
    dataSource.setUrl(uri);

    dataSource.setValidationQuery(getValidationQuery());
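    // run the validation query whenever a connection is borrowed, so stale connections are replaced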
    dataSource.setTestOnBorrow(true);

    return dataSource;
}

From source file:com.norconex.collector.core.data.store.impl.jdbc.JDBCCrawlDataStore.java

private DataSource createDataSource(String dbDir) {
    BasicDataSource ds = new BasicDataSource();
    if (database == Database.DERBY) {
        ds.setDriverClassName("org.apache.derby.jdbc.EmbeddedDriver");
        ds.setUrl("jdbc:derby:" + dbDir + ";create=true");
    } else {
        ds.setDriverClassName("org.h2.Driver");
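        // WRITE_DELAY=0 tells H2 to flush commits to disk immediately; AUTOCOMMIT=ON enables autocommit at the database level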
        ds.setUrl("jdbc:h2:" + dbDir + ";WRITE_DELAY=0;AUTOCOMMIT=ON");
    }
    ds.setDefaultAutoCommit(true);
    return ds;
}

From source file:de.micromata.genome.util.runtime.LocalSettingsEnv.java

/**
 * Parses the data source (db.ds.*) definitions from the local settings.
 */
protected void parseDs() {
    // db.ds.rogerdb.name=RogersOracle
    // db.ds.rogerdb.drivername=oracle.jdbc.driver.OracleDriver
    // db.ds.rogerdb.url=jdbc:oracle:thin:@localhost:1521:rogdb
    // db.ds.rogerdb.username=genome
    // db.ds.rogerdb.password=genome
    List<String> dse = localSettings.getKeysPrefixWithInfix("db.ds", "name");
    for (String dsn : dse) {
        String key = dsn + ".name";
        String name = localSettings.get(key);
        if (StringUtils.isBlank(name)) {
            log.error("Name in local-settings is not defined with key: " + key);
            continue;
        }
        key = dsn + ".drivername";
        String driverName = localSettings.get(key);
        if (StringUtils.isBlank(driverName)) {
            log.error("drivername in local-settings is not defined with key: " + key);
            continue;
        }
        key = dsn + ".url";
        String url = localSettings.get(key);
        if (StringUtils.isBlank(url)) {
            log.error("url in local-settings is not defined with key: " + key);
            continue;
        }
        key = dsn + ".username";
        String userName = localSettings.get(key);
        key = dsn + ".password";
        String password = localSettings.get(key);
        BasicDataSource bd = dataSourceSuplier.get();

        bd.setDriverClassName(driverName);
        bd.setUrl(url);
        bd.setUsername(userName);
        bd.setPassword(password);
        bd.setMaxTotal(localSettings.getIntValue(dsn + ".maxActive",
                GenericKeyedObjectPoolConfig.DEFAULT_MAX_TOTAL_PER_KEY));
        bd.setMaxIdle(localSettings.getIntValue(dsn + ".maxIdle",
                GenericKeyedObjectPoolConfig.DEFAULT_MAX_IDLE_PER_KEY));
        bd.setMinIdle(localSettings.getIntValue(dsn + ".minIdle",
                GenericKeyedObjectPoolConfig.DEFAULT_MIN_IDLE_PER_KEY));
        bd.setMaxWaitMillis(localSettings.getLongValue(dsn + ".maxWait",
                GenericKeyedObjectPoolConfig.DEFAULT_MAX_WAIT_MILLIS));
        bd.setInitialSize(localSettings.getIntValue(dsn + ".intialSize", 0));
        bd.setDefaultCatalog(localSettings.get(dsn + ".defaultCatalog", null));
        bd.setDefaultAutoCommit(localSettings.getBooleanValue(dsn + ".defaultAutoCommit", true));
        bd.setValidationQuery(localSettings.get(dsn + ".validationQuery", null));
        bd.setValidationQueryTimeout(localSettings.getIntValue(dsn + ".validationQueryTimeout", -1));
        dataSources.put(name, bd);
    }
}

From source file:cz.muni.fi.pv168.project.hotelmanager.TestReservationManagerImpl.java

@Before
public void setUp() throws SQLException {
    BasicDataSource bds = new BasicDataSource();
    //set JDBC driver and URL
    bds.setDriverClassName(EmbeddedDriver.class.getName());
    bds.setUrl("jdbc:derby:memory:TestReservationManagerDB;create=true");
    this.dataSource = bds;
    //populate db with tables and data
    new ResourceDatabasePopulator(new ClassPathResource("schema-javadb.sql")).execute(bds);
    guestManager = new GuestManagerImpl(bds);
    roomManager = new RoomManagerImpl(bds);
    reservationManager = new ReservationManagerImpl(prepareClockMock(now), bds);
}

From source file:com.emc.vipr.sync.source.AtmosSource.java

@Override
public void parseCustomOptions(CommandLine line) {
    AtmosUtil.AtmosUri atmosUri = AtmosUtil.parseUri(sourceUri);
    endpoints = atmosUri.endpoints;
    uid = atmosUri.uid;
    secret = atmosUri.secret;
    namespaceRoot = atmosUri.rootPath;

    if (line.hasOption(SOURCE_OIDLIST_OPTION))
        oidFile = line.getOptionValue(SOURCE_OIDLIST_OPTION);

    if (line.hasOption(SOURCE_NAMELIST_OPTION))
        nameFile = line.getOptionValue(SOURCE_NAMELIST_OPTION);

    if (line.hasOption(SOURCE_SQLQUERY_OPTION)) {
        query = line.getOptionValue(SOURCE_SQLQUERY_OPTION);

        // Initialize a connection pool
        BasicDataSource ds = new BasicDataSource();
        ds.setUrl(line.getOptionValue(JDBC_URL_OPT));
        if (line.hasOption(JDBC_DRIVER_OPT))
            ds.setDriverClassName(line.getOptionValue(JDBC_DRIVER_OPT));
        ds.setUsername(line.getOptionValue(JDBC_USER_OPT));
        ds.setPassword(line.getOptionValue(JDBC_PASSWORD_OPT));
        ds.setMaxTotal(200);
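        // cap pooled prepared statements (takes effect when statement pooling is enabled)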
        ds.setMaxOpenPreparedStatements(180);
        setDataSource(ds);
    }

    deleteTags = line.hasOption(DELETE_TAGS_OPT);
}

From source file:com.twosigma.beaker.sql.JDBCClient.java

public BasicDataSource getDataSource(String uri) throws DBConnectionException {
    synchronized (this) {
        try {

            BasicDataSource ds = dsMap.get(uri);
            if (ds == null) {
                Driver driver = null;
                for (Driver test : drivers) {
                    if (test.acceptsURL(uri)) {
                        driver = test;
                        break;
                    }
                }
                if (driver == null) {
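                    // throws SQLException if no registered driver accepts this URL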
                    DriverManager.getDriver(uri);
                }
                ds = new BasicDataSource();
                ds.setDriver(driver);
                ds.setUrl(uri);
                dsMap.put(uri, ds);
            }
            return ds;

        } catch (SQLException e) {
            //Logger.getLogger(JDBCClient.class.getName()).log(Level.SEVERE, null, e);
            throw new DBConnectionException(uri, e);
        }
    }
}

From source file:com.dsf.dbxtract.cdc.journal.JournalExecutor.java

/**
 * @param agentName
 *            cdc agent's assigned name
 * @param zookeeper
 *            connection string to ZooKeeper server
 * @param handler
 *            {@link JournalHandler}
 * @param source
 *            {@link Source}
 */
public JournalExecutor(String agentName, String zookeeper, JournalHandler handler, Source source) {
    logPrefix = agentName + " :: ";
    if (logger.isDebugEnabled())
        logger.debug(logPrefix + "Creating executor for " + handler + " and " + source);
    this.agentName = agentName;
    this.zookeeper = zookeeper;
    this.handler = handler;
    this.source = source;
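    // one shared connection pool per Source, created lazily on first use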
    BasicDataSource ds = dataSources.get(source);
    if (ds == null) {
        if (logger.isDebugEnabled())
            logger.debug(agentName + " :: setting up a connection pool for " + source.toString());
        ds = new BasicDataSource();
        ds.setDriverClassName(source.getDriver());
        ds.setUsername(source.getUser());
        ds.setPassword(source.getPassword());
        ds.setUrl(source.getConnection());
        dataSources.put(source, ds);
    }

    if (statistics == null)
        statistics = new Statistics();
}

From source file:io.druid.indexing.jdbc.JDBCIndexTask.java

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema,
            new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler()
            .addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics),
                    ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(ioConfig.getUser());
    dataSource.setPassword(ioConfig.getPassword());
    dataSource.setUrl(ioConfig.getConnectURI());
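    // make sure the JDBC driver is loaded through the task's classloader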
    dataSource.setDriverClassLoader(getClass().getClassLoader());

    final String table = ioConfig.getTableName();

    if (!StringUtils.isEmpty(ioConfig.getDriverClass())) {
        dataSource.setDriverClassName(ioConfig.getDriverClass());
    }

    final Handle handle = new DBI(dataSource).open();
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
            final AppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics)) {
        toolbox.getDataSegmentServerAnnouncer().announce();
        appenderator = appenderator0;

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getJdbcOffsets().getOffsetMaps());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final JDBCOffsets restoredNextPartitions = toolbox.getObjectMapper()
                    .convertValue(restoredMetadataMap.get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            nextOffsets.putAll(restoredNextPartitions.getOffsetMaps());

            // Sanity checks.
            if (!restoredNextPartitions.getTable().equals(ioConfig.getTableName())) {
                throw new ISE("WTF?! Restored table[%s] but expected table[%s]",
                        restoredNextPartitions.getTable(), ioConfig.getTableName());
            }

            if (!nextOffsets.equals(ioConfig.getJdbcOffsets().getOffsetMaps())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets,
                        ioConfig.getJdbcOffsets().getOffsetMaps());
            }
        }

        // Set up sequenceNames.

        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum,
                    String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {
            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);

                return new Committer() {
                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_OFFSETS,
                                new JDBCOffsets(ioConfig.getJdbcOffsets().getTable(), snapshot));

                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };

        //      Set<Integer> assignment = assignPartitionsAndSeekToNext(handle);
        //      boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            //        while (stillReading) {
            //          if (possiblyPause(assignment)) {
            //             The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
            //             partitions upon resuming. This is safe even if the end offsets have not been modified.
            //            assignment = assignPartitionsAndSeekToNext(handle);
            //            if (assignment.isEmpty()) {
            //              log.info("All partitions have been fully read");
            //              publishOnStop = true;
            //              stopRequested = true;
            //            }
            //          }
            //          if (stopRequested) {
            //            break;
            //          }

            final String query = (ioConfig.getQuery() != null) ? ioConfig.getQuery()
                    : makeQuery(ioConfig.getColumns(), ioConfig.getJdbcOffsets());
            org.skife.jdbi.v2.Query<Map<String, Object>> dbiQuery = handle.createQuery(query);

            final ResultIterator<InputRow> rowIterator = dbiQuery.map(new ResultSetMapper<InputRow>() {
                List<String> queryColumns = (ioConfig.getColumns() == null) ? Lists.<String>newArrayList()
                        : ioConfig.getColumns();
                List<Boolean> columnIsNumeric = Lists.newArrayList();

                @Override
                public InputRow map(final int index, final ResultSet r, final StatementContext ctx)
                        throws SQLException {
                    try {
                        if (queryColumns.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                queryColumns.add(metadata.getColumnName(idx));
                            }
                            Preconditions.checkArgument(queryColumns.size() > 0,
                                    String.format("No column in table [%s]", table));
                            verifyParserSpec(parser.getParseSpec(), queryColumns);
                        }
                        if (columnIsNumeric.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            Preconditions.checkArgument(metadata.getColumnCount() >= queryColumns.size(),
                                    String.format(
                                            "number of column names [%d] exceeds the actual number of returning column values [%d]",
                                            queryColumns.size(), metadata.getColumnCount()));
                            columnIsNumeric.add(false); // dummy to make start index to 1
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                boolean isNumeric = false;
                                int type = metadata.getColumnType(idx);
                                switch (type) {
                                case BIGINT:
                                case DECIMAL:
                                case DOUBLE:
                                case FLOAT:
                                case INTEGER:
                                case NUMERIC:
                                case SMALLINT:
                                case TINYINT:
                                    isNumeric = true;
                                    break;
                                }
                                columnIsNumeric.add(isNumeric);
                            }
                        }
                        final Map<String, Object> columnMap = Maps.newHashMap();
                        int columnIdx = 1;
                        for (String column : queryColumns) {
                            Object objToPut = null;
                            if (table != null) {
                                objToPut = r.getObject(column);
                            } else {
                                objToPut = r.getObject(columnIdx);
                            }
                            columnMap.put(column, objToPut == null ? columnIsNumeric.get(columnIdx) : objToPut);

                            columnIdx++;
                        }
                        return parser.parse(columnMap);

                    } catch (IllegalArgumentException e) {
                        throw new SQLException(e);
                    }
                }
            }).iterator();

            org.skife.jdbi.v2.Query<Map<String, Object>> maxItemQuery = handle
                    .createQuery(makeMaxQuery(ioConfig.getJdbcOffsets()));
            long currOffset = maxItemQuery != null ? (long) maxItemQuery.list(1).get(0).get("MAX") : 0;

            while (rowIterator.hasNext()) {
                InputRow row = rowIterator.next();
                try {
                    if (!ioConfig.getMinimumMessageTime().isPresent()
                            || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {

                        final String sequenceName = sequenceNames.get(nextOffsets.keySet().toArray()[0]); //TODO::: check data
                        final AppenderatorDriverAddResult addResult = driver.add(row, sequenceName,
                                committerSupplier);

                        if (addResult.isOk()) {
                            // If the number of rows in the segment exceeds the threshold after adding a row,
                            // move the segment out from the active segments of AppenderatorDriver to make a new segment.
                            if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) {
                                driver.moveSegmentOut(sequenceName,
                                        ImmutableList.of(addResult.getSegmentIdentifier()));
                            }
                        } else {
                            // Failure to allocate segment puts determinism at risk, bail out to be safe.
                            // May want configurable behavior here at some point.
                            // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                            throw new ISE("Could not allocate segment for row with timestamp[%s]",
                                    row.getTimestamp());
                        }

                        fireDepartmentMetrics.incrementProcessed();
                    } else {
                        fireDepartmentMetrics.incrementThrownAway();
                    }
                } catch (ParseException e) {
                    if (tuningConfig.isReportParseExceptions()) {
                        throw e;
                    } else {
                        log.debug(e, "Dropping unparseable row[%s].", row);

                        fireDepartmentMetrics.incrementUnparseable();
                    }
                }
            }
            nextOffsets.put((int) ioConfig.getJdbcOffsets().getOffsetMaps().keySet().toArray()[0], currOffset);
            //          if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
            //              && assignment.remove(record.partition())) {
            //            log.info("Finished reading table[%s], partition[%,d].", record.topic(), record.partition());
            //            stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
            //          }
            //        }
        } finally {
            driver.persist(committerSupplier.get()); // persist pending data
        }
        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }

            status = Status.PUBLISHING;
        }

        final TransactionalSegmentPublisher publisher = (segments, commitMetadata) -> {

            final JDBCOffsets finalOffsets = toolbox.getObjectMapper()
                    .convertValue(((Map) commitMetadata).get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            // Sanity check, we should only be publishing things that match our desired end state. //TODO::: Santiny Check!
            //        if (!endOffsets.equals(finalOffsets.getOffsetMaps())) {
            //          throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
            //        }

            final SegmentTransactionalInsertAction action;

            if (ioConfig.isUseTransaction()) {
                action = new SegmentTransactionalInsertAction(segments,
                        new JDBCDataSourceMetadata(ioConfig.getJdbcOffsets()),
                        new JDBCDataSourceMetadata(finalOffsets) //TODO::: Check Values
                );
            } else {
                action = new SegmentTransactionalInsertAction(segments, null, null);
            }

            log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());

            return toolbox.getTaskActionClient().submit(action).isSuccess();
        };

        // Supervised kafka tasks are killed by JDBCSupervisor if they are stuck during publishing segments or waiting
        // for hand off. See JDBCSupervisorIOConfig.completionTimeout.
        final SegmentsAndMetadata published = driver
                .publish(publisher, committerSupplier.get(), sequenceNames.values()).get();

        final SegmentsAndMetadata handedOff;
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOff = driver.registerHandoff(published).get();
        } else {
            handedOff = driver.registerHandoff(published).get(tuningConfig.getHandoffConditionTimeout(),
                    TimeUnit.MILLISECONDS);
        }

        if (handedOff == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].", Joiner.on(", ")
                    .join(Iterables.transform(handedOff.getSegments(), new Function<DataSegment, String>() {
                        @Override
                        public String apply(DataSegment input) {
                            return input.getIdentifier();
                        }
                    })), handedOff.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
                && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
        handle.close();
    }

    toolbox.getDataSegmentServerAnnouncer().unannounce();

    //TODO::implement
    return success();

}