Example usage for org.apache.commons.dbutils QueryRunner QueryRunner

Introduction

On this page you can find example usages of the org.apache.commons.dbutils QueryRunner() constructor, collected from open-source projects.

Prototype

public QueryRunner() 

Document

Constructor for QueryRunner.
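
Because the no-argument constructor configures no DataSource, the caller supplies and manages the java.sql.Connection on every query or update call, which is the pattern all of the examples below follow. The following is a minimal, self-contained sketch of that pattern; the in-memory H2 JDBC URL and the users table are hypothetical and only illustrate the API.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import org.apache.commons.dbutils.DbUtils;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.handlers.ScalarHandler;

public class QueryRunnerConstructorExample {
    public static void main(String[] args) throws SQLException {
        // Hypothetical in-memory H2 database; any JDBC connection would do.
        Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "");
        try {
            // No DataSource is configured, so every call takes the Connection explicitly.
            QueryRunner run = new QueryRunner();
            run.update(conn, "CREATE TABLE users (id INT PRIMARY KEY, name VARCHAR(64))");
            run.update(conn, "INSERT INTO users (id, name) VALUES (?, ?)", 1, "alice");
            // ScalarHandler returns the first column of the first row.
            Long count = run.query(conn, "SELECT COUNT(*) FROM users", new ScalarHandler<Long>());
            System.out.println("rows: " + count);
        } finally {
            // QueryRunner never closes the connection; the caller must.
            DbUtils.closeQuietly(conn);
        }
    }
}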

Usage

From source file:com.versatus.jwebshield.securitylock.SecurityLockService.java

public boolean resetLock(int userId, String ip) throws SQLException {

    QueryRunner run = new QueryRunner();
    Connection conn = dbHelper.getConnection();

    logger.info("resetting lock for IP " + ip);
    int r = run.update(conn, resetLockSql, new Object[] { ip });

    logger.debug("checkAccountLock: reset response=" + r);

    return r > 0;
}

From source file:com.gs.obevo.dbmetadata.impl.dialects.MsSqlMetadataDialect.java

@Override
public ImmutableCollection<DaRoutine> searchExtraRoutines(final DaSchema schema, String procedureName,
        Connection conn) throws SQLException {
    String nameClause = procedureName != null ? " and ROUTINE_NAME = '" + procedureName + "'\n" : " ";

    String query = "SELECT" + "    ROUTINE_CATALOG," + "    ROUTINE_SCHEMA," + "    ROUTINE_NAME,"
            + "    SPECIFIC_NAME," + "    ROUTINE_TYPE,"
            + "    OBJECT_DEFINITION(OBJECT_ID(ROUTINE_CATALOG + '.' + ROUTINE_SCHEMA + '.' + ROUTINE_NAME)) AS ROUTINE_DEFINITION"
            + " FROM INFORMATION_SCHEMA.ROUTINES" + " WHERE ROUTINE_CATALOG = '" + schema.getName() + "'"
            + nameClause;
    QueryRunner qr = new QueryRunner(); // using queryRunner so that we can reuse the connection
    ImmutableList<Map<String, Object>> maps = ListAdapter.adapt(qr.query(conn, query, new MapListHandler()))
            .toImmutable();

    return maps.collect(new Function<Map<String, Object>, DaRoutine>() {
        @Override
        public DaRoutine valueOf(Map<String, Object> object) {
            DaRoutineType routineType = DaRoutineType
                    .valueOf(((String) object.get("ROUTINE_TYPE")).toLowerCase());
            return new DaRoutinePojoImpl((String) object.get("ROUTINE_NAME"), schema, routineType,
                    (String) object.get("SPECIFIC_NAME"), (String) object.get("ROUTINE_DEFINITION"));
        }
    });
}

From source file:com.odap.server.scheduler.JobManager.java

public synchronized static void removeJob(String job_id) {
    logger.info("Entering removeJob");
    try {
        String sql = "DELETE FROM jobs WHERE id = " + job_id;
        logger.info(sql);
        new QueryRunner().update(conn, sql);
    } catch (SQLException e) {
        logger.error("Error removing job", e);
    }
    logger.info("Exiting removeJob");
}

From source file:hermes.store.schema.DefaultJDBCAdapter.java

public Collection<Destination> getDestinations(Connection connection, String storeId)
        throws SQLException, JMSException {
    final Collection<Destination> destinations = new ArrayList<Destination>();
    final QueryRunner runner = new QueryRunner();

    Hermes.ui.getDefaultMessageSink().add("Getting message store destinations....");

    runner.query(connection, "select distinct destination, domain from stores where storeId=? ",
            new Object[] { storeId }, new ResultSetHandler() {
                public Object handle(ResultSet rs) throws SQLException {
                    while (rs.next()) {
                        final Domain domain = Domain.getDomain(rs.getInt(2));
                        if (domain.equals(Domain.QUEUE)) {
                            destinations.add(new MessageStoreQueue(rs.getString(1)));
                        } else if (domain.equals(Domain.TOPIC)) {
                            destinations.add(new MessageStoreTopic(rs.getString(1)));
                        } else if (domain.equals(Domain.FOLDER)) {
                            destinations.add(new MessageStoreFolder(rs.getString(1)));
                        }

                    }

                    return destinations;
                }
            });

    Hermes.ui.getDefaultMessageSink().add("Getting message store folders.... done.");

    return destinations;
}

From source file:azkaban.project.JdbcProjectLoader.java

private Project fetchProjectByName(Connection connection, String name) throws ProjectManagerException {
    QueryRunner runner = new QueryRunner();
    // Fetch the project
    Project project = null;
    ProjectResultHandler handler = new ProjectResultHandler();
    try {
        List<Project> projects = runner.query(connection, ProjectResultHandler.SELECT_PROJECT_BY_NAME, handler,
                name);
        if (projects.isEmpty()) {
            throw new ProjectManagerException("No project with name " + name + " exists in db.");
        }

        project = projects.get(0);
    } catch (SQLException e) {
        logger.error(ProjectResultHandler.SELECT_PROJECT_BY_NAME + " failed.");
        throw new ProjectManagerException("Query for existing project failed. Project " + name, e);
    }

    // Fetch the user permissions
    List<Triple<String, Boolean, Permission>> permissions = fetchPermissionsForProject(connection, project);

    for (Triple<String, Boolean, Permission> perm : permissions) {
        if (perm.getThird().toFlags() != 0) {
            if (perm.getSecond()) {
                project.setGroupPermission(perm.getFirst(), perm.getThird());
            } else {
                project.setUserPermission(perm.getFirst(), perm.getThird());
            }
        }
    }

    return project;
}

From source file:com.netflix.metacat.usermetadata.mysql.MySqlTagService.java

/**
 * Returns the list of tags of the tag item id.
 * @param tagItemId tag item id
 * @return list of tags
 */
public Set<String> getValues(final Long tagItemId) {
    final Connection connection = DBUtil.getReadConnection(getDataSource());
    try {
        return new QueryRunner().query(connection, SQL_GET_TAG_ITEM_TAGS, rs -> {
            final Set<String> result = Sets.newHashSet();
            while (rs.next()) {
                result.add(rs.getString("value"));
            }
            return result;
        }, tagItemId);
    } catch (Exception e) {
        final String message = String.format("Failed to get the tags for id %s", tagItemId);
        log.error(message, e);
        throw new UserMetadataServiceException(message, e);
    } finally {
        DBUtil.closeReadConnection(connection);
    }
}

From source file:com.gs.obevo.dbmetadata.impl.dialects.SybaseAseMetadataDialect.java

@Override
public ImmutableCollection<ExtraIndexInfo> searchExtraConstraintIndices(DaSchema schema, String tableName,
        Connection conn) throws SQLException {
    QueryRunner query = new QueryRunner(); // using queryRunner so that we can reuse the connection

    // Do not use ANSI JOIN as it does not work in Sybase 11.x - the SQL below works across all versions
    String tableClause = tableName == null ? "" : " AND tab.name = '" + tableName + "'";
    ImmutableList<Map<String, Object>> maps = ListAdapter.adapt(query.query(conn,
            "select tab.name TABLE_NAME, ind.name INDEX_NAME, status2 & 8 IS_CONSTRAINT, status2 & 512 IS_CLUSTERED "
                    + "from " + schema.getName() + "..sysindexes ind, " + schema.getName() + "..sysobjects tab "
                    + "where ind.id = tab.id " + tableClause,
            new MapListHandler())).toImmutable();

    return maps.collect(new Function<Map<String, Object>, ExtraIndexInfo>() {
        @Override
        public ExtraIndexInfo valueOf(Map<String, Object> map) {
            return new ExtraIndexInfo((String) map.get("TABLE_NAME"), (String) map.get("INDEX_NAME"),
                    (Integer) map.get("IS_CONSTRAINT") != 0, (Integer) map.get("IS_CLUSTERED") != 0);
        }
    });
}

From source file:ch.admin.suis.msghandler.log.DbLogService.java

/**
 * Initializes the log service by creating the database and the log table and getting the SQL connection. This method
 * loads the HSQL JDBC driver and gets the connection to the database. After that an attempt is made to clean up the
 * old records. If the DB table does not exist, it is created. If the JDBC driver cannot be loaded, this method logs
 * at fatal level and returns.
 *
 * @throws SQLException if the JDBC connection or the DB table cannot be created
 */
public void init() throws SQLException {
    // load the driver
    try {
        Class.forName("org.hsqldb.jdbcDriver");
    } catch (ClassNotFoundException e) {
        LOG.fatal("cannot find the HSQL driver class : " + e);
        return;
    }

    // get the connection from the manager
    connection = DriverManager.getConnection("jdbc:hsqldb:" + base + File.separator + DB_FILE_NAME_PREFIX, // filenames
            "sa", // username
            ""); // password

    runner = new QueryRunner();

    // initialize the log size to 1MB to possibly prevent a very slow start if the MH was stopped before
    // a DB checkpoint had been reached (default is 200 MB which is too much)
    runner.update(connection, SET_LOGSIZE_STATEMENT);

    // check if the status table already exists
    try {
        // we run the cleanup statement; if it fails, then the database file does not probably exist
        cleanup();
    } catch (SQLException e) {
        LOG.error(e);
        // create the table if it does not exist
        LOG.debug("Creating the status table...");
        runner.update(connection, CREATE_TABLE_STATEMENT);
        LOG.info("The status table is created.");
    }

}

From source file:net.gcolin.simplerepo.search.SearchController.java

public void rebuild() throws IOException {
    if (running) {
        return;
    }
    running = true;
    configManager.setCurrentAction(REBUILD + "initialize");
    nb = 0;
    try {
        final QueryRunner run = new QueryRunner();

        try {
            Connection connection = null;
            try {
                connection = datasource.getConnection();
                connection.setAutoCommit(false);
                run.update(connection, "delete from artifacttype");
                run.update(connection, "delete from artifactversion");
                run.update(connection, "delete from artifact");
                run.update(connection, "update artifactindex set artifact=1,version=1");
                connection.commit();
            } catch (SQLException ex) {
                connection.rollback();
                throw ex;
            } finally {
                DbUtils.close(connection);
            }
        } catch (SQLException ex) {
            logger.log(Level.SEVERE, null, ex);
            throw new IOException(ex);
        }
        for (final Repository repository : configManager.getConfiguration().getRepositories()) {
            File repo = new File(configManager.getRoot(), repository.getName());
            if (repo.exists()) {
                Files.walkFileTree(repo.toPath(), new SimpleFileVisitor<Path>() {
                    @Override
                    public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                        nb++;
                        if (nb % 20 == 0) {
                            configManager.setCurrentAction(REBUILD + " " + nb + " files");
                        }
                        if (file.toString().endsWith(".pom")) {
                            Model model = readPom(file.toFile());
                            add(repository, file.toFile(), model);
                        }
                        return FileVisitResult.CONTINUE;
                    }

                });
            }
        }
    } finally {
        running = false;
        configManager.setCurrentAction(null);
    }
}

From source file:com.mirth.connect.connectors.jdbc.JdbcMessageReceiver.java

public void processMessage(Object message) throws Exception {
    try {
        monitoringController.updateStatus(connector, connectorType, Event.BUSY);

        if (connector.isUseScript() && connector.isUseAck()) {
            // dispatch messages
            UMOMessageAdapter msgAdapter = connector.getMessageAdapter(message);
            UMOMessage umoMessage = new MuleMessage(msgAdapter);
            // we should get an MO back (if we're synchronized...)
            umoMessage = routeMessage(umoMessage, endpoint.isSynchronous());

            Context context = Context.enter();
            Scriptable scope = new ImporterTopLevel(context);
            // load variables in JavaScript scope
            JavaScriptScopeUtil.buildScope(scope, connector.getChannelId(), scriptLogger);
            scope.put("dbMap", scope, jdbcMap);
            scope.put("resultMap", scope, message);

            if (umoMessage != null) {
                MessageObject messageObject = (MessageObject) umoMessage.getPayload();
                postprocessor.doPostProcess(messageObject);
                scope.put("responseMap", scope, messageObject.getResponseMap());
            }

            // get the script from the cache and execute it
            Script compiledScript = compiledScriptCache.getCompiledScript(connector.getAckScriptId());

            if (compiledScript == null) {
                logger.error("Database query update could not be found in cache");
                throw new Exception("Database query update script could not be found in cache");
            } else {
                compiledScript.exec(context, scope);
            }
        } else {
            UMOTransaction tx = TransactionCoordination.getInstance().getTransaction();
            Exception ackException = null;

            try {
                try {
                    if (connector.isUseAck() && ackStmt != null) {
                        int numRows = new QueryRunner().update(connection, ackStmt,
                                JdbcUtils.getParams(getEndpointURI(), ackParams, message));

                        if (numRows != 1) {
                            logger.warn("Row count for ack should be 1 and not " + numRows);
                        }
                    }
                } catch (Exception ue) {
                    logger.error(
                            "Error in the ACK sentence of the JDBC connection, but the message is being sent anyway"
                                    + ue);
                    ackException = ue;
                }

                UMOMessageAdapter msgAdapter = this.connector.getMessageAdapter(message);
                UMOMessage umoMessage = new MuleMessage(msgAdapter);
                UMOMessage retMessage = routeMessage(umoMessage, tx, tx != null || endpoint.isSynchronous());

                if (retMessage != null) {
                    // prevents errors if synchronous is not checked
                    postprocessor.doPostProcess(retMessage.getPayload());
                }

                if (ackException != null) {
                    throw ackException;
                }
            } catch (ConnectException ce) {
                throw new Exception(ce.getCause());
            }
        }
    } catch (Exception e) {
        logger.error(
                "Error in channel: " + ChannelController.getInstance()
                        .getDeployedChannelById(connector.getChannelId()).getName(),
                ExceptionUtils.getRootCause(e));
        alertController.sendAlerts(connector.getChannelId(), Constants.ERROR_406, null, e);
    } finally {
        monitoringController.updateStatus(connector, connectorType, Event.DONE);
    }
}